From 0fb8b64dd5ba97bf6a9248a78b697fb66bd30ebb Mon Sep 17 00:00:00 2001
From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com>
Date: Wed, 7 Aug 2024 16:23:56 +0200
Subject: [PATCH] Release Version 1.0.0 (#124)

* prepare for 1.0.0
* change title
* cleaning
* cleaning
* add API key setup
* add agent example
* improve old client message
---
 .genignore | 2 + .gitattributes | 2 + .github/workflows/build_publish.yaml | 90 -- .../sdk_generation_mistralai_azure_sdk.yaml | 29 + .../sdk_generation_mistralai_gcp_sdk.yaml | 29 + .../sdk_generation_mistralai_sdk.yaml | 29 + .../workflows/sdk_publish_mistralai_sdk.yaml | 20 + .gitignore | 42 +- .speakeasy/gen.lock | 272 +++++ .speakeasy/gen.yaml | 42 + .speakeasy/workflow.lock | 52 + .speakeasy/workflow.yaml | 39 + .vscode/settings.json | 6 + CONTRIBUTING.md | 26 + LICENSE | 2 +- MIGRATION.md | 216 ++++ Makefile | 6 - OLD-README.md | 62 ++ README.md | 666 +++++++++++- USAGE.md | 153 +++ docs/models/agentscompletionrequest.md | 17 + .../models/agentscompletionrequestmessages.md | 23 + docs/models/agentscompletionrequeststop.md | 19 + .../agentscompletionrequesttoolchoice.md | 10 + docs/models/agentscompletionstreamrequest.md | 17 + .../agentscompletionstreamrequeststop.md | 19 + docs/models/archiveftmodelout.md | 10 + docs/models/arguments.md | 17 + docs/models/assistantmessage.md | 11 + docs/models/assistantmessagerole.md | 8 + docs/models/chatcompletionchoice.md | 10 + docs/models/chatcompletionrequest.md | 20 + docs/models/chatcompletionresponse.md | 13 + docs/models/chatcompletionstreamrequest.md | 20 + .../chatcompletionstreamrequestmessages.md | 29 + .../models/chatcompletionstreamrequeststop.md | 19 + .../chatcompletionstreamrequesttoolchoice.md | 10 + docs/models/checkpointout.md | 10 + docs/models/completionchunk.md | 13 + docs/models/completionevent.md | 8 + docs/models/completionresponsestreamchoice.md | 10 + ...pletionresponsestreamchoicefinishreason.md | 11 + docs/models/content.md | 17 + docs/models/contentchunk.md | 9 + docs/models/deletefileout.md | 10 + docs/models/deletemodelout.md | 10 + ...deletemodelv1modelsmodeliddeleterequest.md | 8 + docs/models/deltamessage.md | 10 + docs/models/detailedjobout.md | 26 + docs/models/detailedjoboutstatus.md | 17 + docs/models/embeddingrequest.md | 10 + docs/models/embeddingresponse.md | 12 + docs/models/embeddingresponsedata.md | 10 + docs/models/eventout.md | 10 + docs/models/file.md | 10 + .../models/filesapiroutesdeletefilerequest.md | 8 + .../filesapiroutesretrievefilerequest.md | 8 + ...sapiroutesuploadfilemultipartbodyparams.md | 9 + docs/models/fileschema.md | 16 + docs/models/fimcompletionrequest.md | 17 + docs/models/fimcompletionrequeststop.md | 19 + docs/models/fimcompletionresponse.md | 13 + docs/models/fimcompletionstreamrequest.md | 17 + docs/models/fimcompletionstreamrequeststop.md | 19 + docs/models/finetuneablemodel.md | 14 + docs/models/finishreason.md | 12 + docs/models/ftmodelcapabilitiesout.md | 11 + docs/models/ftmodelout.md | 19 + docs/models/function.md | 10 + docs/models/functioncall.md | 9 + docs/models/githubrepositoryin.md | 13 + docs/models/githubrepositoryout.md | 13 + docs/models/httpvalidationerror.md | 10 + docs/models/inputs.md | 19 + docs/models/jobin.md | 15 + docs/models/jobmetadataout.md | 14 + docs/models/jobout.md | 24 + ...sfinetuningarchivefinetunedmodelrequest.md | 8 + ...tesfinetuningcancelfinetuningjobrequest.md | 8 + ...esfinetuningcreatefinetuningjobresponse.md | 19 + ...routesfinetuninggetfinetuningjobrequest.md | 8 + 
...outesfinetuninggetfinetuningjobsrequest.md | 16 + ...utesfinetuningstartfinetuningjobrequest.md | 8 + ...inetuningunarchivefinetunedmodelrequest.md | 8 + ...esfinetuningupdatefinetunedmodelrequest.md | 9 + docs/models/jobsout.md | 10 + docs/models/legacyjobmetadataout.md | 19 + docs/models/listfilesout.md | 9 + docs/models/loc.md | 17 + docs/models/messages.md | 29 + docs/models/metricout.md | 12 + docs/models/modelcapabilities.md | 11 + docs/models/modelcard.md | 19 + docs/models/modellist.md | 9 + docs/models/queryparamstatus.md | 19 + docs/models/responseformat.md | 8 + docs/models/responseformats.md | 11 + docs/models/retrievefileout.md | 16 + .../retrievemodelv1modelsmodelidgetrequest.md | 8 + docs/models/role.md | 8 + docs/models/sampletype.md | 9 + docs/models/security.md | 8 + docs/models/source.md | 9 + docs/models/status.md | 19 + docs/models/stop.md | 19 + docs/models/systemmessage.md | 9 + docs/models/textchunk.md | 9 + docs/models/tool.md | 9 + docs/models/toolcall.md | 10 + docs/models/toolchoice.md | 10 + docs/models/toolmessage.md | 11 + docs/models/toolmessagerole.md | 8 + docs/models/trainingfile.md | 9 + docs/models/trainingparameters.md | 11 + docs/models/trainingparametersin.md | 13 + docs/models/unarchiveftmodelout.md | 10 + docs/models/updateftmodelin.md | 9 + docs/models/uploadfileout.md | 16 + docs/models/usageinfo.md | 10 + docs/models/usermessage.md | 9 + docs/models/usermessagecontent.md | 17 + docs/models/usermessagerole.md | 8 + docs/models/utils/retryconfig.md | 24 + docs/models/validationerror.md | 10 + docs/models/wandbintegration.md | 12 + docs/models/wandbintegrationout.md | 11 + docs/sdks/agents/README.md | 117 +++ docs/sdks/chat/README.md | 128 +++ docs/sdks/embeddings/README.md | 53 + docs/sdks/files/README.md | 179 ++++ docs/sdks/fim/README.md | 112 ++ docs/sdks/finetuning/README.md | 5 + docs/sdks/jobs/README.md | 225 ++++ docs/sdks/mistral/README.md | 9 + docs/sdks/models/README.md | 259 +++++ examples/async_agents_no_streaming.py | 24 + examples/async_chat_no_streaming.py | 12 +- examples/async_chat_with_streaming.py | 19 +- ...completion.py => async_code_completion.py} | 6 +- examples/async_completion.py | 33 - examples/async_embeddings.py | 8 +- examples/async_files.py | 18 +- examples/async_jobs.py | 30 +- examples/async_jobs_chat.py | 35 +- examples/async_list_models.py | 6 +- examples/azure/chat_no_streaming.py.py | 16 + examples/chat_no_streaming.py | 10 +- examples/chat_with_streaming.py | 14 +- examples/chatbot_with_streaming.py | 46 +- examples/code_completion.py | 8 +- examples/completion_with_streaming.py | 9 +- examples/dry_run_job.py | 16 +- examples/embeddings.py | 8 +- examples/files.py | 16 +- examples/function_calling.py | 63 +- examples/gcp/async_chat_no_streaming.py | 24 + examples/jobs.py | 34 +- examples/json_format.py | 14 +- examples/list_models.py | 6 +- packages/mistralai_azure/.genignore | 4 + packages/mistralai_azure/.gitattributes | 2 + packages/mistralai_azure/.gitignore | 8 + packages/mistralai_azure/.speakeasy/gen.lock | 133 +++ packages/mistralai_azure/.speakeasy/gen.yaml | 41 + .../mistralai_azure/.vscode/settings.json | 6 + packages/mistralai_azure/CONTRIBUTING.md | 26 + packages/mistralai_azure/README.md | 430 ++++++++ packages/mistralai_azure/USAGE.md | 55 + .../mistralai_azure/docs/models/arguments.md | 17 + .../docs/models/assistantmessage.md | 11 + .../docs/models/assistantmessagerole.md | 8 + .../docs/models/chatcompletionchoice.md | 10 + .../chatcompletionchoicefinishreason.md | 12 + 
.../docs/models/chatcompletionrequest.md | 20 + .../models/chatcompletionrequestmessages.md | 29 + .../docs/models/chatcompletionrequeststop.md | 19 + .../models/chatcompletionrequesttoolchoice.md | 10 + .../docs/models/chatcompletionresponse.md | 13 + .../models/chatcompletionstreamrequest.md | 20 + .../docs/models/completionchunk.md | 13 + .../docs/models/completionevent.md | 8 + .../models/completionresponsestreamchoice.md | 10 + .../mistralai_azure/docs/models/content.md | 17 + .../docs/models/contentchunk.md | 9 + .../docs/models/deltamessage.md | 10 + .../docs/models/finishreason.md | 11 + .../mistralai_azure/docs/models/function.md | 10 + .../docs/models/functioncall.md | 9 + .../docs/models/httpvalidationerror.md | 10 + packages/mistralai_azure/docs/models/loc.md | 17 + .../mistralai_azure/docs/models/messages.md | 29 + .../docs/models/responseformat.md | 8 + .../docs/models/responseformats.md | 11 + packages/mistralai_azure/docs/models/role.md | 8 + .../mistralai_azure/docs/models/security.md | 8 + packages/mistralai_azure/docs/models/stop.md | 19 + .../docs/models/systemmessage.md | 9 + .../mistralai_azure/docs/models/textchunk.md | 9 + packages/mistralai_azure/docs/models/tool.md | 9 + .../mistralai_azure/docs/models/toolcall.md | 10 + .../mistralai_azure/docs/models/toolchoice.md | 10 + .../docs/models/toolmessage.md | 11 + .../docs/models/toolmessagerole.md | 8 + .../mistralai_azure/docs/models/usageinfo.md | 10 + .../docs/models/usermessage.md | 9 + .../docs/models/usermessagecontent.md | 17 + .../docs/models/usermessagerole.md | 8 + .../docs/models/utils/retryconfig.md | 24 + .../docs/models/validationerror.md | 10 + .../mistralai_azure/docs/sdks/chat/README.md | 129 +++ .../docs/sdks/mistralazure/README.md | 9 + packages/mistralai_azure/poetry.lock | 638 ++++++++++++ packages/mistralai_azure/poetry.toml | 2 + packages/mistralai_azure/py.typed | 1 + packages/mistralai_azure/pylintrc | 658 ++++++++++++ packages/mistralai_azure/pyproject.toml | 56 + packages/mistralai_azure/scripts/compile.sh | 83 ++ packages/mistralai_azure/scripts/publish.sh | 5 + .../src/mistralai_azure/__init__.py | 5 + .../src/mistralai_azure/_hooks/__init__.py | 5 + .../_hooks/custom_user_agent.py | 16 + .../mistralai_azure/_hooks/registration.py | 15 + .../src/mistralai_azure/_hooks/sdkhooks.py | 57 + .../src/mistralai_azure/_hooks/types.py | 76 ++ .../src/mistralai_azure/basesdk.py | 253 +++++ .../src/mistralai_azure/chat.py | 470 +++++++++ .../src/mistralai_azure/httpclient.py | 78 ++ .../src/mistralai_azure/models/__init__.py | 28 + .../models/assistantmessage.py | 53 + .../models/chatcompletionchoice.py | 22 + .../models/chatcompletionrequest.py | 109 ++ .../models/chatcompletionresponse.py | 27 + .../models/chatcompletionstreamrequest.py | 107 ++ .../mistralai_azure/models/completionchunk.py | 27 + .../mistralai_azure/models/completionevent.py | 15 + .../models/completionresponsestreamchoice.py | 48 + .../mistralai_azure/models/contentchunk.py | 17 + .../mistralai_azure/models/deltamessage.py | 47 + .../src/mistralai_azure/models/function.py | 19 + .../mistralai_azure/models/functioncall.py | 22 + .../models/httpvalidationerror.py | 23 + .../mistralai_azure/models/responseformat.py | 18 + .../src/mistralai_azure/models/sdkerror.py | 22 + .../src/mistralai_azure/models/security.py | 16 + .../mistralai_azure/models/systemmessage.py | 26 + .../src/mistralai_azure/models/textchunk.py | 17 + .../src/mistralai_azure/models/tool.py | 18 + .../src/mistralai_azure/models/toolcall.py | 20 + 
.../src/mistralai_azure/models/toolmessage.py | 50 + .../src/mistralai_azure/models/usageinfo.py | 18 + .../src/mistralai_azure/models/usermessage.py | 26 + .../mistralai_azure/models/validationerror.py | 24 + .../src/mistralai_azure/py.typed | 1 + .../src/mistralai_azure/sdk.py | 107 ++ .../src/mistralai_azure/sdkconfiguration.py | 54 + .../src/mistralai_azure/types/__init__.py | 21 + .../src/mistralai_azure/types/basemodel.py | 39 + .../src/mistralai_azure/utils/__init__.py | 84 ++ .../src/mistralai_azure/utils/annotations.py | 19 + .../src/mistralai_azure/utils/enums.py | 34 + .../mistralai_azure/utils/eventstreaming.py | 178 ++++ .../src/mistralai_azure/utils/forms.py | 207 ++++ .../src/mistralai_azure/utils/headers.py | 136 +++ .../src/mistralai_azure/utils/logger.py | 16 + .../src/mistralai_azure/utils/metadata.py | 118 +++ .../src/mistralai_azure/utils/queryparams.py | 203 ++++ .../mistralai_azure/utils/requestbodies.py | 66 ++ .../src/mistralai_azure/utils/retries.py | 216 ++++ .../src/mistralai_azure/utils/security.py | 168 +++ .../src/mistralai_azure/utils/serializers.py | 181 ++++ .../src/mistralai_azure/utils/url.py | 150 +++ .../src/mistralai_azure/utils/values.py | 128 +++ packages/mistralai_gcp/.genignore | 4 + packages/mistralai_gcp/.gitattributes | 2 + packages/mistralai_gcp/.gitignore | 8 + packages/mistralai_gcp/.speakeasy/gen.lock | 142 +++ packages/mistralai_gcp/.speakeasy/gen.yaml | 44 + packages/mistralai_gcp/.vscode/settings.json | 6 + packages/mistralai_gcp/CONTRIBUTING.md | 26 + packages/mistralai_gcp/README.md | 425 ++++++++ packages/mistralai_gcp/USAGE.md | 51 + .../mistralai_gcp/docs/models/arguments.md | 17 + .../docs/models/assistantmessage.md | 11 + .../docs/models/assistantmessagerole.md | 8 + .../docs/models/chatcompletionchoice.md | 10 + .../chatcompletionchoicefinishreason.md | 12 + .../docs/models/chatcompletionrequest.md | 19 + .../models/chatcompletionrequestmessages.md | 29 + .../docs/models/chatcompletionrequeststop.md | 19 + .../models/chatcompletionrequesttoolchoice.md | 10 + .../docs/models/chatcompletionresponse.md | 13 + .../models/chatcompletionstreamrequest.md | 19 + .../docs/models/completionchunk.md | 13 + .../docs/models/completionevent.md | 8 + .../models/completionresponsestreamchoice.md | 10 + packages/mistralai_gcp/docs/models/content.md | 17 + .../mistralai_gcp/docs/models/contentchunk.md | 9 + .../mistralai_gcp/docs/models/deltamessage.md | 10 + .../docs/models/fimcompletionrequest.md | 17 + .../docs/models/fimcompletionrequeststop.md | 19 + .../docs/models/fimcompletionresponse.md | 13 + .../docs/models/fimcompletionstreamrequest.md | 17 + .../models/fimcompletionstreamrequeststop.md | 19 + .../mistralai_gcp/docs/models/finishreason.md | 11 + .../mistralai_gcp/docs/models/function.md | 10 + .../mistralai_gcp/docs/models/functioncall.md | 9 + .../docs/models/httpvalidationerror.md | 10 + packages/mistralai_gcp/docs/models/loc.md | 17 + .../mistralai_gcp/docs/models/messages.md | 29 + .../docs/models/responseformat.md | 8 + .../docs/models/responseformats.md | 11 + packages/mistralai_gcp/docs/models/role.md | 8 + .../mistralai_gcp/docs/models/security.md | 8 + packages/mistralai_gcp/docs/models/stop.md | 19 + .../docs/models/systemmessage.md | 9 + .../mistralai_gcp/docs/models/textchunk.md | 9 + packages/mistralai_gcp/docs/models/tool.md | 9 + .../mistralai_gcp/docs/models/toolcall.md | 10 + .../mistralai_gcp/docs/models/toolchoice.md | 10 + .../mistralai_gcp/docs/models/toolmessage.md | 11 + .../docs/models/toolmessagerole.md | 8 + 
.../mistralai_gcp/docs/models/usageinfo.md | 10 + .../mistralai_gcp/docs/models/usermessage.md | 9 + .../docs/models/usermessagecontent.md | 17 + .../docs/models/usermessagerole.md | 8 + .../docs/models/utils/retryconfig.md | 24 + .../docs/models/validationerror.md | 10 + .../mistralai_gcp/docs/sdks/chat/README.md | 121 +++ .../mistralai_gcp/docs/sdks/fim/README.md | 107 ++ .../docs/sdks/mistralgcp/README.md | 9 + packages/mistralai_gcp/poetry.lock | 848 +++++++++++++++ packages/mistralai_gcp/poetry.toml | 2 + packages/mistralai_gcp/py.typed | 1 + packages/mistralai_gcp/pylintrc | 658 ++++++++++++ packages/mistralai_gcp/pyproject.toml | 58 ++ packages/mistralai_gcp/scripts/compile.sh | 83 ++ packages/mistralai_gcp/scripts/publish.sh | 5 + .../src/mistralai_gcp/__init__.py | 5 + .../src/mistralai_gcp/_hooks/__init__.py | 5 + .../mistralai_gcp/_hooks/custom_user_agent.py | 16 + .../src/mistralai_gcp/_hooks/registration.py | 15 + .../src/mistralai_gcp/_hooks/sdkhooks.py | 57 + .../src/mistralai_gcp/_hooks/types.py | 76 ++ .../src/mistralai_gcp/basesdk.py | 253 +++++ .../mistralai_gcp/src/mistralai_gcp/chat.py | 458 +++++++++ .../mistralai_gcp/src/mistralai_gcp/fim.py | 438 ++++++++ .../src/mistralai_gcp/httpclient.py | 78 ++ .../src/mistralai_gcp/models/__init__.py | 31 + .../mistralai_gcp/models/assistantmessage.py | 53 + .../models/chatcompletionchoice.py | 22 + .../models/chatcompletionrequest.py | 105 ++ .../models/chatcompletionresponse.py | 27 + .../models/chatcompletionstreamrequest.py | 103 ++ .../mistralai_gcp/models/completionchunk.py | 27 + .../mistralai_gcp/models/completionevent.py | 15 + .../models/completionresponsestreamchoice.py | 48 + .../src/mistralai_gcp/models/contentchunk.py | 17 + .../src/mistralai_gcp/models/deltamessage.py | 47 + .../models/fimcompletionrequest.py | 94 ++ .../models/fimcompletionresponse.py | 27 + .../models/fimcompletionstreamrequest.py | 92 ++ .../src/mistralai_gcp/models/function.py | 19 + .../src/mistralai_gcp/models/functioncall.py | 22 + .../models/httpvalidationerror.py | 23 + .../mistralai_gcp/models/responseformat.py | 18 + .../src/mistralai_gcp/models/sdkerror.py | 22 + .../src/mistralai_gcp/models/security.py | 16 + .../src/mistralai_gcp/models/systemmessage.py | 26 + .../src/mistralai_gcp/models/textchunk.py | 17 + .../src/mistralai_gcp/models/tool.py | 18 + .../src/mistralai_gcp/models/toolcall.py | 20 + .../src/mistralai_gcp/models/toolmessage.py | 50 + .../src/mistralai_gcp/models/usageinfo.py | 18 + .../src/mistralai_gcp/models/usermessage.py | 26 + .../mistralai_gcp/models/validationerror.py | 24 + .../mistralai_gcp/src/mistralai_gcp/py.typed | 1 + .../mistralai_gcp/src/mistralai_gcp/sdk.py | 174 ++++ .../src/mistralai_gcp/sdkconfiguration.py | 54 + .../src/mistralai_gcp/types/__init__.py | 21 + .../src/mistralai_gcp/types/basemodel.py | 39 + .../src/mistralai_gcp/utils/__init__.py | 84 ++ .../src/mistralai_gcp/utils/annotations.py | 19 + .../src/mistralai_gcp/utils/enums.py | 34 + .../src/mistralai_gcp/utils/eventstreaming.py | 178 ++++ .../src/mistralai_gcp/utils/forms.py | 207 ++++ .../src/mistralai_gcp/utils/headers.py | 136 +++ .../src/mistralai_gcp/utils/logger.py | 16 + .../src/mistralai_gcp/utils/metadata.py | 118 +++ .../src/mistralai_gcp/utils/queryparams.py | 203 ++++ .../src/mistralai_gcp/utils/requestbodies.py | 66 ++ .../src/mistralai_gcp/utils/retries.py | 216 ++++ .../src/mistralai_gcp/utils/security.py | 168 +++ .../src/mistralai_gcp/utils/serializers.py | 181 ++++ .../src/mistralai_gcp/utils/url.py | 150 +++ 
.../src/mistralai_gcp/utils/values.py | 128 +++ poetry.lock | 778 +++++++++----- poetry.toml | 2 + py.typed | 1 + pylintrc | 658 ++++++++++++ pyproject.toml | 71 +- scripts/compile.sh | 83 ++ scripts/publish.sh | 5 + src/mistralai/__init__.py | 5 + src/mistralai/_hooks/__init__.py | 5 + src/mistralai/_hooks/custom_user_agent.py | 16 + src/mistralai/_hooks/deprecation_warning.py | 26 + src/mistralai/_hooks/registration.py | 17 + src/mistralai/_hooks/sdkhooks.py | 57 + src/mistralai/_hooks/types.py | 76 ++ src/mistralai/agents.py | 434 ++++++++ src/mistralai/async_client.py | 418 +------- src/mistralai/basesdk.py | 253 +++++ src/mistralai/chat.py | 470 +++++++++ src/mistralai/client.py | 419 +------- src/mistralai/client_base.py | 211 ---- src/mistralai/constants.py | 5 - src/mistralai/embeddings.py | 182 ++++ src/mistralai/exceptions.py | 54 - src/mistralai/files.py | 684 ++++++++++-- src/mistralai/fim.py | 438 ++++++++ src/mistralai/fine_tuning.py | 16 + src/mistralai/httpclient.py | 78 ++ src/mistralai/jobs.py | 972 +++++++++++++++--- src/mistralai/models/__init__.py | 82 ++ .../models/agentscompletionrequest.py | 96 ++ .../models/agentscompletionstreamrequest.py | 92 ++ src/mistralai/models/archiveftmodelout.py | 19 + src/mistralai/models/assistantmessage.py | 53 + src/mistralai/models/chat_completion.py | 93 -- src/mistralai/models/chatcompletionchoice.py | 22 + src/mistralai/models/chatcompletionrequest.py | 109 ++ .../models/chatcompletionresponse.py | 27 + .../models/chatcompletionstreamrequest.py | 107 ++ src/mistralai/models/checkpointout.py | 25 + src/mistralai/models/common.py | 9 - src/mistralai/models/completionchunk.py | 27 + src/mistralai/models/completionevent.py | 15 + .../models/completionresponsestreamchoice.py | 48 + src/mistralai/models/contentchunk.py | 17 + ...elete_model_v1_models_model_id_deleteop.py | 18 + src/mistralai/models/deletefileout.py | 24 + src/mistralai/models/deletemodelout.py | 25 + src/mistralai/models/deltamessage.py | 47 + src/mistralai/models/detailedjobout.py | 91 ++ src/mistralai/models/embeddingrequest.py | 61 ++ src/mistralai/models/embeddingresponse.py | 24 + src/mistralai/models/embeddingresponsedata.py | 19 + src/mistralai/models/embeddings.py | 19 - src/mistralai/models/eventout.py | 50 + src/mistralai/models/files.py | 23 - .../models/files_api_routes_delete_fileop.py | 16 + .../files_api_routes_retrieve_fileop.py | 16 + .../models/files_api_routes_upload_fileop.py | 51 + src/mistralai/models/fileschema.py | 71 ++ src/mistralai/models/fimcompletionrequest.py | 94 ++ src/mistralai/models/fimcompletionresponse.py | 27 + .../models/fimcompletionstreamrequest.py | 92 ++ src/mistralai/models/finetuneablemodel.py | 8 + .../models/ftmodelcapabilitiesout.py | 21 + src/mistralai/models/ftmodelout.py | 65 ++ src/mistralai/models/function.py | 19 + src/mistralai/models/functioncall.py | 22 + src/mistralai/models/githubrepositoryin.py | 52 + src/mistralai/models/githubrepositoryout.py | 52 + src/mistralai/models/httpvalidationerror.py | 23 + src/mistralai/models/jobin.py | 73 ++ src/mistralai/models/jobmetadataout.py | 54 + src/mistralai/models/jobout.py | 107 ++ src/mistralai/models/jobs.py | 100 -- ..._fine_tuning_archive_fine_tuned_modelop.py | 18 + ...es_fine_tuning_cancel_fine_tuning_jobop.py | 18 + ...es_fine_tuning_create_fine_tuning_jobop.py | 15 + ...outes_fine_tuning_get_fine_tuning_jobop.py | 18 + ...utes_fine_tuning_get_fine_tuning_jobsop.py | 81 ++ ...tes_fine_tuning_start_fine_tuning_jobop.py | 16 + 
...ine_tuning_unarchive_fine_tuned_modelop.py | 18 + ...s_fine_tuning_update_fine_tuned_modelop.py | 21 + src/mistralai/models/jobsout.py | 20 + src/mistralai/models/legacyjobmetadataout.py | 80 ++ src/mistralai/models/listfilesout.py | 17 + src/mistralai/models/metricout.py | 50 + src/mistralai/models/modelcapabilities.py | 21 + src/mistralai/models/modelcard.py | 66 ++ src/mistralai/models/modellist.py | 18 + src/mistralai/models/models.py | 39 - src/mistralai/models/responseformat.py | 18 + ...retrieve_model_v1_models_model_id_getop.py | 18 + src/mistralai/models/retrievefileout.py | 71 ++ src/mistralai/models/sampletype.py | 7 + src/mistralai/models/sdkerror.py | 22 + src/mistralai/models/security.py | 16 + src/mistralai/models/source.py | 7 + src/mistralai/models/systemmessage.py | 26 + src/mistralai/models/textchunk.py | 17 + src/mistralai/models/tool.py | 18 + src/mistralai/models/toolcall.py | 20 + src/mistralai/models/toolmessage.py | 50 + src/mistralai/models/trainingfile.py | 17 + src/mistralai/models/trainingparameters.py | 48 + src/mistralai/models/trainingparametersin.py | 56 + src/mistralai/models/unarchiveftmodelout.py | 19 + src/mistralai/models/updateftmodelin.py | 44 + src/mistralai/models/uploadfileout.py | 71 ++ src/mistralai/models/usageinfo.py | 18 + src/mistralai/models/usermessage.py | 26 + src/mistralai/models/validationerror.py | 24 + src/mistralai/models/wandbintegration.py | 56 + src/mistralai/models/wandbintegrationout.py | 52 + src/mistralai/models_.py | 928 +++++++++++++++++ src/mistralai/py.typed | 1 + src/mistralai/sdk.py | 119 +++ src/mistralai/sdkconfiguration.py | 54 + src/mistralai/types/__init__.py | 21 + src/mistralai/types/basemodel.py | 39 + src/mistralai/utils/__init__.py | 86 ++ src/mistralai/utils/annotations.py | 19 + src/mistralai/utils/enums.py | 34 + src/mistralai/utils/eventstreaming.py | 178 ++++ src/mistralai/utils/forms.py | 207 ++++ src/mistralai/utils/headers.py | 136 +++ src/mistralai/utils/logger.py | 16 + src/mistralai/utils/metadata.py | 118 +++ src/mistralai/utils/queryparams.py | 203 ++++ src/mistralai/utils/requestbodies.py | 66 ++ src/mistralai/utils/retries.py | 216 ++++ src/mistralai/utils/security.py | 185 ++++ src/mistralai/utils/serializers.py | 181 ++++ src/mistralai/utils/url.py | 150 +++ src/mistralai/utils/values.py | 128 +++ tests/__init__.py | 0 tests/conftest.py | 19 - tests/test_chat.py | 149 --- tests/test_chat_async.py | 157 --- tests/test_completion.py | 99 -- tests/test_delete_model.py | 26 - tests/test_delete_model_async.py | 28 - tests/test_embedder.py | 66 -- tests/test_embedder_async.py | 69 -- tests/test_files.py | 105 -- tests/test_files_async.py | 110 -- tests/test_jobs.py | 128 --- tests/test_jobs_async.py | 133 --- tests/test_list_models.py | 30 - tests/test_list_models_async.py | 32 - tests/utils.py | 335 ------ 545 files changed, 30004 insertions(+), 3720 deletions(-) create mode 100644 .genignore create mode 100644 .gitattributes delete mode 100644 .github/workflows/build_publish.yaml create mode 100644 .github/workflows/sdk_generation_mistralai_azure_sdk.yaml create mode 100644 .github/workflows/sdk_generation_mistralai_gcp_sdk.yaml create mode 100644 .github/workflows/sdk_generation_mistralai_sdk.yaml create mode 100644 .github/workflows/sdk_publish_mistralai_sdk.yaml create mode 100644 .speakeasy/gen.lock create mode 100644 .speakeasy/gen.yaml create mode 100644 .speakeasy/workflow.lock create mode 100644 .speakeasy/workflow.yaml create mode 100644 .vscode/settings.json create mode 100644 
CONTRIBUTING.md create mode 100644 MIGRATION.md delete mode 100644 Makefile create mode 100644 OLD-README.md create mode 100644 USAGE.md create mode 100644 docs/models/agentscompletionrequest.md create mode 100644 docs/models/agentscompletionrequestmessages.md create mode 100644 docs/models/agentscompletionrequeststop.md create mode 100644 docs/models/agentscompletionrequesttoolchoice.md create mode 100644 docs/models/agentscompletionstreamrequest.md create mode 100644 docs/models/agentscompletionstreamrequeststop.md create mode 100644 docs/models/archiveftmodelout.md create mode 100644 docs/models/arguments.md create mode 100644 docs/models/assistantmessage.md create mode 100644 docs/models/assistantmessagerole.md create mode 100644 docs/models/chatcompletionchoice.md create mode 100644 docs/models/chatcompletionrequest.md create mode 100644 docs/models/chatcompletionresponse.md create mode 100644 docs/models/chatcompletionstreamrequest.md create mode 100644 docs/models/chatcompletionstreamrequestmessages.md create mode 100644 docs/models/chatcompletionstreamrequeststop.md create mode 100644 docs/models/chatcompletionstreamrequesttoolchoice.md create mode 100644 docs/models/checkpointout.md create mode 100644 docs/models/completionchunk.md create mode 100644 docs/models/completionevent.md create mode 100644 docs/models/completionresponsestreamchoice.md create mode 100644 docs/models/completionresponsestreamchoicefinishreason.md create mode 100644 docs/models/content.md create mode 100644 docs/models/contentchunk.md create mode 100644 docs/models/deletefileout.md create mode 100644 docs/models/deletemodelout.md create mode 100644 docs/models/deletemodelv1modelsmodeliddeleterequest.md create mode 100644 docs/models/deltamessage.md create mode 100644 docs/models/detailedjobout.md create mode 100644 docs/models/detailedjoboutstatus.md create mode 100644 docs/models/embeddingrequest.md create mode 100644 docs/models/embeddingresponse.md create mode 100644 docs/models/embeddingresponsedata.md create mode 100644 docs/models/eventout.md create mode 100644 docs/models/file.md create mode 100644 docs/models/filesapiroutesdeletefilerequest.md create mode 100644 docs/models/filesapiroutesretrievefilerequest.md create mode 100644 docs/models/filesapiroutesuploadfilemultipartbodyparams.md create mode 100644 docs/models/fileschema.md create mode 100644 docs/models/fimcompletionrequest.md create mode 100644 docs/models/fimcompletionrequeststop.md create mode 100644 docs/models/fimcompletionresponse.md create mode 100644 docs/models/fimcompletionstreamrequest.md create mode 100644 docs/models/fimcompletionstreamrequeststop.md create mode 100644 docs/models/finetuneablemodel.md create mode 100644 docs/models/finishreason.md create mode 100644 docs/models/ftmodelcapabilitiesout.md create mode 100644 docs/models/ftmodelout.md create mode 100644 docs/models/function.md create mode 100644 docs/models/functioncall.md create mode 100644 docs/models/githubrepositoryin.md create mode 100644 docs/models/githubrepositoryout.md create mode 100644 docs/models/httpvalidationerror.md create mode 100644 docs/models/inputs.md create mode 100644 docs/models/jobin.md create mode 100644 docs/models/jobmetadataout.md create mode 100644 docs/models/jobout.md create mode 100644 docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md create mode 100644 
docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md create mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md create mode 100644 docs/models/jobsout.md create mode 100644 docs/models/legacyjobmetadataout.md create mode 100644 docs/models/listfilesout.md create mode 100644 docs/models/loc.md create mode 100644 docs/models/messages.md create mode 100644 docs/models/metricout.md create mode 100644 docs/models/modelcapabilities.md create mode 100644 docs/models/modelcard.md create mode 100644 docs/models/modellist.md create mode 100644 docs/models/queryparamstatus.md create mode 100644 docs/models/responseformat.md create mode 100644 docs/models/responseformats.md create mode 100644 docs/models/retrievefileout.md create mode 100644 docs/models/retrievemodelv1modelsmodelidgetrequest.md create mode 100644 docs/models/role.md create mode 100644 docs/models/sampletype.md create mode 100644 docs/models/security.md create mode 100644 docs/models/source.md create mode 100644 docs/models/status.md create mode 100644 docs/models/stop.md create mode 100644 docs/models/systemmessage.md create mode 100644 docs/models/textchunk.md create mode 100644 docs/models/tool.md create mode 100644 docs/models/toolcall.md create mode 100644 docs/models/toolchoice.md create mode 100644 docs/models/toolmessage.md create mode 100644 docs/models/toolmessagerole.md create mode 100644 docs/models/trainingfile.md create mode 100644 docs/models/trainingparameters.md create mode 100644 docs/models/trainingparametersin.md create mode 100644 docs/models/unarchiveftmodelout.md create mode 100644 docs/models/updateftmodelin.md create mode 100644 docs/models/uploadfileout.md create mode 100644 docs/models/usageinfo.md create mode 100644 docs/models/usermessage.md create mode 100644 docs/models/usermessagecontent.md create mode 100644 docs/models/usermessagerole.md create mode 100644 docs/models/utils/retryconfig.md create mode 100644 docs/models/validationerror.md create mode 100644 docs/models/wandbintegration.md create mode 100644 docs/models/wandbintegrationout.md create mode 100644 docs/sdks/agents/README.md create mode 100644 docs/sdks/chat/README.md create mode 100644 docs/sdks/embeddings/README.md create mode 100644 docs/sdks/files/README.md create mode 100644 docs/sdks/fim/README.md create mode 100644 docs/sdks/finetuning/README.md create mode 100644 docs/sdks/jobs/README.md create mode 100644 docs/sdks/mistral/README.md create mode 100644 docs/sdks/models/README.md create mode 100755 examples/async_agents_no_streaming.py rename examples/{completion.py => async_code_completion.py} (78%) delete mode 100644 examples/async_completion.py create mode 100644 examples/azure/chat_no_streaming.py.py create mode 100755 examples/gcp/async_chat_no_streaming.py create mode 100644 packages/mistralai_azure/.genignore create mode 100644 packages/mistralai_azure/.gitattributes create mode 100644 packages/mistralai_azure/.gitignore create mode 100644 packages/mistralai_azure/.speakeasy/gen.lock create mode 100644 packages/mistralai_azure/.speakeasy/gen.yaml create mode 100644 packages/mistralai_azure/.vscode/settings.json create mode 100644 packages/mistralai_azure/CONTRIBUTING.md create mode 100644 packages/mistralai_azure/README.md create mode 100644 
packages/mistralai_azure/USAGE.md create mode 100644 packages/mistralai_azure/docs/models/arguments.md create mode 100644 packages/mistralai_azure/docs/models/assistantmessage.md create mode 100644 packages/mistralai_azure/docs/models/assistantmessagerole.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionchoice.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionrequest.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionrequeststop.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionresponse.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md create mode 100644 packages/mistralai_azure/docs/models/completionchunk.md create mode 100644 packages/mistralai_azure/docs/models/completionevent.md create mode 100644 packages/mistralai_azure/docs/models/completionresponsestreamchoice.md create mode 100644 packages/mistralai_azure/docs/models/content.md create mode 100644 packages/mistralai_azure/docs/models/contentchunk.md create mode 100644 packages/mistralai_azure/docs/models/deltamessage.md create mode 100644 packages/mistralai_azure/docs/models/finishreason.md create mode 100644 packages/mistralai_azure/docs/models/function.md create mode 100644 packages/mistralai_azure/docs/models/functioncall.md create mode 100644 packages/mistralai_azure/docs/models/httpvalidationerror.md create mode 100644 packages/mistralai_azure/docs/models/loc.md create mode 100644 packages/mistralai_azure/docs/models/messages.md create mode 100644 packages/mistralai_azure/docs/models/responseformat.md create mode 100644 packages/mistralai_azure/docs/models/responseformats.md create mode 100644 packages/mistralai_azure/docs/models/role.md create mode 100644 packages/mistralai_azure/docs/models/security.md create mode 100644 packages/mistralai_azure/docs/models/stop.md create mode 100644 packages/mistralai_azure/docs/models/systemmessage.md create mode 100644 packages/mistralai_azure/docs/models/textchunk.md create mode 100644 packages/mistralai_azure/docs/models/tool.md create mode 100644 packages/mistralai_azure/docs/models/toolcall.md create mode 100644 packages/mistralai_azure/docs/models/toolchoice.md create mode 100644 packages/mistralai_azure/docs/models/toolmessage.md create mode 100644 packages/mistralai_azure/docs/models/toolmessagerole.md create mode 100644 packages/mistralai_azure/docs/models/usageinfo.md create mode 100644 packages/mistralai_azure/docs/models/usermessage.md create mode 100644 packages/mistralai_azure/docs/models/usermessagecontent.md create mode 100644 packages/mistralai_azure/docs/models/usermessagerole.md create mode 100644 packages/mistralai_azure/docs/models/utils/retryconfig.md create mode 100644 packages/mistralai_azure/docs/models/validationerror.md create mode 100644 packages/mistralai_azure/docs/sdks/chat/README.md create mode 100644 packages/mistralai_azure/docs/sdks/mistralazure/README.md create mode 100644 packages/mistralai_azure/poetry.lock create mode 100644 packages/mistralai_azure/poetry.toml create mode 100644 packages/mistralai_azure/py.typed create mode 100644 packages/mistralai_azure/pylintrc create mode 100644 packages/mistralai_azure/pyproject.toml create mode 100755 
packages/mistralai_azure/scripts/compile.sh create mode 100755 packages/mistralai_azure/scripts/publish.sh create mode 100644 packages/mistralai_azure/src/mistralai_azure/__init__.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/types.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/basesdk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/chat.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/httpclient.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/__init__.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/completionevent.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/function.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/functioncall.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/responseformat.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/security.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/textchunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/tool.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/toolcall.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/usermessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/validationerror.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/py.typed create mode 100644 packages/mistralai_azure/src/mistralai_azure/sdk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/types/__init__.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/types/basemodel.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/__init__.py create mode 100644 
packages/mistralai_azure/src/mistralai_azure/utils/annotations.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/enums.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/forms.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/headers.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/logger.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/metadata.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/retries.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/security.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/serializers.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/url.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/values.py create mode 100644 packages/mistralai_gcp/.genignore create mode 100644 packages/mistralai_gcp/.gitattributes create mode 100644 packages/mistralai_gcp/.gitignore create mode 100644 packages/mistralai_gcp/.speakeasy/gen.lock create mode 100644 packages/mistralai_gcp/.speakeasy/gen.yaml create mode 100644 packages/mistralai_gcp/.vscode/settings.json create mode 100644 packages/mistralai_gcp/CONTRIBUTING.md create mode 100644 packages/mistralai_gcp/README.md create mode 100644 packages/mistralai_gcp/USAGE.md create mode 100644 packages/mistralai_gcp/docs/models/arguments.md create mode 100644 packages/mistralai_gcp/docs/models/assistantmessage.md create mode 100644 packages/mistralai_gcp/docs/models/assistantmessagerole.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionchoice.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionrequest.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionresponse.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md create mode 100644 packages/mistralai_gcp/docs/models/completionchunk.md create mode 100644 packages/mistralai_gcp/docs/models/completionevent.md create mode 100644 packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md create mode 100644 packages/mistralai_gcp/docs/models/content.md create mode 100644 packages/mistralai_gcp/docs/models/contentchunk.md create mode 100644 packages/mistralai_gcp/docs/models/deltamessage.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionrequest.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionresponse.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md create mode 100644 packages/mistralai_gcp/docs/models/finishreason.md create mode 100644 packages/mistralai_gcp/docs/models/function.md create mode 100644 
packages/mistralai_gcp/docs/models/functioncall.md create mode 100644 packages/mistralai_gcp/docs/models/httpvalidationerror.md create mode 100644 packages/mistralai_gcp/docs/models/loc.md create mode 100644 packages/mistralai_gcp/docs/models/messages.md create mode 100644 packages/mistralai_gcp/docs/models/responseformat.md create mode 100644 packages/mistralai_gcp/docs/models/responseformats.md create mode 100644 packages/mistralai_gcp/docs/models/role.md create mode 100644 packages/mistralai_gcp/docs/models/security.md create mode 100644 packages/mistralai_gcp/docs/models/stop.md create mode 100644 packages/mistralai_gcp/docs/models/systemmessage.md create mode 100644 packages/mistralai_gcp/docs/models/textchunk.md create mode 100644 packages/mistralai_gcp/docs/models/tool.md create mode 100644 packages/mistralai_gcp/docs/models/toolcall.md create mode 100644 packages/mistralai_gcp/docs/models/toolchoice.md create mode 100644 packages/mistralai_gcp/docs/models/toolmessage.md create mode 100644 packages/mistralai_gcp/docs/models/toolmessagerole.md create mode 100644 packages/mistralai_gcp/docs/models/usageinfo.md create mode 100644 packages/mistralai_gcp/docs/models/usermessage.md create mode 100644 packages/mistralai_gcp/docs/models/usermessagecontent.md create mode 100644 packages/mistralai_gcp/docs/models/usermessagerole.md create mode 100644 packages/mistralai_gcp/docs/models/utils/retryconfig.md create mode 100644 packages/mistralai_gcp/docs/models/validationerror.md create mode 100644 packages/mistralai_gcp/docs/sdks/chat/README.md create mode 100644 packages/mistralai_gcp/docs/sdks/fim/README.md create mode 100644 packages/mistralai_gcp/docs/sdks/mistralgcp/README.md create mode 100644 packages/mistralai_gcp/poetry.lock create mode 100644 packages/mistralai_gcp/poetry.toml create mode 100644 packages/mistralai_gcp/py.typed create mode 100644 packages/mistralai_gcp/pylintrc create mode 100644 packages/mistralai_gcp/pyproject.toml create mode 100755 packages/mistralai_gcp/scripts/compile.sh create mode 100755 packages/mistralai_gcp/scripts/publish.sh create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/basesdk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/chat.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/fim.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/httpclient.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py create mode 100644 
packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/function.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/security.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/tool.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/py.typed create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/sdk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/security.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/url.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/values.py create mode 100644 poetry.toml create mode 100644 py.typed create mode 100644 pylintrc create mode 100755 scripts/compile.sh create mode 100755 scripts/publish.sh create mode 100644 src/mistralai/_hooks/__init__.py create mode 100644 
src/mistralai/_hooks/custom_user_agent.py create mode 100644 src/mistralai/_hooks/deprecation_warning.py create mode 100644 src/mistralai/_hooks/registration.py create mode 100644 src/mistralai/_hooks/sdkhooks.py create mode 100644 src/mistralai/_hooks/types.py create mode 100644 src/mistralai/agents.py create mode 100644 src/mistralai/basesdk.py create mode 100644 src/mistralai/chat.py delete mode 100644 src/mistralai/client_base.py delete mode 100644 src/mistralai/constants.py create mode 100644 src/mistralai/embeddings.py delete mode 100644 src/mistralai/exceptions.py create mode 100644 src/mistralai/fim.py create mode 100644 src/mistralai/fine_tuning.py create mode 100644 src/mistralai/httpclient.py create mode 100644 src/mistralai/models/agentscompletionrequest.py create mode 100644 src/mistralai/models/agentscompletionstreamrequest.py create mode 100644 src/mistralai/models/archiveftmodelout.py create mode 100644 src/mistralai/models/assistantmessage.py delete mode 100644 src/mistralai/models/chat_completion.py create mode 100644 src/mistralai/models/chatcompletionchoice.py create mode 100644 src/mistralai/models/chatcompletionrequest.py create mode 100644 src/mistralai/models/chatcompletionresponse.py create mode 100644 src/mistralai/models/chatcompletionstreamrequest.py create mode 100644 src/mistralai/models/checkpointout.py delete mode 100644 src/mistralai/models/common.py create mode 100644 src/mistralai/models/completionchunk.py create mode 100644 src/mistralai/models/completionevent.py create mode 100644 src/mistralai/models/completionresponsestreamchoice.py create mode 100644 src/mistralai/models/contentchunk.py create mode 100644 src/mistralai/models/delete_model_v1_models_model_id_deleteop.py create mode 100644 src/mistralai/models/deletefileout.py create mode 100644 src/mistralai/models/deletemodelout.py create mode 100644 src/mistralai/models/deltamessage.py create mode 100644 src/mistralai/models/detailedjobout.py create mode 100644 src/mistralai/models/embeddingrequest.py create mode 100644 src/mistralai/models/embeddingresponse.py create mode 100644 src/mistralai/models/embeddingresponsedata.py delete mode 100644 src/mistralai/models/embeddings.py create mode 100644 src/mistralai/models/eventout.py delete mode 100644 src/mistralai/models/files.py create mode 100644 src/mistralai/models/files_api_routes_delete_fileop.py create mode 100644 src/mistralai/models/files_api_routes_retrieve_fileop.py create mode 100644 src/mistralai/models/files_api_routes_upload_fileop.py create mode 100644 src/mistralai/models/fileschema.py create mode 100644 src/mistralai/models/fimcompletionrequest.py create mode 100644 src/mistralai/models/fimcompletionresponse.py create mode 100644 src/mistralai/models/fimcompletionstreamrequest.py create mode 100644 src/mistralai/models/finetuneablemodel.py create mode 100644 src/mistralai/models/ftmodelcapabilitiesout.py create mode 100644 src/mistralai/models/ftmodelout.py create mode 100644 src/mistralai/models/function.py create mode 100644 src/mistralai/models/functioncall.py create mode 100644 src/mistralai/models/githubrepositoryin.py create mode 100644 src/mistralai/models/githubrepositoryout.py create mode 100644 src/mistralai/models/httpvalidationerror.py create mode 100644 src/mistralai/models/jobin.py create mode 100644 src/mistralai/models/jobmetadataout.py create mode 100644 src/mistralai/models/jobout.py delete mode 100644 src/mistralai/models/jobs.py create mode 100644 
src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py create mode 100644 src/mistralai/models/jobsout.py create mode 100644 src/mistralai/models/legacyjobmetadataout.py create mode 100644 src/mistralai/models/listfilesout.py create mode 100644 src/mistralai/models/metricout.py create mode 100644 src/mistralai/models/modelcapabilities.py create mode 100644 src/mistralai/models/modelcard.py create mode 100644 src/mistralai/models/modellist.py delete mode 100644 src/mistralai/models/models.py create mode 100644 src/mistralai/models/responseformat.py create mode 100644 src/mistralai/models/retrieve_model_v1_models_model_id_getop.py create mode 100644 src/mistralai/models/retrievefileout.py create mode 100644 src/mistralai/models/sampletype.py create mode 100644 src/mistralai/models/sdkerror.py create mode 100644 src/mistralai/models/security.py create mode 100644 src/mistralai/models/source.py create mode 100644 src/mistralai/models/systemmessage.py create mode 100644 src/mistralai/models/textchunk.py create mode 100644 src/mistralai/models/tool.py create mode 100644 src/mistralai/models/toolcall.py create mode 100644 src/mistralai/models/toolmessage.py create mode 100644 src/mistralai/models/trainingfile.py create mode 100644 src/mistralai/models/trainingparameters.py create mode 100644 src/mistralai/models/trainingparametersin.py create mode 100644 src/mistralai/models/unarchiveftmodelout.py create mode 100644 src/mistralai/models/updateftmodelin.py create mode 100644 src/mistralai/models/uploadfileout.py create mode 100644 src/mistralai/models/usageinfo.py create mode 100644 src/mistralai/models/usermessage.py create mode 100644 src/mistralai/models/validationerror.py create mode 100644 src/mistralai/models/wandbintegration.py create mode 100644 src/mistralai/models/wandbintegrationout.py create mode 100644 src/mistralai/models_.py create mode 100644 src/mistralai/sdk.py create mode 100644 src/mistralai/sdkconfiguration.py create mode 100644 src/mistralai/types/__init__.py create mode 100644 src/mistralai/types/basemodel.py create mode 100644 src/mistralai/utils/__init__.py create mode 100644 src/mistralai/utils/annotations.py create mode 100644 src/mistralai/utils/enums.py create mode 100644 src/mistralai/utils/eventstreaming.py create mode 100644 src/mistralai/utils/forms.py create mode 100644 src/mistralai/utils/headers.py create mode 100644 src/mistralai/utils/logger.py create mode 100644 src/mistralai/utils/metadata.py create mode 100644 src/mistralai/utils/queryparams.py create mode 100644 src/mistralai/utils/requestbodies.py create mode 100644 src/mistralai/utils/retries.py create mode 100644 src/mistralai/utils/security.py create mode 100644 src/mistralai/utils/serializers.py create mode 100644 src/mistralai/utils/url.py create mode 100644 src/mistralai/utils/values.py delete mode 100644 
tests/__init__.py delete mode 100644 tests/conftest.py delete mode 100644 tests/test_chat.py delete mode 100644 tests/test_chat_async.py delete mode 100644 tests/test_completion.py delete mode 100644 tests/test_delete_model.py delete mode 100644 tests/test_delete_model_async.py delete mode 100644 tests/test_embedder.py delete mode 100644 tests/test_embedder_async.py delete mode 100644 tests/test_files.py delete mode 100644 tests/test_files_async.py delete mode 100644 tests/test_jobs.py delete mode 100644 tests/test_jobs_async.py delete mode 100644 tests/test_list_models.py delete mode 100644 tests/test_list_models_async.py delete mode 100644 tests/utils.py diff --git a/.genignore b/.genignore new file mode 100644 index 0000000..1186de6 --- /dev/null +++ b/.genignore @@ -0,0 +1,2 @@ +pyproject.toml +examples/* \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..4d75d59 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# This allows generated code to be indexed correctly +*.py linguist-generated=false \ No newline at end of file diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml deleted file mode 100644 index a696f10..0000000 --- a/.github/workflows/build_publish.yaml +++ /dev/null @@ -1,90 +0,0 @@ -name: Lint / Test / Publish - -on: - push: - branches: ["main"] - - # We only deploy on tags and main branch - tags: - # Only run on tags that match the following regex - # This will match tags like 1.0.0, 1.0.1, etc. - - "[0-9]+.[0-9]+.[0-9]+" - - # Lint and test on pull requests - pull_request: - -jobs: - lint_and_test: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] - steps: - # Checkout the repository - - name: Checkout - uses: actions/checkout@v4 - - # Set python version to 3.11 - - name: set python version - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - # Install Build stuff - - name: Install Dependencies - run: | - pip install poetry \ - && poetry config virtualenvs.create false \ - && poetry install - - # Ruff - - name: Ruff check - run: | - poetry run ruff check . - - - name: Ruff check - run: | - poetry run ruff format . --check - - # Mypy - - name: Mypy Check - run: | - poetry run mypy . - - # Tests - - name: Run Tests - run: | - poetry run pytest . 
- - publish: - if: startsWith(github.ref, 'refs/tags') - runs-on: ubuntu-latest - needs: lint_and_test - steps: - # Checkout the repository - - name: Checkout - uses: actions/checkout@v4 - - # Set python version to 3.11 - - name: set python version - uses: actions/setup-python@v4 - with: - python-version: 3.11 - - # Install Build stuff - - name: Install Dependencies - run: | - pip install poetry \ - && poetry config virtualenvs.create false \ - && poetry install - - # build package using poetry - - name: Build Package - run: | - poetry build - - # Publish to PyPi - - name: Pypi publish - run: | - poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }} - poetry publish diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml new file mode 100644 index 0000000..7ec5bb8 --- /dev/null +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -0,0 +1,29 @@ +name: Generate MISTRAL-PYTHON-SDK-AZURE +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + force: + description: Force generation of SDKs + type: boolean + default: false + set_version: + description: optionally set a specific SDK version + type: string +jobs: + generate: + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + with: + force: ${{ github.event.inputs.force }} + mode: pr + set_version: ${{ github.event.inputs.set_version }} + speakeasy_version: latest + target: mistral-python-sdk-azure + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml new file mode 100644 index 0000000..c4da64f --- /dev/null +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -0,0 +1,29 @@ +name: Generate MISTRAL-PYTHON-SDK-GOOGLE-CLOUD +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + force: + description: Force generation of SDKs + type: boolean + default: false + set_version: + description: optionally set a specific SDK version + type: string +jobs: + generate: + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + with: + force: ${{ github.event.inputs.force }} + mode: pr + set_version: ${{ github.event.inputs.set_version }} + speakeasy_version: latest + target: mistral-python-sdk-google-cloud + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml new file mode 100644 index 0000000..7d0540e --- /dev/null +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -0,0 +1,29 @@ +name: Generate MISTRALAI +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + force: + description: Force generation of SDKs + type: boolean + default: false + set_version: + description: optionally set a specific SDK version + type: string +jobs: + generate: + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + with: + force: ${{ github.event.inputs.force }} + mode: pr + set_version: ${{ 
github.event.inputs.set_version }} + speakeasy_version: latest + target: mistralai-sdk + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml new file mode 100644 index 0000000..8716024 --- /dev/null +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -0,0 +1,20 @@ +name: Publish MISTRALAI-SDK +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + push: + branches: + - main + paths: + - RELEASES.md + - '*/RELEASES.md' +jobs: + publish: + uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@v15 + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/.gitignore b/.gitignore index 1e27b2b..999b933 100644 --- a/.gitignore +++ b/.gitignore @@ -1,11 +1,14 @@ +.venv/ +pyrightconfig.json +src/*.egg-info/ +.python-version +.DS_Store # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class - # C extensions *.so - # Distribution / packaging .Python build/ @@ -25,17 +28,14 @@ share/python-wheels/ .installed.cfg *.egg MANIFEST - # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec - # Installer logs pip-log.txt pip-delete-this-directory.txt - # Unit test / coverage reports htmlcov/ .tox/ @@ -50,75 +50,61 @@ coverage.xml .hypothesis/ .pytest_cache/ cover/ - # Translations *.mo *.pot - # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal - # Flask stuff: instance/ .webassets-cache - # Scrapy stuff: .scrapy - # Sphinx documentation docs/_build/ - # PyBuilder .pybuilder/ target/ - # Jupyter Notebook .ipynb_checkpoints - # IPython profile_default/ ipython_config.py - # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version - # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock - # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control #poetry.lock - # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. #pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. -# https://pdm.fming.dev/#use-with-ide +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control .pdm.toml - +.pdm-python +.pdm-build/ # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ - # Celery stuff celerybeat-schedule celerybeat.pid - # SageMath parsed files *.sage.py - # Environments .env .venv @@ -127,38 +113,26 @@ venv/ ENV/ env.bak/ venv.bak/ - # Spyder project settings .spyderproject .spyproject - # Rope project settings .ropeproject - # mkdocs documentation /site - # mypy .mypy_cache/ .dmypy.json dmypy.json - # Pyre type checker .pyre/ - # pytype static type analyzer .pytype/ - # Cython debug symbols cython_debug/ - # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ - -scratch/ - -changes.diff \ No newline at end of file diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock new file mode 100644 index 0000000..cf86634 --- /dev/null +++ b/.speakeasy/gen.lock @@ -0,0 +1,272 @@ +lockVersion: 2.0.0 +id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 +management: + docChecksum: ab9fe4a3c278619e334a828e2c336554 + docVersion: 0.0.2 + speakeasyVersion: 1.356.0 + generationVersion: 2.388.1 + releaseVersion: 1.0.0-rc.2 + configChecksum: 09abab5b4ed374c8d48d4e9b9ca6eb65 + published: true +features: + python: + additionalDependencies: 1.0.0 + constsAndDefaults: 1.0.2 + core: 5.3.4 + defaultEnabledRetries: 0.2.0 + envVarSecurityUsage: 0.3.1 + examples: 3.0.0 + flatRequests: 1.0.1 + flattening: 3.0.0 + globalSecurity: 3.0.1 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.0.0 + multipartFileContentType: 1.0.0 + nameOverrides: 3.0.0 + nullables: 1.0.0 + responseFormat: 1.0.0 + retries: 3.0.0 + sdkHooks: 1.0.0 + serverEvents: 1.0.2 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.0.1 + uploadStreams: 1.0.0 +generatedFiles: + - src/mistralai/sdkconfiguration.py + - src/mistralai/models_.py + - src/mistralai/files.py + - src/mistralai/jobs.py + - src/mistralai/fine_tuning.py + - src/mistralai/chat.py + - src/mistralai/fim.py + - src/mistralai/agents.py + - src/mistralai/embeddings.py + - src/mistralai/sdk.py + - .vscode/settings.json + - poetry.toml + - py.typed + - pylintrc + - scripts/compile.sh + - scripts/publish.sh + - src/mistralai/__init__.py + - src/mistralai/basesdk.py + - src/mistralai/httpclient.py + - src/mistralai/py.typed + - src/mistralai/types/__init__.py + - src/mistralai/types/basemodel.py + - src/mistralai/utils/__init__.py + - src/mistralai/utils/annotations.py + - src/mistralai/utils/enums.py + - src/mistralai/utils/eventstreaming.py + - src/mistralai/utils/forms.py + - src/mistralai/utils/headers.py + - src/mistralai/utils/logger.py + - src/mistralai/utils/metadata.py + - src/mistralai/utils/queryparams.py + - src/mistralai/utils/requestbodies.py + - src/mistralai/utils/retries.py + - src/mistralai/utils/security.py + - src/mistralai/utils/serializers.py + - src/mistralai/utils/url.py + - src/mistralai/utils/values.py + - src/mistralai/models/sdkerror.py + - src/mistralai/models/modellist.py + - src/mistralai/models/modelcard.py + - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/validationerror.py + - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py + - src/mistralai/models/deletemodelout.py + - 
src/mistralai/models/delete_model_v1_models_model_id_deleteop.py + - src/mistralai/models/ftmodelout.py + - src/mistralai/models/ftmodelcapabilitiesout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py + - src/mistralai/models/updateftmodelin.py + - src/mistralai/models/archiveftmodelout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py + - src/mistralai/models/unarchiveftmodelout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py + - src/mistralai/models/uploadfileout.py + - src/mistralai/models/source.py + - src/mistralai/models/sampletype.py + - src/mistralai/models/files_api_routes_upload_fileop.py + - src/mistralai/models/listfilesout.py + - src/mistralai/models/fileschema.py + - src/mistralai/models/retrievefileout.py + - src/mistralai/models/files_api_routes_retrieve_fileop.py + - src/mistralai/models/deletefileout.py + - src/mistralai/models/files_api_routes_delete_fileop.py + - src/mistralai/models/jobsout.py + - src/mistralai/models/jobout.py + - src/mistralai/models/jobmetadataout.py + - src/mistralai/models/githubrepositoryout.py + - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models/finetuneablemodel.py + - src/mistralai/models/trainingparameters.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py + - src/mistralai/models/legacyjobmetadataout.py + - src/mistralai/models/jobin.py + - src/mistralai/models/githubrepositoryin.py + - src/mistralai/models/wandbintegration.py + - src/mistralai/models/trainingparametersin.py + - src/mistralai/models/trainingfile.py + - src/mistralai/models/detailedjobout.py + - src/mistralai/models/checkpointout.py + - src/mistralai/models/metricout.py + - src/mistralai/models/eventout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py + - src/mistralai/models/chatcompletionresponse.py + - src/mistralai/models/chatcompletionchoice.py + - src/mistralai/models/assistantmessage.py + - src/mistralai/models/toolcall.py + - src/mistralai/models/functioncall.py + - src/mistralai/models/usageinfo.py + - src/mistralai/models/chatcompletionrequest.py + - src/mistralai/models/tool.py + - src/mistralai/models/function.py + - src/mistralai/models/responseformat.py + - src/mistralai/models/systemmessage.py + - src/mistralai/models/contentchunk.py + - src/mistralai/models/usermessage.py + - src/mistralai/models/textchunk.py + - src/mistralai/models/toolmessage.py + - src/mistralai/models/completionevent.py + - src/mistralai/models/completionchunk.py + - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/deltamessage.py + - src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/fimcompletionresponse.py + - src/mistralai/models/fimcompletionrequest.py + - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/agentscompletionrequest.py + - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/embeddingresponse.py + - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/embeddingrequest.py + - src/mistralai/models/security.py + - src/mistralai/models/__init__.py + - docs/models/modellist.md + - docs/models/modelcard.md + - 
docs/models/modelcapabilities.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/validationerror.md + - docs/models/retrievemodelv1modelsmodelidgetrequest.md + - docs/models/deletemodelout.md + - docs/models/deletemodelv1modelsmodeliddeleterequest.md + - docs/models/ftmodelout.md + - docs/models/ftmodelcapabilitiesout.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/updateftmodelin.md + - docs/models/archiveftmodelout.md + - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md + - docs/models/unarchiveftmodelout.md + - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md + - docs/models/uploadfileout.md + - docs/models/source.md + - docs/models/sampletype.md + - docs/models/file.md + - docs/models/filesapiroutesuploadfilemultipartbodyparams.md + - docs/models/listfilesout.md + - docs/models/fileschema.md + - docs/models/retrievefileout.md + - docs/models/filesapiroutesretrievefilerequest.md + - docs/models/deletefileout.md + - docs/models/filesapiroutesdeletefilerequest.md + - docs/models/jobsout.md + - docs/models/status.md + - docs/models/jobout.md + - docs/models/jobmetadataout.md + - docs/models/githubrepositoryout.md + - docs/models/wandbintegrationout.md + - docs/models/finetuneablemodel.md + - docs/models/trainingparameters.md + - docs/models/queryparamstatus.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md + - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md + - docs/models/legacyjobmetadataout.md + - docs/models/jobin.md + - docs/models/githubrepositoryin.md + - docs/models/wandbintegration.md + - docs/models/trainingparametersin.md + - docs/models/trainingfile.md + - docs/models/detailedjoboutstatus.md + - docs/models/detailedjobout.md + - docs/models/checkpointout.md + - docs/models/metricout.md + - docs/models/eventout.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/chatcompletionresponse.md + - docs/models/finishreason.md + - docs/models/chatcompletionchoice.md + - docs/models/assistantmessagerole.md + - docs/models/assistantmessage.md + - docs/models/toolcall.md + - docs/models/arguments.md + - docs/models/functioncall.md + - docs/models/usageinfo.md + - docs/models/stop.md + - docs/models/messages.md + - docs/models/toolchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/tool.md + - docs/models/function.md + - docs/models/responseformats.md + - docs/models/responseformat.md + - docs/models/content.md + - docs/models/role.md + - docs/models/systemmessage.md + - docs/models/contentchunk.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/usermessage.md + - docs/models/textchunk.md + - docs/models/toolmessagerole.md + - docs/models/toolmessage.md + - docs/models/completionevent.md + - docs/models/completionchunk.md + - docs/models/completionresponsestreamchoicefinishreason.md + - docs/models/completionresponsestreamchoice.md + - docs/models/deltamessage.md + - docs/models/chatcompletionstreamrequeststop.md + - docs/models/chatcompletionstreamrequestmessages.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionrequest.md + - 
docs/models/fimcompletionstreamrequeststop.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/agentscompletionrequeststop.md + - docs/models/agentscompletionrequestmessages.md + - docs/models/agentscompletionrequesttoolchoice.md + - docs/models/agentscompletionrequest.md + - docs/models/agentscompletionstreamrequeststop.md + - docs/models/agentscompletionstreamrequest.md + - docs/models/embeddingresponse.md + - docs/models/embeddingresponsedata.md + - docs/models/inputs.md + - docs/models/embeddingrequest.md + - docs/models/security.md + - docs/sdks/mistral/README.md + - docs/models/utils/retryconfig.md + - docs/sdks/models/README.md + - docs/sdks/files/README.md + - docs/sdks/finetuning/README.md + - docs/sdks/jobs/README.md + - docs/sdks/chat/README.md + - docs/sdks/fim/README.md + - docs/sdks/agents/README.md + - docs/sdks/embeddings/README.md + - USAGE.md + - .gitattributes + - src/mistralai/_hooks/sdkhooks.py + - src/mistralai/_hooks/types.py + - src/mistralai/_hooks/__init__.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml new file mode 100644 index 0000000..13a2538 --- /dev/null +++ b/.speakeasy/gen.yaml @@ -0,0 +1,42 @@ +configVersion: 2.0.0 +generation: + sdkClassName: Mistral + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + auth: + oAuth2ClientCredentialsEnabled: true +python: + version: 1.0.0-rc.2 + additionalDependencies: + dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + authors: + - Mistral + clientServerStatusCodesAsErrors: true + description: Python Client SDK for the Mistral AI API. + enumFormat: union + envVarPrefix: MISTRAL + flattenGlobalSecurity: true + flattenRequests: true + imports: + option: openapi + paths: + callbacks: "" + errors: "" + operations: "" + shared: "" + webhooks: "" + inputModelSuffix: input + maxMethodParams: 4 + methodArguments: infer-optional-args + outputModelSuffix: output + packageName: mistralai + responseFormat: flat + templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock new file mode 100644 index 0000000..47bfbe0 --- /dev/null +++ b/.speakeasy/workflow.lock @@ -0,0 +1,52 @@ +speakeasyVersion: 1.356.0 +sources: {} +targets: + mistralai-azure-sdk: + source: mistral-azure-source + outLocation: ./packages/mistralai_azure + mistralai-gcp-sdk: + source: mistral-google-cloud-source + outLocation: ./packages/mistralai_gcp + mistralai-sdk: + source: mistral-openapi + outLocation: /Users/gaspard/public-mistral/client-python +workflow: + workflowVersion: 1.0.0 + speakeasyVersion: latest + sources: + mistral-azure-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + mistral-google-cloud-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + mistral-openapi: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + targets: + mistralai-azure-sdk: + target: python + source: mistral-azure-source + output: ./packages/mistralai_azure + publish: + pypi: + token: $pypi_token + 
mistralai-gcp-sdk: + target: python + source: mistral-google-cloud-source + output: ./packages/mistralai_gcp + publish: + pypi: + token: $pypi_token + mistralai-sdk: + target: python + source: mistral-openapi + publish: + pypi: + token: $pypi_token diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml new file mode 100644 index 0000000..4076ff3 --- /dev/null +++ b/.speakeasy/workflow.yaml @@ -0,0 +1,39 @@ +workflowVersion: 1.0.0 +speakeasyVersion: latest +sources: + mistral-azure-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + mistral-google-cloud-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + mistral-openapi: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi +targets: + mistralai-azure-sdk: + target: python + source: mistral-azure-source + output: ./packages/mistralai_azure + publish: + pypi: + token: $pypi_token + mistralai-gcp-sdk: + target: python + source: mistral-google-cloud-source + output: ./packages/mistralai_gcp + publish: + pypi: + token: $pypi_token + mistralai-sdk: + target: python + source: mistral-openapi + publish: + pypi: + token: $pypi_token diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..8d79f0a --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "python.testing.pytestArgs": ["tests", "-vv"], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "pylint.args": ["--rcfile=pylintrc"] +} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..d585717 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to This Repository + +Thank you for your interest in contributing to this repository. Please note that this repository contains generated code. As such, we do not accept direct changes or pull requests. Instead, we encourage you to follow the guidelines below to report issues and suggest improvements. + +## How to Report Issues + +If you encounter any bugs or have suggestions for improvements, please open an issue on GitHub. When reporting an issue, please provide as much detail as possible to help us reproduce the problem. This includes: + +- A clear and descriptive title +- Steps to reproduce the issue +- Expected and actual behavior +- Any relevant logs, screenshots, or error messages +- Information about your environment (e.g., operating system, software versions) + - For example can be collected using the `npx envinfo` command from your terminal if you have Node.js installed + +## Issue Triage and Upstream Fixes + +We will review and triage issues as quickly as possible. Our goal is to address bugs and incorporate improvements in the upstream source code. Fixes will be included in the next generation of the generated code. + +## Contact + +If you have any questions or need further assistance, please feel free to reach out by opening an issue. + +Thank you for your understanding and cooperation! 
+ +The Maintainers diff --git a/LICENSE index 261eeb9..bec1276 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2024 Mistral AI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 0000000..6582b85 --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,216 @@ + +# Migration Guide for MistralAI Client from 0.\*.\* to 1.0.0 + +We have made significant changes to the `mistralai` library to improve its usability and consistency. This guide will help you migrate your code from the old client to the new one. + +## Major Changes + +1. **Unified Client Class**: + - The `MistralClient` and `MistralAsyncClient` classes have been consolidated into a single `Mistral` class. + - This simplifies the API by providing a single entry point for both synchronous and asynchronous operations. + +2. **Method Names and Structure**: + - The method names and structure have been updated for better clarity and consistency. + - For example: + - `client.chat` is now `client.chat.complete` for non-streaming calls + - `client.chat_stream` is now `client.chat.stream` for streaming calls + - Async `client.chat` is now `client.chat.complete_async` for async non-streaming calls + - Async `client.chat_stream` is now `client.chat.stream_async` for async streaming calls + + +## Method changes + +### Sync + +| Old Methods | New Methods | +| -------------------------- | -------------------------------- | +| `MistralClient` | `Mistral` | +| `client.chat` | `client.chat.complete` | +| `client.chat_stream` | `client.chat.stream` | +| `client.completion` | `client.fim.complete` | +| `client.completion_stream` | `client.fim.stream` | +| `client.embeddings` | `client.embeddings.create` | +| `client.list_models` | `client.models.list` | +| `client.delete_model` | `client.models.delete` | +| `client.files.create` | `client.files.upload` | +| `client.files.list` | `client.files.list` | +| `client.files.retrieve` | `client.files.retrieve` | +| `client.files.delete` | `client.files.delete` | +| `client.jobs.create` | `client.fine_tuning.jobs.create` | +| `client.jobs.list` | `client.fine_tuning.jobs.list` | +| `client.jobs.retrieve` | `client.fine_tuning.jobs.get` | +| `client.jobs.cancel` | `client.fine_tuning.jobs.cancel` | + +### Async + +| Old Methods | New Methods | +| -------------------------------- | -------------------------------------- | +| `MistralAsyncClient` | `Mistral` | +| `async_client.chat` | `client.chat.complete_async` | +| `async_client.chat_stream` | `client.chat.stream_async` | +| `async_client.completion` | `client.fim.complete_async` | +| `async_client.completion_stream` | `client.fim.stream_async` | +| `async_client.embeddings` | `client.embeddings.create_async` | +| `async_client.list_models` | `client.models.list_async` | +| `async_client.delete_model` | `client.models.delete_async` | +| `async_client.files.create` | `client.files.upload_async` | +| `async_client.files.list` | `client.files.list_async` | +| `async_client.files.retrieve` | `client.files.retrieve_async` | +| `async_client.files.delete` | `client.files.delete_async` | +| `async_client.jobs.create` | `client.fine_tuning.jobs.create_async` | +| `async_client.jobs.list` | `client.fine_tuning.jobs.list_async` | +| `async_client.jobs.retrieve` |
`client.fine_tuning.jobs.get_async` | +| `async_client.jobs.cancel` | `client.fine_tuning.jobs.cancel_async` | + +### Message Changes + +The `ChatMessage` class has been replaced with a more flexible system. You can now use the `SystemMessage`, `UserMessage`, `AssistantMessage`, and `ToolMessage` classes to create messages. + +The return object of the stream call methods has also changed: the content is now accessed as `chunk.data.choices[0].delta.content` instead of `chunk.choices[0].delta.content`. + +## Example Migrations + +### Example 1: Non-Streaming Chat + +**Old:** +```python +import os +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = MistralClient(api_key=api_key) + +messages = [ + ChatMessage(role="user", content="What is the best French cheese?") +] + +# No streaming +chat_response = client.chat( + model=model, + messages=messages, +) + +print(chat_response.choices[0].message.content) +``` + +**New:** + +```python +import os +from mistralai import Mistral, UserMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = Mistral(api_key=api_key) + +messages = [ + { + "role": "user", + "content": "What is the best French cheese?", + }, +] + +chat_response = client.chat.complete( + model = model, + messages = messages, +) + +print(chat_response.choices[0].message.content) +``` + +### Example 2: Streaming Chat + +**Old:** + +```python +import os +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = MistralClient(api_key=api_key) + +messages = [ + ChatMessage(role="user", content="What is the best French cheese?") +] + +# With streaming +stream_response = client.chat_stream(model=model, messages=messages) + +for chunk in stream_response: + print(chunk.choices[0].delta.content) +``` +**New:** +```python +import os +from mistralai import Mistral, UserMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = Mistral(api_key=api_key) + +messages = [ + { + "role": "user", + "content": "What is the best French cheese?", + }, +] + +stream_response = client.chat.stream( + model = model, + messages = messages, +) + +for chunk in stream_response: + print(chunk.data.choices[0].delta.content) +``` + +### Example 3: Async + +**Old:** +```python +import os +from mistralai.async_client import MistralAsyncClient +from mistralai.models.chat_completion import ChatMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = MistralAsyncClient(api_key=api_key) + +messages = [ + ChatMessage(role="user", content="What is the best French cheese?") +] + +# With async +async_response = client.chat_stream(model=model, messages=messages) + +async for chunk in async_response: + print(chunk.choices[0].delta.content) +``` + +**New:** +```python +import os +from mistralai import Mistral, UserMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = Mistral(api_key=api_key) + +messages = [ + { + "role": "user", + "content": "What is the best French cheese?", + }, +] + +# With async +async_response = await client.chat.stream_async(model=model, messages=messages) + +async for chunk in async_response: + print(chunk.data.choices[0].delta.content) +``` diff --git a/Makefile b/Makefile deleted file mode 100644 index 188291f..0000000 --- a/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -.PHONY: lint
- -lint: - poetry run ruff check --fix . - poetry run ruff format . - poetry run mypy . diff --git a/OLD-README.md b/OLD-README.md new file mode 100644 index 0000000..22967f9 --- /dev/null +++ b/OLD-README.md @@ -0,0 +1,62 @@ +# Mistral Python Client + +This client is inspired from [cohere-python](https://github.com/cohere-ai/cohere-python) + +You can use the Mistral Python client to interact with the Mistral AI API. + +## Installing + +```bash +pip install mistralai +``` + +### From Source + +This client uses `poetry` as a dependency and virtual environment manager. + +You can install poetry with + +```bash +pip install poetry +``` + +`poetry` will set up a virtual environment and install dependencies with the following command: + +```bash +poetry install +``` + +## Run examples + +You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. + +### API Key Setup + +Running the examples requires a Mistral AI API key. + +1. Get your own Mistral API Key: +2. Set your Mistral API Key as an environment variable. You only need to do this once. + +```bash +# set Mistral API Key (using zsh for example) +$ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv + +# reload the environment (or just quit and open a new terminal) +$ source ~/.zshenv +``` + +### Using poetry run + +```bash +cd examples +poetry run python chat_no_streaming.py +``` + +### Using poetry shell + +```bash +poetry shell +cd examples + +>> python chat_no_streaming.py +``` diff --git a/README.md b/README.md index 22967f9..d207a89 100644 --- a/README.md +++ b/README.md @@ -1,62 +1,672 @@ # Mistral Python Client -This client is inspired from [cohere-python](https://github.com/cohere-ai/cohere-python) +## Migration warning + +This documentation is for Mistral AI SDK v1. You can find more details on how to migrate from v0 to v1 [here](MIGRATION.md) -You can use the Mistral Python client to interact with the Mistral AI API. +## API Key Setup -## Installing +Before you begin, you will need a Mistral AI API key. + +1. Get your own Mistral API Key: +2. Set your Mistral API Key as an environment variable. You only need to do this once. +```bash +# set Mistral API Key (using zsh for example) +$ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv + +# reload the environment (or just quit and open a new terminal) +$ source ~/.zshenv +``` + + +## SDK Installation + +PIP ```bash pip install mistralai ``` -### From Source +Poetry +```bash +poetry add mistralai +``` + -This client uses `poetry` as a dependency and virtual environment manager. + +## SDK Example Usage -You can install poetry with +### Create Chat Completions -```bash -pip install poetry +This example shows how to create chat completions. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.chat.complete(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +]) + +if res is not None: + # handle response + pass ``` -`poetry` will set up a virtual environment and install dependencies with the following command: +
-```bash -poetry install +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.chat.complete_async(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ]) + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + +### Upload a file + +This example shows how to upload a file. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.upload(file={ + "file_name": "your_file_here", + "content": open("", "rb"), +}) + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.files.upload_async(file={ + "file_name": "your_file_here", + "content": open("", "rb"), + }) + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + +### Create Agents Completions + +This example shows how to create agents completions. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.agents.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], agent_id="") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.agents.complete_async(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") + if res is not None: + # handle response + pass + +asyncio.run(main()) ``` + -## Run examples + +### More examples You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. -### API Key Setup -Running the examples requires a Mistral AI API key. +## Providers' SDKs Example Usage -1. Get your own Mistral API Key: -2. Set your Mistral API Key as an environment variable. You only need to do this once. +### Azure AI -```bash -# set Mistral API Key (using zsh for example) -$ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv +**Prerequisites** -# reload the environment (or just quit and open a new terminal) -$ source ~/.zshenv +Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). + +Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/azure). + +```python +import asyncio +import os + +from mistralai_azure import MistralAzure + +client = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + +async def main() -> None: + res = await client.chat.complete_async( + max_tokens= 100, + temperature= 0.5, + messages= [ + { + "content": "Hello there!", + "role": "user" + } + ] + ) + print(res) + +asyncio.run(main()) ``` +The documentation for the Azure SDK is available [here](packages/mistralai_azure/README.md). + +### Google Cloud + + +**Prerequisites** + +Before you begin, you will need to create a Google Cloud project and enable the Mistral API. To do this, follow the instructions [here](https://docs.mistral.ai/deployment/cloud/vertex/). -### Using poetry run +To run this locally you will also need to ensure you are authenticated with Google Cloud. You can do this by running ```bash -cd examples -poetry run python chat_no_streaming.py +gcloud auth application-default login ``` -### Using poetry shell +**Step 1: Install** + +Install the extras dependencies specific to Google Cloud: ```bash -poetry shell -cd examples +pip install mistralai[gcp] +``` + +**Step 2: Example Usage** + +Here's a basic example to get you started. + +```python +import asyncio +from mistralai_gcp import MistralGoogleCloud + +client = MistralGoogleCloud() + + +async def main() -> None: + res = await client.chat.complete_async( + model= "mistral-small-2402", + messages= [ + { + "content": "Hello there!", + "role": "user" + } + ] + ) + print(res) + +asyncio.run(main()) +``` + +The documentation for the GCP SDK is available [here](packages/mistralai_gcp/README.md). 
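+If you prefer not to use asyncio for a quick script, a synchronous call should work as well. This is a minimal sketch, assuming the generated `MistralGoogleCloud` client also exposes a synchronous `chat.complete` method mirroring the `complete_async` call shown above: + +```python +from mistralai_gcp import MistralGoogleCloud + +client = MistralGoogleCloud() + +# Same request as the async example, issued synchronously (assumes chat.complete is available) +res = client.chat.complete( + model="mistral-small-2402", + messages=[ + { + "content": "Hello there!", + "role": "user" + } + ] +) +print(res) +```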
+ + + +## Available Resources and Operations + +### [models](docs/sdks/models/README.md) + +* [list](docs/sdks/models/README.md#list) - List Models +* [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model +* [delete](docs/sdks/models/README.md#delete) - Delete Model +* [update](docs/sdks/models/README.md#update) - Update Fine Tuned Model +* [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model +* [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model + +### [files](docs/sdks/files/README.md) + +* [upload](docs/sdks/files/README.md#upload) - Upload File +* [list](docs/sdks/files/README.md#list) - List Files +* [retrieve](docs/sdks/files/README.md#retrieve) - Retrieve File +* [delete](docs/sdks/files/README.md#delete) - Delete File + + +### [fine_tuning.jobs](docs/sdks/jobs/README.md) + +* [list](docs/sdks/jobs/README.md#list) - Get Fine Tuning Jobs +* [create](docs/sdks/jobs/README.md#create) - Create Fine Tuning Job +* [get](docs/sdks/jobs/README.md#get) - Get Fine Tuning Job +* [cancel](docs/sdks/jobs/README.md#cancel) - Cancel Fine Tuning Job +* [start](docs/sdks/jobs/README.md#start) - Start Fine Tuning Job + +### [chat](docs/sdks/chat/README.md) + +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion + +### [fim](docs/sdks/fim/README.md) + +* [complete](docs/sdks/fim/README.md#complete) - Fim Completion +* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion + +### [agents](docs/sdks/agents/README.md) + +* [complete](docs/sdks/agents/README.md#complete) - Chat Completion +* [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion + +### [embeddings](docs/sdks/embeddings/README.md) + +* [create](docs/sdks/embeddings/README.md#create) - Embeddings + + + +## Server-sent event streaming + +[Server-sent events][mdn-sse] are used to stream content from certain +operations. These operations will expose the stream as [Generator][generator] that +can be consumed using a simple `for` loop. The loop will +terminate when the server no longer has any events to send and closes the +underlying connection. + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.chat.stream(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +]) + +if res is not None: + for event in res: + # handle event + print(event, flush=True) + +``` + +[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events +[generator]: https://wiki.python.org/moin/Generators + + + +## File uploads + +Certain SDK methods accept file objects as part of a request body or multi-part request. It is possible and typically recommended to upload files as a stream rather than reading the entire contents into memory. This avoids excessive memory consumption and potentially crashing with out-of-memory errors when working with very large files. The following example demonstrates how to attach a file stream to a request. + +> [!TIP] +> +> For endpoints that handle file uploads bytes arrays can also be used. However, using streams is recommended for large files. 
+> + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.upload(file={ + "file_name": "your_file_here", + "content": open("", "rb"), +}) + +if res is not None: + # handle response + pass + +``` + + + +## Retries + +Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. + +To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: +```python +from mistralai.utils import BackoffStrategy, RetryConfig +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.list( + retries=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) + +if res is not None: + # handle response + pass + +``` + +If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: +```python +from mistralai.utils import BackoffStrategy, RetryConfig +from mistralai import Mistral +import os + +s = Mistral( + retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.list() + +if res is not None: + # handle response + pass + +``` + + + +## Error Handling + +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +### Example + +```python +from mistralai import Mistral, models +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = None +try: + res = s.models.list() + +except models.HTTPValidationError as e: + # handle exception + raise(e) +except models.SDKError as e: + # handle exception + raise(e) + +if res is not None: + # handle response + pass + +``` + + + +## Server Selection + +### Select Server by Name + +You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: + +| Name | Server | Variables | +| ------ | ------------------------ | --------- | +| `prod` | `https://api.mistral.ai` | None | + +#### Example + +```python +from mistralai import Mistral +import os + +s = Mistral( + server="prod", + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.list() + +if res is not None: + # handle response + pass + +``` + + +### Override Server URL Per-Client + +The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance.
For example: +```python +from mistralai import Mistral +import os + +s = Mistral( + server_url="https://api.mistral.ai", + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.list() + +if res is not None: + # handle response + pass + +``` + + + +## Custom HTTP Client + +The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. +Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocols ensuring that the client has the necessary methods to make API calls. +This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. + +For example, you could specify a header for every request that this SDK makes as follows: +```python +from mistralai import Mistral +import httpx + +http_client = httpx.Client(headers={"x-custom-header": "someValue"}) +s = Mistral(client=http_client) +``` + +or you could wrap the client with your own custom logic: +```python +from typing import Any, Optional, Union + +from mistralai import Mistral +from mistralai.httpclient import AsyncHttpClient +import httpx + +class CustomClient(AsyncHttpClient): + client: AsyncHttpClient + + def __init__(self, client: AsyncHttpClient): + self.client = client + + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + request.headers["Client-Level-Header"] = "added by client" + + return await self.client.send( + request, stream=stream, auth=auth, follow_redirects=follow_redirects + ) + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + return self.client.build_request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + +s = Mistral(async_client=CustomClient(httpx.AsyncClient())) +``` + + + +## Authentication + +### Per-Client Security Schemes + +This SDK supports the following security scheme globally: + +| Name | Type | Scheme | Environment Variable | +| --------- | ---- | ----------- | -------------------- | +| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | + +To authenticate with the API, the `api_key` parameter must be set when initializing the SDK client instance.
For example: +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.list() + +if res is not None: + # handle response + pass + +``` + + + +## Debugging + +To emit debug logs for SDK requests and responses you can pass a logger object directly into your SDK object. + +```python +from mistralai import Mistral +import logging + +logging.basicConfig(level=logging.DEBUG) +s = Mistral(debug_logger=logging.getLogger("mistralai")) +``` + + + + +# Development + +## Contributions + +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. diff --git a/USAGE.md b/USAGE.md new file mode 100644 index 0000000..aace195 --- /dev/null +++ b/USAGE.md @@ -0,0 +1,153 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.chat.complete(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +]) + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.chat.complete_async(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ]) + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + +### Upload a file + +This example shows how to upload a file. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.upload(file={ + "file_name": "your_file_here", + "content": open("", "rb"), +}) + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.files.upload_async(file={ + "file_name": "your_file_here", + "content": open("", "rb"), + }) + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + +### Create Agents Completions + +This example shows how to create agents completions. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.agents.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], agent_id="") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.agents.complete_async(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md new file mode 100644 index 0000000..2d0d672 --- /dev/null +++ b/docs/models/agentscompletionrequest.md @@ -0,0 +1,17 @@ +# AgentsCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/agentscompletionrequestmessages.md b/docs/models/agentscompletionrequestmessages.md new file mode 100644 index 0000000..946ef46 --- /dev/null +++ b/docs/models/agentscompletionrequestmessages.md @@ -0,0 +1,23 @@ +# AgentsCompletionRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/agentscompletionrequeststop.md b/docs/models/agentscompletionrequeststop.md new file mode 100644 index 0000000..21ce6fb --- /dev/null +++ b/docs/models/agentscompletionrequeststop.md @@ -0,0 +1,19 @@ +# AgentsCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/agentscompletionrequesttoolchoice.md b/docs/models/agentscompletionrequesttoolchoice.md new file mode 100644 index 0000000..4d58fb7 --- /dev/null +++ b/docs/models/agentscompletionrequesttoolchoice.md @@ -0,0 +1,10 @@ +# AgentsCompletionRequestToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md new file mode 100644 index 0000000..c318774 --- /dev/null +++ b/docs/models/agentscompletionstreamrequest.md @@ -0,0 +1,17 @@ +# AgentsCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequeststop.md b/docs/models/agentscompletionstreamrequeststop.md new file mode 100644 index 0000000..981005f --- /dev/null +++ b/docs/models/agentscompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# AgentsCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/archiveftmodelout.md b/docs/models/archiveftmodelout.md new file mode 100644 index 0000000..c2e8f8e --- /dev/null +++ b/docs/models/archiveftmodelout.md @@ -0,0 +1,10 @@ +# ArchiveFTModelOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/arguments.md b/docs/models/arguments.md new file mode 100644 index 0000000..2e54e27 --- /dev/null +++ b/docs/models/arguments.md @@ -0,0 +1,17 @@ +# Arguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md new file mode 100644 index 0000000..0c36cde --- /dev/null +++ b/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/assistantmessagerole.md b/docs/models/assistantmessagerole.md new file mode 100644 index 0000000..658229e --- /dev/null +++ b/docs/models/assistantmessagerole.md @@ -0,0 +1,8 @@ +# AssistantMessageRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/chatcompletionchoice.md b/docs/models/chatcompletionchoice.md new file mode 100644 index 0000000..c916fc0 --- /dev/null +++ b/docs/models/chatcompletionchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionChoice + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `finish_reason` | [models.FinishReason](../models/finishreason.md) | :heavy_check_mark: | N/A | stop | +| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md new file mode 100644 index 0000000..cfb3596 --- /dev/null +++ b/docs/models/chatcompletionrequest.md @@ -0,0 +1,20 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/chatcompletionresponse.md b/docs/models/chatcompletionresponse.md new file mode 100644 index 0000000..ad37615 --- /dev/null +++ b/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 0000000..8c3a0ba --- /dev/null +++ b/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,20 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequestmessages.md b/docs/models/chatcompletionstreamrequestmessages.md new file mode 100644 index 0000000..4799061 --- /dev/null +++ b/docs/models/chatcompletionstreamrequestmessages.md @@ -0,0 +1,29 @@ +# ChatCompletionStreamRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatcompletionstreamrequeststop.md b/docs/models/chatcompletionstreamrequeststop.md new file mode 100644 index 0000000..a48460a --- /dev/null +++ b/docs/models/chatcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/chatcompletionstreamrequesttoolchoice.md b/docs/models/chatcompletionstreamrequesttoolchoice.md new file mode 100644 index 0000000..37a6e9b --- /dev/null +++ b/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionStreamRequestToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/docs/models/checkpointout.md b/docs/models/checkpointout.md new file mode 100644 index 0000000..053592d --- /dev/null +++ b/docs/models/checkpointout.md @@ -0,0 +1,10 @@ +# CheckpointOut + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `metrics` | [models.MetricOut](../models/metricout.md) | :heavy_check_mark: | Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). | | +| `step_number` | *int* | :heavy_check_mark: | The step number that the checkpoint was created at. | | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the checkpoint was created. 
| 1716963433 | \ No newline at end of file diff --git a/docs/models/completionchunk.md b/docs/models/completionchunk.md new file mode 100644 index 0000000..b8ae6a0 --- /dev/null +++ b/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionevent.md b/docs/models/completionevent.md new file mode 100644 index 0000000..7a66e8f --- /dev/null +++ b/docs/models/completionevent.md @@ -0,0 +1,8 @@ +# CompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `data` | [models.CompletionChunk](../models/completionchunk.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionresponsestreamchoice.md b/docs/models/completionresponsestreamchoice.md new file mode 100644 index 0000000..1532c25 --- /dev/null +++ b/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionresponsestreamchoicefinishreason.md b/docs/models/completionresponsestreamchoicefinishreason.md new file mode 100644 index 0000000..0fece47 --- /dev/null +++ b/docs/models/completionresponsestreamchoicefinishreason.md @@ -0,0 +1,11 @@ +# CompletionResponseStreamChoiceFinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/docs/models/content.md b/docs/models/content.md new file mode 100644 index 0000000..a833dc2 --- /dev/null 
+++ b/docs/models/content.md @@ -0,0 +1,17 @@ +# Content + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md new file mode 100644 index 0000000..64fc80d --- /dev/null +++ b/docs/models/contentchunk.md @@ -0,0 +1,9 @@ +# ContentChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/deletefileout.md b/docs/models/deletefileout.md new file mode 100644 index 0000000..4709cc4 --- /dev/null +++ b/docs/models/deletefileout.md @@ -0,0 +1,10 @@ +# DeleteFileOut + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The ID of the deleted file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type that was deleted | file | +| `deleted` | *bool* | :heavy_check_mark: | The deletion status. | false | \ No newline at end of file diff --git a/docs/models/deletemodelout.md b/docs/models/deletemodelout.md new file mode 100644 index 0000000..5fd4df7 --- /dev/null +++ b/docs/models/deletemodelout.md @@ -0,0 +1,10 @@ +# DeleteModelOut + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the deleted model. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `object` | *Optional[str]* | :heavy_minus_sign: | The object type that was deleted | | +| `deleted` | *Optional[bool]* | :heavy_minus_sign: | The deletion status | true | \ No newline at end of file diff --git a/docs/models/deletemodelv1modelsmodeliddeleterequest.md b/docs/models/deletemodelv1modelsmodeliddeleterequest.md new file mode 100644 index 0000000..d9bc15f --- /dev/null +++ b/docs/models/deletemodelv1modelsmodeliddeleterequest.md @@ -0,0 +1,8 @@ +# DeleteModelV1ModelsModelIDDeleteRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to delete. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md new file mode 100644 index 0000000..4cb9e91 --- /dev/null +++ b/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | [OptionalNullable[models.ToolCall]](../models/toolcall.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/detailedjobout.md b/docs/models/detailedjobout.md new file mode 100644 index 0000000..f52d5cd --- /dev/null +++ b/docs/models/detailedjobout.md @@ -0,0 +1,26 @@ +# DetailedJobOut + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | +| `model` | [models.FineTuneableModel](../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `status` | [models.DetailedJobOutStatus](../models/detailedjoboutstatus.md) | :heavy_check_mark: | N/A | +| `job_type` | *str* | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `modified_at` | *int* | :heavy_check_mark: | N/A | +| `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.WandbIntegrationOut](../models/wandbintegrationout.md)] | :heavy_minus_sign: | N/A | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.GithubRepositoryOut](../models/githubrepositoryout.md)] | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| +| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/detailedjoboutstatus.md b/docs/models/detailedjoboutstatus.md new file mode 100644 index 0000000..955d5a2 --- /dev/null +++ b/docs/models/detailedjoboutstatus.md @@ -0,0 +1,17 @@ +# DetailedJobOutStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md new file mode 100644 index 0000000..584a8be --- /dev/null +++ b/docs/models/embeddingrequest.md @@ -0,0 +1,10 @@ +# EmbeddingRequest + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- | +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | +| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | \ No newline at end of file diff --git a/docs/models/embeddingresponse.md b/docs/models/embeddingresponse.md new file mode 100644 index 0000000..2bd85b4 --- /dev/null +++ b/docs/models/embeddingresponse.md @@ -0,0 +1,12 @@ +# EmbeddingResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `data` | List[[models.EmbeddingResponseData](../models/embeddingresponsedata.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/embeddingresponsedata.md b/docs/models/embeddingresponsedata.md new file mode 100644 index 0000000..20b5061 --- /dev/null +++ b/docs/models/embeddingresponsedata.md @@ -0,0 +1,10 @@ +# EmbeddingResponseData + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | embedding | +| `embedding` | List[*float*] | :heavy_minus_sign: | N/A | [
0.1,
0.2,
0.3
] | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | 0 | \ No newline at end of file diff --git a/docs/models/eventout.md b/docs/models/eventout.md new file mode 100644 index 0000000..c6f69ad --- /dev/null +++ b/docs/models/eventout.md @@ -0,0 +1,10 @@ +# EventOut + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | The name of the event. | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | +| `data` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/file.md b/docs/models/file.md new file mode 100644 index 0000000..37cc418 --- /dev/null +++ b/docs/models/file.md @@ -0,0 +1,10 @@ +# File + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `file_name` | *str* | :heavy_check_mark: | N/A | +| `content` | *Union[bytes, IO[bytes], io.BufferedReader]* | :heavy_check_mark: | N/A | +| `content_type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesdeletefilerequest.md b/docs/models/filesapiroutesdeletefilerequest.md new file mode 100644 index 0000000..1b02c2d --- /dev/null +++ b/docs/models/filesapiroutesdeletefilerequest.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesDeleteFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesretrievefilerequest.md b/docs/models/filesapiroutesretrievefilerequest.md new file mode 100644 index 0000000..961bae1 --- /dev/null +++ b/docs/models/filesapiroutesretrievefilerequest.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesRetrieveFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md new file mode 100644 index 0000000..1a6dfc6 --- /dev/null +++ b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md @@ -0,0 +1,9 @@ +# FilesAPIRoutesUploadFileMultiPartBodyParams + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name, you should format your request as follows:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `purpose` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md new file mode 100644 index 0000000..fd3ec08 --- /dev/null +++ b/docs/models/fileschema.md @@ -0,0 +1,16 @@ +# FileSchema + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `purpose` | *str* | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md new file mode 100644 index 0000000..b4b024e --- /dev/null +++ b/docs/models/fimcompletionrequest.md @@ -0,0 +1,17 @@ +# FIMCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file diff --git a/docs/models/fimcompletionrequeststop.md b/docs/models/fimcompletionrequeststop.md new file mode 100644 index 0000000..a0dbb00 --- /dev/null +++ b/docs/models/fimcompletionrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/fimcompletionresponse.md b/docs/models/fimcompletionresponse.md new file mode 100644 index 0000000..da786a1 --- /dev/null +++ b/docs/models/fimcompletionresponse.md @@ -0,0 +1,13 @@ +# FIMCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md new file mode 100644 index 0000000..acffb53 --- /dev/null +++ b/docs/models/fimcompletionstreamrequest.md @@ -0,0 +1,17 @@ +# FIMCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequeststop.md b/docs/models/fimcompletionstreamrequeststop.md new file mode 100644 index 0000000..5a9e2ff --- /dev/null +++ b/docs/models/fimcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionStreamRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/finetuneablemodel.md b/docs/models/finetuneablemodel.md new file mode 100644 index 0000000..cb42928 --- /dev/null +++ b/docs/models/finetuneablemodel.md @@ -0,0 +1,14 @@ +# FineTuneableModel + +The name of the model to fine-tune. 
+ + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `OPEN_MISTRAL_7B` | open-mistral-7b | +| `MISTRAL_SMALL_LATEST` | mistral-small-latest | +| `CODESTRAL_LATEST` | codestral-latest | +| `MISTRAL_LARGE_LATEST` | mistral-large-latest | +| `OPEN_MISTRAL_NEMO` | open-mistral-nemo | \ No newline at end of file diff --git a/docs/models/finishreason.md b/docs/models/finishreason.md new file mode 100644 index 0000000..2af53f6 --- /dev/null +++ b/docs/models/finishreason.md @@ -0,0 +1,12 @@ +# FinishReason + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/docs/models/ftmodelcapabilitiesout.md b/docs/models/ftmodelcapabilitiesout.md new file mode 100644 index 0000000..3cb5237 --- /dev/null +++ b/docs/models/ftmodelcapabilitiesout.md @@ -0,0 +1,11 @@ +# FTModelCapabilitiesOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelout.md b/docs/models/ftmodelout.md new file mode 100644 index 0000000..8d081f6 --- /dev/null +++ b/docs/models/ftmodelout.md @@ -0,0 +1,19 @@ +# FTModelOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/function.md b/docs/models/function.md new file mode 100644 index 0000000..8af398f --- /dev/null +++ b/docs/models/function.md @@ -0,0 +1,10 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncall.md b/docs/models/functioncall.md new file mode 100644 index 0000000..7ccd90d --- /dev/null +++ b/docs/models/functioncall.md @@ -0,0 +1,9 @@ +# FunctionCall + + +## Fields + +| Field | Type | Required | Description | +| 
------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.Arguments](../models/arguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryin.md b/docs/models/githubrepositoryin.md new file mode 100644 index 0000000..1a6be96 --- /dev/null +++ b/docs/models/githubrepositoryin.md @@ -0,0 +1,13 @@ +# GithubRepositoryIn + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `token` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryout.md b/docs/models/githubrepositoryout.md new file mode 100644 index 0000000..fbabf1e --- /dev/null +++ b/docs/models/githubrepositoryout.md @@ -0,0 +1,13 @@ +# GithubRepositoryOut + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `commit_id` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/httpvalidationerror.md b/docs/models/httpvalidationerror.md new file mode 100644 index 0000000..6389243 --- /dev/null +++ b/docs/models/httpvalidationerror.md @@ -0,0 +1,10 @@ +# HTTPValidationError + +Validation Error + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `detail` | List[[models.ValidationError](../models/validationerror.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/inputs.md b/docs/models/inputs.md new file mode 100644 index 0000000..45264f9 --- /dev/null +++ b/docs/models/inputs.md @@ -0,0 +1,19 @@ +# Inputs + +Text to embed. 
+ + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/jobin.md b/docs/models/jobin.md new file mode 100644 index 0000000..6358e7a --- /dev/null +++ b/docs/models/jobin.md @@ -0,0 +1,15 @@ +# JobIn + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | [models.FineTuneableModel](../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `hyperparameters` | [models.TrainingParametersIn](../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. | +| `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | +| `integrations` | List[[models.WandbIntegration](../models/wandbintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `repositories` | List[[models.GithubRepositoryIn](../models/githubrepositoryin.md)] | :heavy_minus_sign: | N/A | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| \ No newline at end of file diff --git a/docs/models/jobmetadataout.md b/docs/models/jobmetadataout.md new file mode 100644 index 0000000..6218a16 --- /dev/null +++ b/docs/models/jobmetadataout.md @@ -0,0 +1,14 @@ +# JobMetadataOut + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `train_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobout.md b/docs/models/jobout.md new file mode 100644 index 0000000..0b88fba --- /dev/null +++ b/docs/models/jobout.md @@ -0,0 +1,24 @@ +# JobOut + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the job. | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | +| `model` | [models.FineTuneableModel](../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `status` | [models.Status](../models/status.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `job_type` | *str* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | +| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | +| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | +| `object` | *Optional[str]* | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. 
| +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | +| `integrations` | List[[models.WandbIntegrationOut](../models/wandbintegrationout.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | +| `repositories` | List[[models.GithubRepositoryOut](../models/githubrepositoryout.md)] | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md new file mode 100644 index 0000000..f9700df --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningArchiveFineTunedModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to archive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md new file mode 100644 index 0000000..883cbac --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningCancelFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to cancel. | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md new file mode 100644 index 0000000..dd12c71 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningCreateFineTuningJobResponse + +OK + + +## Supported Types + +### `models.JobOut` + +```python +value: models.JobOut = /* values here */ +``` + +### `models.LegacyJobMetadataOut` + +```python +value: models.LegacyJobMetadataOut = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md new file mode 100644 index 0000000..fde1980 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to analyse. 
| \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md new file mode 100644 index 0000000..9d25d79 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md @@ -0,0 +1,16 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.QueryParamStatus]](../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. 
| \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md new file mode 100644 index 0000000..4429fe4 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningStartFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md new file mode 100644 index 0000000..95c1734 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to unarchive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md new file mode 100644 index 0000000..6d93832 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md @@ -0,0 +1,9 @@ +# JobsAPIRoutesFineTuningUpdateFineTunedModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `update_ft_model_in` | [models.UpdateFTModelIn](../models/updateftmodelin.md) | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md new file mode 100644 index 0000000..d3b10a8 --- /dev/null +++ b/docs/models/jobsout.md @@ -0,0 +1,10 @@ +# JobsOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `total` | *int* | :heavy_check_mark: | N/A | +| `data` | List[[models.JobOut](../models/jobout.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataout.md b/docs/models/legacyjobmetadataout.md new file mode 100644 index 0000000..04925ba --- /dev/null +++ b/docs/models/legacyjobmetadataout.md @@ -0,0 +1,19 @@ +# LegacyJobMetadataOut + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `details` | *str* | :heavy_check_mark: | N/A | | +| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | The approximated time (in seconds) for the fine-tuning process to complete. | 220 | +| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | The cost of the fine-tuning job. | 10 | +| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | The currency used for the fine-tuning job cost. | EUR | +| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of tokens consumed by one training step. | 131072 | +| `train_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens used during the fine-tuning process. | 1310720 | +| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens in the training dataset. | 305375 | +| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `deprecated` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | The number of complete passes through the entire training dataset. | 4.2922 | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. 
A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | 10 | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/listfilesout.md b/docs/models/listfilesout.md new file mode 100644 index 0000000..3694739 --- /dev/null +++ b/docs/models/listfilesout.md @@ -0,0 +1,9 @@ +# ListFilesOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `data` | List[[models.FileSchema](../models/fileschema.md)] | :heavy_check_mark: | N/A | +| `object` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/loc.md b/docs/models/loc.md new file mode 100644 index 0000000..d6094ac --- /dev/null +++ b/docs/models/loc.md @@ -0,0 +1,17 @@ +# Loc + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/messages.md b/docs/models/messages.md new file mode 100644 index 0000000..1d39450 --- /dev/null +++ b/docs/models/messages.md @@ -0,0 +1,29 @@ +# Messages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/metricout.md b/docs/models/metricout.md new file mode 100644 index 0000000..3c552ba --- /dev/null +++ b/docs/models/metricout.md @@ -0,0 +1,12 @@ +# MetricOut + +Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). 
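These values are typically read from the checkpoints of a fine-tuning job. A minimal sketch, assuming the client exposes `fine_tuning.jobs.get` and that the returned job carries `checkpoints` whose entries hold a `metrics` object and a `step_number` (names inferred, not guaranteed by this file):

```python
import os
from mistralai import Mistral

s = Mistral(api_key=os.getenv("MISTRAL_API_KEY", ""))

# "your-job-id" is a placeholder; use the ID of one of your fine-tuning jobs.
job = s.fine_tuning.jobs.get(job_id="your-job-id")

for checkpoint in job.checkpoints or []:
    m = checkpoint.metrics
    # Loss should decrease and token accuracy should increase over the steps.
    print(checkpoint.step_number, m.train_loss, m.valid_loss, m.valid_mean_token_accuracy)
```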
+ + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `train_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `valid_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `valid_mean_token_accuracy` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md new file mode 100644 index 0000000..8902097 --- /dev/null +++ b/docs/models/modelcapabilities.md @@ -0,0 +1,11 @@ +# ModelCapabilities + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelcard.md b/docs/models/modelcard.md new file mode 100644 index 0000000..8795141 --- /dev/null +++ b/docs/models/modelcard.md @@ -0,0 +1,19 @@ +# ModelCard + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `capabilities` | [models.ModelCapabilities](../models/modelcapabilities.md) | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `owned_by` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `root` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modellist.md b/docs/models/modellist.md new file mode 100644 index 0000000..e3fefee --- /dev/null +++ b/docs/models/modellist.md @@ -0,0 +1,9 @@ +# ModelList + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `data` | List[[models.ModelCard](../models/modelcard.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/queryparamstatus.md b/docs/models/queryparamstatus.md new file mode 100644 index 0000000..dcd2090 --- /dev/null +++ b/docs/models/queryparamstatus.md @@ -0,0 +1,19 @@ +# QueryParamStatus + +The current job state to filter on. When set, the other results are not displayed. 
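This enum is used as the `status` filter when listing fine-tuning jobs. A minimal sketch, assuming the client exposes a `fine_tuning.jobs.list` method accepting `status`, `page`, and `page_size` (the parameter names come from the jobs request model; the method name itself is an assumption):

```python
import os
from mistralai import Mistral

s = Mistral(api_key=os.getenv("MISTRAL_API_KEY", ""))

# Only jobs currently in the RUNNING state are returned; other jobs are filtered out.
jobs = s.fine_tuning.jobs.list(status="RUNNING", page=0, page_size=20)

print(jobs.total)
```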
+ + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md new file mode 100644 index 0000000..2704eab --- /dev/null +++ b/docs/models/responseformat.md @@ -0,0 +1,8 @@ +# ResponseFormat + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformats.md b/docs/models/responseformats.md new file mode 100644 index 0000000..ce35fbb --- /dev/null +++ b/docs/models/responseformats.md @@ -0,0 +1,11 @@ +# ResponseFormats + +An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md new file mode 100644 index 0000000..1a62457 --- /dev/null +++ b/docs/models/retrievefileout.md @@ -0,0 +1,16 @@ +# RetrieveFileOut + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `purpose` | *str* | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
| fine-tune | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/retrievemodelv1modelsmodelidgetrequest.md b/docs/models/retrievemodelv1modelsmodelidgetrequest.md new file mode 100644 index 0000000..f1280f8 --- /dev/null +++ b/docs/models/retrievemodelv1modelsmodelidgetrequest.md @@ -0,0 +1,8 @@ +# RetrieveModelV1ModelsModelIDGetRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to retrieve. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/role.md b/docs/models/role.md new file mode 100644 index 0000000..affca78 --- /dev/null +++ b/docs/models/role.md @@ -0,0 +1,8 @@ +# Role + + +## Values + +| Name | Value | +| -------- | -------- | +| `SYSTEM` | system | \ No newline at end of file diff --git a/docs/models/sampletype.md b/docs/models/sampletype.md new file mode 100644 index 0000000..888fd63 --- /dev/null +++ b/docs/models/sampletype.md @@ -0,0 +1,9 @@ +# SampleType + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `PRETRAIN` | pretrain | +| `INSTRUCT` | instruct | \ No newline at end of file diff --git a/docs/models/security.md b/docs/models/security.md new file mode 100644 index 0000000..2e0839d --- /dev/null +++ b/docs/models/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `api_key` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/source.md b/docs/models/source.md new file mode 100644 index 0000000..ef05562 --- /dev/null +++ b/docs/models/source.md @@ -0,0 +1,9 @@ +# Source + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `UPLOAD` | upload | +| `REPOSITORY` | repository | \ No newline at end of file diff --git a/docs/models/status.md b/docs/models/status.md new file mode 100644 index 0000000..5e22eb7 --- /dev/null +++ b/docs/models/status.md @@ -0,0 +1,19 @@ +# Status + +The current status of the fine-tuning job. + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/stop.md b/docs/models/stop.md new file mode 100644 index 0000000..ba40ca8 --- /dev/null +++ b/docs/models/stop.md @@ -0,0 +1,19 @@ +# Stop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md new file mode 100644 index 0000000..7f82798 --- /dev/null +++ b/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/textchunk.md b/docs/models/textchunk.md new file mode 100644 index 0000000..34e4dd6 --- /dev/null +++ b/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/tool.md b/docs/models/tool.md new file mode 100644 index 0000000..291394c --- /dev/null +++ b/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md new file mode 100644 index 0000000..bd2dc9f --- /dev/null +++ b/docs/models/toolcall.md @@ -0,0 +1,10 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolchoice.md b/docs/models/toolchoice.md new file mode 100644 index 0000000..b84f51f --- /dev/null +++ b/docs/models/toolchoice.md @@ -0,0 +1,10 @@ +# ToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/docs/models/toolmessage.md b/docs/models/toolmessage.md new file mode 100644 index 0000000..364339e --- /dev/null +++ b/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| 
`name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolmessagerole.md b/docs/models/toolmessagerole.md new file mode 100644 index 0000000..c24e59c --- /dev/null +++ b/docs/models/toolmessagerole.md @@ -0,0 +1,8 @@ +# ToolMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `TOOL` | tool | \ No newline at end of file diff --git a/docs/models/trainingfile.md b/docs/models/trainingfile.md new file mode 100644 index 0000000..cde218b --- /dev/null +++ b/docs/models/trainingfile.md @@ -0,0 +1,9 @@ +# TrainingFile + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparameters.md b/docs/models/trainingparameters.md new file mode 100644 index 0000000..4356c33 --- /dev/null +++ b/docs/models/trainingparameters.md @@ -0,0 +1,11 @@ +# TrainingParameters + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparametersin.md b/docs/models/trainingparametersin.md new file mode 100644 index 0000000..afc094d --- /dev/null +++ b/docs/models/trainingparametersin.md @@ -0,0 +1,13 @@ +# TrainingParametersIn + +The fine-tuning hyperparameter settings used in a fine-tune job. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. 
| +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/unarchiveftmodelout.md b/docs/models/unarchiveftmodelout.md new file mode 100644 index 0000000..aa26792 --- /dev/null +++ b/docs/models/unarchiveftmodelout.md @@ -0,0 +1,10 @@ +# UnarchiveFTModelOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updateftmodelin.md b/docs/models/updateftmodelin.md new file mode 100644 index 0000000..4e55b1a --- /dev/null +++ b/docs/models/updateftmodelin.md @@ -0,0 +1,9 @@ +# UpdateFTModelIn + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md new file mode 100644 index 0000000..7eef9bc --- /dev/null +++ b/docs/models/uploadfileout.md @@ -0,0 +1,16 @@ +# UploadFileOut + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `purpose` | *str* | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
| fine-tune | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/usageinfo.md b/docs/models/usageinfo.md new file mode 100644 index 0000000..9f56a3a --- /dev/null +++ b/docs/models/usageinfo.md @@ -0,0 +1,10 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | +| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | +| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md new file mode 100644 index 0000000..3d96f1c --- /dev/null +++ b/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/usermessagecontent.md b/docs/models/usermessagecontent.md new file mode 100644 index 0000000..86ebd18 --- /dev/null +++ b/docs/models/usermessagecontent.md @@ -0,0 +1,17 @@ +# UserMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/docs/models/usermessagerole.md b/docs/models/usermessagerole.md new file mode 100644 index 0000000..171124e --- /dev/null +++ b/docs/models/usermessagerole.md @@ -0,0 +1,8 @@ +# UserMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `USER` | user | \ No newline at end of file diff --git a/docs/models/utils/retryconfig.md b/docs/models/utils/retryconfig.md new file mode 100644 index 0000000..69dd549 --- /dev/null +++ b/docs/models/utils/retryconfig.md @@ -0,0 +1,24 @@ +# RetryConfig + +Allows customizing the default retry configuration. Only usable with methods that mention they support retries. + +## Fields + +| Name | Type | Description | Example | +| ------------------------- | ----------------------------------- | --------------------------------------- | --------- | +| `strategy` | `*str*` | The retry strategy to use. | `backoff` | +| `backoff` | [BackoffStrategy](#backoffstrategy) | Configuration for the backoff strategy. | | +| `retry_connection_errors` | `*bool*` | Whether to retry on connection errors. | `true` | + +## BackoffStrategy + +The backoff strategy allows retrying a request with an exponential backoff between each retry. + +### Fields + +| Name | Type | Description | Example | +| ------------------ | --------- | ----------------------------------------- | -------- | +| `initial_interval` | `*int*` | The initial interval in milliseconds. | `500` | +| `max_interval` | `*int*` | The maximum interval in milliseconds. | `60000` | +| `exponent` | `*float*` | The exponent to use for the backoff. | `1.5` | +| `max_elapsed_time` | `*int*` | The maximum elapsed time in milliseconds. 
| `300000` | \ No newline at end of file diff --git a/docs/models/validationerror.md b/docs/models/validationerror.md new file mode 100644 index 0000000..7a1654a --- /dev/null +++ b/docs/models/validationerror.md @@ -0,0 +1,10 @@ +# ValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegration.md b/docs/models/wandbintegration.md new file mode 100644 index 0000000..d48e501 --- /dev/null +++ b/docs/models/wandbintegration.md @@ -0,0 +1,12 @@ +# WandbIntegration + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | +| `api_key` | *str* | :heavy_check_mark: | The WandB API key to use for authentication. | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md new file mode 100644 index 0000000..a51067b --- /dev/null +++ b/docs/models/wandbintegrationout.md @@ -0,0 +1,11 @@ +# WandbIntegrationOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md new file mode 100644 index 0000000..e8740f3 --- /dev/null +++ b/docs/sdks/agents/README.md @@ -0,0 +1,117 @@ +# Agents +(*agents*) + +## Overview + +Agents API. + +### Available Operations + +* [complete](#complete) - Chat Completion +* [stream](#stream) - Stream Agents completion + +## complete + +Chat Completion + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.agents.complete(messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +], agent_id="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.agents.stream(model="codestral-2405", prompt="def", suffix="return a+b") + +if res is not None: + for event in res: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md new file mode 100644 index 0000000..e941104 --- /dev/null +++ b/docs/sdks/chat/README.md @@ -0,0 +1,128 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. + +### Available Operations + +* [complete](#complete) - Chat Completion +* [stream](#stream) - Stream chat completion + +## complete + +Chat Completion + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.chat.complete(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +]) + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.chat.stream(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +]) + +if res is not None: + for event in res: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md new file mode 100644 index 0000000..ee46f9b --- /dev/null +++ b/docs/sdks/embeddings/README.md @@ -0,0 +1,53 @@ +# Embeddings +(*embeddings*) + +## Overview + +Embeddings API. + +### Available Operations + +* [create](#create) - Embeddings + +## create + +Embeddings + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.embeddings.create(inputs="", model="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. 
| +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | +| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.EmbeddingResponse](../../models/embeddingresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md new file mode 100644 index 0000000..897556f --- /dev/null +++ b/docs/sdks/files/README.md @@ -0,0 +1,179 @@ +# Files +(*files*) + +## Overview + +Files API + +### Available Operations + +* [upload](#upload) - Upload File +* [list](#list) - List Files +* [retrieve](#retrieve) - Retrieve File +* [delete](#delete) - Delete File + +## upload + +Upload a file that can be used across various endpoints. + +The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + +Please contact us if you need to increase these storage limits. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.upload(file={ + "file_name": "your_file_here", + "content": open("", "rb"), +}) + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [models.File](../../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name, you should format your request as follows:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.UploadFileOut](../../models/uploadfileout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## list + +Returns a list of files that belong to the user's organization. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.list() + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.ListFilesOut](../../models/listfilesout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## retrieve + +Returns information about a specific file. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.retrieve(file_id="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.RetrieveFileOut](../../models/retrievefileout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## delete + +Delete a file. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.delete(file_id="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + + +### Response + +**[models.DeleteFileOut](../../models/deletefileout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md new file mode 100644 index 0000000..784b521 --- /dev/null +++ b/docs/sdks/fim/README.md @@ -0,0 +1,112 @@ +# Fim +(*fim*) + +## Overview + +Fill-in-the-middle API. + +### Available Operations + +* [complete](#complete) - Fim Completion +* [stream](#stream) - Stream fim completion + +## complete + +FIM completion. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.FIMCompletionResponse](../../models/fimcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
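+
+The synchronous example below iterates over the returned generator. For async code, the `*_async` naming used by the other operations in this release suggests a `stream_async` variant; the following is a minimal sketch under that assumption, reusing the prompt and suffix from the code completion examples:
+
+```python
+import asyncio
+import os
+
+from mistralai import Mistral
+
+
+async def main():
+    client = Mistral(api_key=os.getenv("MISTRAL_API_KEY", ""))
+
+    # Assumes fim.stream_async mirrors the *_async variants shown elsewhere in this release.
+    res = await client.fim.stream_async(
+        model="codestral-latest",
+        prompt="def fibonacci(n: int):",
+        suffix="n = int(input('Enter a number: '))\nprint(fibonacci(n))",
+    )
+
+    async for event in res:
+        # Each event wraps a completion chunk; print the streamed delta as it arrives.
+        if event.data.choices[0].delta.content is not None:
+            print(event.data.choices[0].delta.content, end="", flush=True)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```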
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") + +if res is not None: + for event in res: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/finetuning/README.md b/docs/sdks/finetuning/README.md new file mode 100644 index 0000000..2b357f2 --- /dev/null +++ b/docs/sdks/finetuning/README.md @@ -0,0 +1,5 @@ +# FineTuning +(*fine_tuning*) + +### Available Operations + diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md new file mode 100644 index 0000000..3366c73 --- /dev/null +++ b/docs/sdks/jobs/README.md @@ -0,0 +1,225 @@ +# Jobs +(*fine_tuning.jobs*) + +### Available Operations + +* [list](#list) - Get Fine Tuning Jobs +* [create](#create) - Create Fine Tuning Job +* [get](#get) - Get Fine Tuning Job +* [cancel](#cancel) - Cancel Fine Tuning Job +* [start](#start) - Start Fine Tuning Job + +## list + +Get a list of fine-tuning jobs for your organization and user. 
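+
+The example below returns the first page of jobs. The filters documented under Parameters can be combined on the same call; a short sketch with illustrative values (the page size and base-model filter are arbitrary choices, not requirements):
+
+```python
+from mistralai import Mistral
+import os
+
+s = Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+# Only jobs created by the caller for the given base model, five per page.
+res = s.fine_tuning.jobs.list(
+    page=0,
+    page_size=5,
+    model="open-mistral-7b",
+    created_by_me=True,
+)
+
+if res is not None:
+    print(res)
+```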
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.list() + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.QueryParamStatus]](../../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.JobsOut](../../models/jobsout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## create + +Create a new fine-tuning job, it will be queued for processing. 
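+
+The example below passes only the required arguments. A fuller sketch, mirroring the asynchronous example in `examples/async_jobs.py` but using the synchronous client (the file IDs are placeholders for IDs returned by `files.upload`):
+
+```python
+from mistralai import Mistral
+from mistralai.models import TrainingParametersIn
+import os
+
+s = Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+# "<training_file_id>" and "<validation_file_id>" are placeholders, not real file IDs.
+res = s.fine_tuning.jobs.create(
+    model="open-mistral-7b",
+    training_files=[{"file_id": "<training_file_id>", "weight": 1}],
+    validation_files=["<validation_file_id>"],
+    hyperparameters=TrainingParametersIn(
+        training_steps=1,
+        learning_rate=0.0001,
+    ),
+    auto_start=False,
+)
+
+if res is not None:
+    print(res)
+```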
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.create(model="codestral-latest", hyperparameters={}) + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | [models.FineTuneableModel](../../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `hyperparameters` | [models.TrainingParametersIn](../../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. | +| `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | +| `integrations` | List[[models.WandbIntegration](../../models/wandbintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `repositories` | List[[models.GithubRepositoryIn](../../models/githubrepositoryin.md)] | :heavy_minus_sign: | N/A | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + + +### Response + +**[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## get + +Get a fine-tuned job details by its UUID. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.get(job_id="b18d8d81-fd7b-4764-a31e-475cb1f36591") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to analyse. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.DetailedJobOut](../../models/detailedjobout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## cancel + +Request the cancellation of a fine tuning job. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.cancel(job_id="03fa7112-315a-4072-a9f2-43f3f1ec962e") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to cancel. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.DetailedJobOut](../../models/detailedjobout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## start + +Request the start of a validated fine tuning job. 
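+
+Starting is typically only needed for jobs that were not auto-started at creation time; a minimal sketch of that flow, assuming a job created with `auto_start=False` (the training file ID is a placeholder):
+
+```python
+from mistralai import Mistral
+from mistralai.models import TrainingParametersIn
+import os
+
+s = Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+# Create the job without starting it ("<training_file_id>" is a placeholder).
+created = s.fine_tuning.jobs.create(
+    model="open-mistral-7b",
+    training_files=[{"file_id": "<training_file_id>", "weight": 1}],
+    hyperparameters=TrainingParametersIn(training_steps=1, learning_rate=0.0001),
+    auto_start=False,
+)
+
+# Launch the job once it has been validated.
+started = s.fine_tuning.jobs.start(job_id=created.id)
+print(started.status)
+```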
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.start(job_id="0eb0f807-fb9f-4e46-9c13-4e257df6e1ba") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.DetailedJobOut](../../models/detailedjobout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/mistral/README.md b/docs/sdks/mistral/README.md new file mode 100644 index 0000000..d4e985e --- /dev/null +++ b/docs/sdks/mistral/README.md @@ -0,0 +1,9 @@ +# Mistral SDK + + +## Overview + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + +### Available Operations + diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md new file mode 100644 index 0000000..051aa53 --- /dev/null +++ b/docs/sdks/models/README.md @@ -0,0 +1,259 @@ +# Models +(*models*) + +## Overview + +Model Management API + +### Available Operations + +* [list](#list) - List Models +* [retrieve](#retrieve) - Retrieve Model +* [delete](#delete) - Delete Model +* [update](#update) - Update Fine Tuned Model +* [archive](#archive) - Archive Fine Tuned Model +* [unarchive](#unarchive) - Unarchive Fine Tuned Model + +## list + +List all models available to the user. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.list() + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.ModelList](../../models/modellist.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## retrieve + +Retrieve a model information. 
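+
+Besides base models, this also works for fine-tuned models, such as the `fine_tuned_model` name reported on a completed fine-tuning job; a brief sketch combining it with `fine_tuning.jobs.get` (the job ID is a placeholder):
+
+```python
+from mistralai import Mistral
+import os
+
+s = Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+# "<job_id>" is a placeholder for a real fine-tuning job ID.
+job = s.fine_tuning.jobs.get(job_id="<job_id>")
+
+if job.fine_tuned_model is not None:
+    # Inspect the returned model card; see docs/models/modelcard.md for its fields.
+    card = s.models.retrieve(model_id=job.fine_tuned_model)
+    print(card)
+```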
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to retrieve. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ModelCard](../../models/modelcard.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## delete + +Delete a fine-tuned model. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to delete. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.DeleteModelOut](../../models/deletemodelout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## update + +Update a model name or description. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.FTModelOut](../../models/ftmodelout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## archive + +Archive a fine-tuned model. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to archive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ArchiveFTModelOut](../../models/archiveftmodelout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## unarchive + +Un-archive a fine-tuned model. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to unarchive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + + +### Response + +**[models.UnarchiveFTModelOut](../../models/unarchiveftmodelout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | diff --git a/examples/async_agents_no_streaming.py b/examples/async_agents_no_streaming.py new file mode 100755 index 0000000..799333b --- /dev/null +++ b/examples/async_agents_no_streaming.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai import Mistral +from mistralai.models import UserMessage + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + chat_response = await client.agents.complete_async( + agent_id="", + messages=[UserMessage(content="What is the best French cheese?")], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_chat_no_streaming.py b/examples/async_chat_no_streaming.py index 0eef8c3..9448f09 100755 --- a/examples/async_chat_no_streaming.py +++ b/examples/async_chat_no_streaming.py @@ -3,25 +3,23 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage async def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-tiny" - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) - chat_response = await client.chat( + chat_response = await client.chat.complete_async( model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[UserMessage(content="What is the best French cheese?")], ) print(chat_response.choices[0].message.content) - await client.close() - if __name__ == "__main__": asyncio.run(main()) diff --git a/examples/async_chat_with_streaming.py b/examples/async_chat_with_streaming.py index f26b73d..736c47a 100755 --- a/examples/async_chat_with_streaming.py +++ b/examples/async_chat_with_streaming.py @@ -3,30 +3,29 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage async def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-tiny" - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) print("Chat response:") - response = client.chat_stream( + response = await client.chat.stream_async( model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[ + UserMessage(content="What is the best French cheese?give the best 50") + ], ) - async for chunk in response: - if chunk.choices[0].delta.content is not None: - print(chunk.choices[0].delta.content, end="") + if chunk.data.choices[0].delta.content is not None: + print(chunk.data.choices[0].delta.content, end="") print("\n") - await client.close() - if __name__ == "__main__": asyncio.run(main()) diff --git a/examples/completion.py b/examples/async_code_completion.py similarity index 78% rename from examples/completion.py rename to examples/async_code_completion.py index f76f0f1..a6bc571 100644 --- a/examples/completion.py +++ b/examples/async_code_completion.py @@ -3,18 +3,18 @@ import asyncio import os -from mistralai.client import MistralClient +from mistralai import Mistral async def main(): api_key = 
os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) prompt = "def fibonacci(n: int):" suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" - response = client.completion( + response = await client.fim.complete_async( model="codestral-latest", prompt=prompt, suffix=suffix, diff --git a/examples/async_completion.py b/examples/async_completion.py deleted file mode 100644 index 6aa22b4..0000000 --- a/examples/async_completion.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python - -import asyncio -import os - -from mistralai.async_client import MistralAsyncClient - - -async def main(): - api_key = os.environ["MISTRAL_API_KEY"] - - client = MistralAsyncClient(api_key=api_key) - - prompt = "def fibonacci(n: int):" - suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" - - response = await client.completion( - model="codestral-latest", - prompt=prompt, - suffix=suffix, - ) - - print( - f""" -{prompt} -{response.choices[0].message.content} -{suffix} -""" - ) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/async_embeddings.py b/examples/async_embeddings.py index a7ecd47..781e87a 100755 --- a/examples/async_embeddings.py +++ b/examples/async_embeddings.py @@ -3,17 +3,17 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient +from mistralai import Mistral async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) - embeddings_batch_response = await client.embeddings( + embeddings_batch_response = await client.embeddings.create_async( model="mistral-embed", - input=["What is the best French cheese?"] * 10, + inputs=["What is the best French cheese?"] * 10, ) print(embeddings_batch_response) diff --git a/examples/async_files.py b/examples/async_files.py index 1022b7a..64c9948 100644 --- a/examples/async_files.py +++ b/examples/async_files.py @@ -3,28 +3,34 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient +from mistralai import Mistral +from mistralai.models import File async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create a new file - created_file = await client.files.create(file=open("examples/file.jsonl", "rb").read()) + created_file = await client.files.upload_async( + file=File( + file_name="training_file.jsonl", + content=open("examples/file.jsonl", "rb").read(), + ) + ) print(created_file) # List files - files = await client.files.list() + files = await client.files.list_async() print(files) # Retrieve a file - retrieved_file = await client.files.retrieve(created_file.id) + retrieved_file = await client.files.retrieve_async(file_id=created_file.id) print(retrieved_file) # Delete a file - deleted_file = await client.files.delete(created_file.id) + deleted_file = await client.files.delete_async(file_id=created_file.id) print(deleted_file) diff --git a/examples/async_jobs.py b/examples/async_jobs.py index 792735d..b1f9e3b 100644 --- a/examples/async_jobs.py +++ b/examples/async_jobs.py @@ -3,27 +3,31 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.jobs import TrainingParameters +from mistralai import Mistral +from mistralai.models import File, TrainingParametersIn async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = 
Mistral(api_key=api_key) # Create new files with open("examples/file.jsonl", "rb") as f: - training_file = await client.files.create(file=f) + training_file = await client.files.upload_async( + file=File(file_name="file.jsonl", content=f) + ) with open("examples/validation_file.jsonl", "rb") as f: - validation_file = await client.files.create(file=f) + validation_file = await client.files.upload_async( + file=File(file_name="validation_file.jsonl", content=f) + ) # Create a new job - created_job = await client.jobs.create( + created_job = await client.fine_tuning.jobs.create_async( model="open-mistral-7b", - training_files=[training_file.id], + training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParameters( + hyperparameters=TrainingParametersIn( training_steps=1, learning_rate=0.0001, ), @@ -31,20 +35,20 @@ async def main(): print(created_job) # List jobs - jobs = await client.jobs.list(page=0, page_size=5) + jobs = await client.fine_tuning.jobs.list_async(page=0, page_size=5) print(jobs) # Retrieve a job - retrieved_job = await client.jobs.retrieve(created_job.id) + retrieved_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) print(retrieved_job) # Cancel a job - canceled_job = await client.jobs.cancel(created_job.id) + canceled_job = await client.fine_tuning.jobs.cancel_async(job_id=created_job.id) print(canceled_job) # Delete files - await client.files.delete(training_file.id) - await client.files.delete(validation_file.id) + await client.files.delete_async(file_id=training_file.id) + await client.files.delete_async(file_id=validation_file.id) if __name__ == "__main__": diff --git a/examples/async_jobs_chat.py b/examples/async_jobs_chat.py index e501914..7e0d057 100644 --- a/examples/async_jobs_chat.py +++ b/examples/async_jobs_chat.py @@ -3,27 +3,31 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.jobs import TrainingParameters +from mistralai import Mistral +from mistralai.models import File, TrainingParametersIn POLLING_INTERVAL = 10 async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create new files with open("examples/file.jsonl", "rb") as f: - training_file = await client.files.create(file=f) + training_file = await client.files.upload_async( + file=File(file_name="file.jsonl", content=f) + ) with open("examples/validation_file.jsonl", "rb") as f: - validation_file = await client.files.create(file=f) + validation_file = await client.files.upload_async( + file=File(file_name="validation_file.jsonl", content=f) + ) # Create a new job - created_job = await client.jobs.create( + created_job = await client.fine_tuning.jobs.create_async( model="open-mistral-7b", - training_files=[training_file.id], + training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParameters( + hyperparameters=TrainingParametersIn( training_steps=1, learning_rate=0.0001, ), @@ -31,7 +35,7 @@ async def main(): print(created_job) while created_job.status in ["RUNNING", "QUEUED"]: - created_job = await client.jobs.retrieve(created_job.id) + created_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) print(f"Job is {created_job.status}, waiting {POLLING_INTERVAL} seconds") await asyncio.sleep(POLLING_INTERVAL) @@ -40,10 +44,13 @@ async def main(): return # Chat with model - response = await 
client.chat( + response = await client.chat.complete_async( model=created_job.fine_tuned_model, messages=[ - {"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, + { + "role": "system", + "content": "Marv is a factual chatbot that is also sarcastic.", + }, {"role": "user", "content": "What is the capital of France ?"}, ], ) @@ -51,11 +58,11 @@ async def main(): print(response.choices[0].message.content) # Delete files - await client.files.delete(training_file.id) - await client.files.delete(validation_file.id) + await client.files.delete_async(file_id=training_file.id) + await client.files.delete_async(file_id=validation_file.id) # Delete fine-tuned model - await client.delete_model(created_job.fine_tuned_model) + await client.models.delete_async(model_id=created_job.fine_tuned_model) if __name__ == "__main__": diff --git a/examples/async_list_models.py b/examples/async_list_models.py index b6de5d5..4243d86 100755 --- a/examples/async_list_models.py +++ b/examples/async_list_models.py @@ -3,15 +3,15 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient +from mistralai import Mistral async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) - list_models_response = await client.list_models() + list_models_response = await client.models.list_async() print(list_models_response) diff --git a/examples/azure/chat_no_streaming.py.py b/examples/azure/chat_no_streaming.py.py new file mode 100644 index 0000000..485b594 --- /dev/null +++ b/examples/azure/chat_no_streaming.py.py @@ -0,0 +1,16 @@ +import os + +from mistralai_azure import MistralAzure + +client = MistralAzure( + azure_api_key=os.environ["AZURE_API_KEY"], + azure_endpoint=os.environ["AZURE_ENDPOINT"], +) + +res = client.chat.complete( + messages=[ + {"role": "user", "content": "What is the capital of France?"}, + ], + # you don't need model as it will always be "azureai" +) +print(res.choices[0].message.content) diff --git a/examples/chat_no_streaming.py b/examples/chat_no_streaming.py index 797b58d..72506dd 100755 --- a/examples/chat_no_streaming.py +++ b/examples/chat_no_streaming.py @@ -2,19 +2,19 @@ import os -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-tiny" - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - chat_response = client.chat( + chat_response = client.chat.complete( model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[UserMessage(content="What is the best French cheese?")], ) print(chat_response.choices[0].message.content) diff --git a/examples/chat_with_streaming.py b/examples/chat_with_streaming.py index bc818d3..5fc7503 100755 --- a/examples/chat_with_streaming.py +++ b/examples/chat_with_streaming.py @@ -2,22 +2,22 @@ import os -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-tiny" - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - for chunk in client.chat_stream( + for chunk in client.chat.stream( model=model, - messages=[ChatMessage(role="user", content="What is the best French 
cheese?")], + messages=[UserMessage(content="What is the best French cheese?")], ): - if chunk.choices[0].delta.content is not None: - print(chunk.choices[0].delta.content, end="") + + print(chunk.data.choices[0].delta.content) if __name__ == "__main__": diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index 4304551..bf0f638 100755 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -8,8 +8,8 @@ import readline import sys -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import AssistantMessage, SystemMessage, UserMessage MODEL_LIST = [ "mistral-small-latest", @@ -52,7 +52,9 @@ def completer(text, state): options = find_completions(COMMAND_LIST, line_parts[:-1]) try: - return [option for option in options if option.startswith(line_parts[-1])][state] + return [option for option in options if option.startswith(line_parts[-1])][ + state + ] except IndexError: return None @@ -64,10 +66,12 @@ def completer(text, state): class ChatBot: - def __init__(self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE): + def __init__( + self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE + ): if not api_key: raise ValueError("An API key must be provided to use the Mistral API.") - self.client = MistralClient(api_key=api_key) + self.client = Mistral(api_key=api_key) self.model = model self.temperature = temperature self.system_message = system_message @@ -88,11 +92,13 @@ def opening_instructions(self): def new_chat(self): print("") - print(f"Starting new chat with model: {self.model}, temperature: {self.temperature}") + print( + f"Starting new chat with model: {self.model}, temperature: {self.temperature}" + ) print("") self.messages = [] if self.system_message: - self.messages.append(ChatMessage(role="system", content=self.system_message)) + self.messages.append(SystemMessage(content=self.system_message)) def switch_model(self, input): model = self.get_arguments(input) @@ -138,13 +144,17 @@ def run_inference(self, content): print("MISTRAL:") print("") - self.messages.append(ChatMessage(role="user", content=content)) + self.messages.append(UserMessage(content=content)) assistant_response = "" - logger.debug(f"Running inference with model: {self.model}, temperature: {self.temperature}") + logger.debug( + f"Running inference with model: {self.model}, temperature: {self.temperature}" + ) logger.debug(f"Sending messages: {self.messages}") - for chunk in self.client.chat_stream(model=self.model, temperature=self.temperature, messages=self.messages): - response = chunk.choices[0].delta.content + for chunk in self.client.chat.stream( + model=self.model, temperature=self.temperature, messages=self.messages + ): + response = chunk.data.choices[0].delta.content if response is not None: print(response, end="", flush=True) assistant_response += response @@ -152,7 +162,7 @@ def run_inference(self, content): print("", flush=True) if assistant_response: - self.messages.append(ChatMessage(role="assistant", content=assistant_response)) + self.messages.append(AssistantMessage(content=assistant_response)) logger.debug(f"Current messages: {self.messages}") def get_command(self, input): @@ -204,7 +214,9 @@ def exit(self): if __name__ == "__main__": - parser = argparse.ArgumentParser(description="A simple chatbot using the Mistral API") + parser = argparse.ArgumentParser( + description="A simple chatbot using the 
Mistral API" + ) parser.add_argument( "--api-key", default=os.environ.get("MISTRAL_API_KEY"), @@ -217,7 +229,9 @@ def exit(self): default=DEFAULT_MODEL, help="Model for chat inference. Choices are %(choices)s. Defaults to %(default)s", ) - parser.add_argument("-s", "--system-message", help="Optional system message to prepend.") + parser.add_argument( + "-s", "--system-message", help="Optional system message to prepend." + ) parser.add_argument( "-t", "--temperature", @@ -225,7 +239,9 @@ def exit(self): default=DEFAULT_TEMPERATURE, help="Optional temperature for chat inference. Defaults to %(default)s", ) - parser.add_argument("-d", "--debug", action="store_true", help="Enable debug logging") + parser.add_argument( + "-d", "--debug", action="store_true", help="Enable debug logging" + ) args = parser.parse_args() diff --git a/examples/code_completion.py b/examples/code_completion.py index f76f0f1..f3d70a6 100644 --- a/examples/code_completion.py +++ b/examples/code_completion.py @@ -3,27 +3,27 @@ import asyncio import os -from mistralai.client import MistralClient +from mistralai import Mistral async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) prompt = "def fibonacci(n: int):" suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" - response = client.completion( + response = client.fim.complete( model="codestral-latest", prompt=prompt, suffix=suffix, ) - print( f""" {prompt} {response.choices[0].message.content} +{response.choices[0].message.role} {suffix} """ ) diff --git a/examples/completion_with_streaming.py b/examples/completion_with_streaming.py index f0760bf..5bee203 100644 --- a/examples/completion_with_streaming.py +++ b/examples/completion_with_streaming.py @@ -3,25 +3,24 @@ import asyncio import os -from mistralai.client import MistralClient +from mistralai import Mistral async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) prompt = "def fibonacci(n: int):" suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" print(prompt) - for chunk in client.completion_stream( + for chunk in client.fim.stream( model="codestral-latest", prompt=prompt, suffix=suffix, ): - if chunk.choices[0].delta.content is not None: - print(chunk.choices[0].delta.content, end="") + print(chunk.data.choices[0].delta.content, end="") print(suffix) diff --git a/examples/dry_run_job.py b/examples/dry_run_job.py index 2e1af6d..0701b19 100644 --- a/examples/dry_run_job.py +++ b/examples/dry_run_job.py @@ -3,24 +3,26 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.jobs import TrainingParameters +from mistralai import Mistral +from mistralai.models import TrainingParametersIn async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create new files with open("examples/file.jsonl", "rb") as f: - training_file = await client.files.create(file=f) + training_file = await client.files.upload_async( + file={"file_name": "test-file.jsonl", "content": f} + ) # Create a new job - dry_run_job = await client.jobs.create( + dry_run_job = await client.fine_tuning.jobs.create_async( model="open-mistral-7b", - training_files=[training_file.id], - hyperparameters=TrainingParameters( + training_files=[{"file_id": training_file.id, "weight": 1}], + hyperparameters=TrainingParametersIn( training_steps=1, 
learning_rate=0.0001, ), diff --git a/examples/embeddings.py b/examples/embeddings.py index ffde00a..046c87d 100755 --- a/examples/embeddings.py +++ b/examples/embeddings.py @@ -2,17 +2,17 @@ import os -from mistralai.client import MistralClient +from mistralai import Mistral def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - embeddings_response = client.embeddings( + embeddings_response = client.embeddings.create( model="mistral-embed", - input=["What is the best French cheese?"] * 10, + inputs=["What is the best French cheese?"] * 10, ) print(embeddings_response) diff --git a/examples/files.py b/examples/files.py index af034b9..a10fd03 100644 --- a/examples/files.py +++ b/examples/files.py @@ -2,16 +2,22 @@ import os -from mistralai.client import MistralClient +from mistralai import Mistral +from mistralai.models import File def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create a new file - created_file = client.files.create(file=("training_file.jsonl", open("examples/file.jsonl", "rb").read())) + created_file = client.files.upload( + file=File( + file_name="training_file.jsonl", + content=open("examples/file.jsonl", "rb").read(), + ) + ) print(created_file) # List files @@ -19,11 +25,11 @@ def main(): print(files) # Retrieve a file - retrieved_file = client.files.retrieve(created_file.id) + retrieved_file = client.files.retrieve(file_id=created_file.id) print(retrieved_file) # Delete a file - deleted_file = client.files.delete(created_file.id) + deleted_file = client.files.delete(file_id=created_file.id) print(deleted_file) diff --git a/examples/function_calling.py b/examples/function_calling.py index 76fb2e1..76ce489 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -3,15 +3,24 @@ import os from typing import Dict, List -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage, Function +from mistralai import Mistral +from mistralai.models.assistantmessage import AssistantMessage +from mistralai.models.function import Function +from mistralai.models.toolmessage import ToolMessage +from mistralai.models.usermessage import UserMessage # Assuming we have the following data data = { "transaction_id": ["T1001", "T1002", "T1003", "T1004", "T1005"], "customer_id": ["C001", "C002", "C003", "C002", "C001"], "payment_amount": [125.50, 89.99, 120.00, 54.30, 210.20], - "payment_date": ["2021-10-05", "2021-10-06", "2021-10-07", "2021-10-05", "2021-10-08"], + "payment_date": [ + "2021-10-05", + "2021-10-06", + "2021-10-07", + "2021-10-05", + "2021-10-08", + ], "payment_status": ["Paid", "Unpaid", "Paid", "Paid", "Pending"], } @@ -20,14 +29,16 @@ def retrieve_payment_status(data: Dict[str, List], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"status": data["payment_status"][i]}) - return json.dumps({"status": "Error - transaction id not found"}) + else: + return json.dumps({"status": "Error - transaction id not found"}) def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"date": data["payment_date"][i]}) - return json.dumps({"status": "Error - transaction id not found"}) + else: + return json.dumps({"status": "Error - transaction id not found"}) names_to_functions = { @@ -44,7 
+55,12 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: parameters={ "type": "object", "required": ["transaction_id"], - "properties": {"transaction_id": {"type": "string", "description": "The transaction id."}}, + "properties": { + "transaction_id": { + "type": "string", + "description": "The transaction id.", + } + }, }, ), }, @@ -56,7 +72,12 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: parameters={ "type": "object", "required": ["transaction_id"], - "properties": {"transaction_id": {"type": "string", "description": "The transaction id."}}, + "properties": { + "transaction_id": { + "type": "string", + "description": "The transaction id.", + } + }, }, ), }, @@ -65,30 +86,38 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-small-latest" -client = MistralClient(api_key=api_key) +client = Mistral(api_key=api_key) -messages = [ChatMessage(role="user", content="What's the status of my transaction?")] +messages = [UserMessage(content="What's the status of my transaction?")] -response = client.chat(model=model, messages=messages, tools=tools) +response = client.chat.complete(model=model, messages=messages, tools=tools) print(response.choices[0].message.content) -messages.append(ChatMessage(role="assistant", content=response.choices[0].message.content)) -messages.append(ChatMessage(role="user", content="My transaction ID is T1001.")) +messages.append(AssistantMessage(content=response.choices[0].message.content)) +messages.append(UserMessage(content="My transaction ID is T1001.")) -response = client.chat(model=model, messages=messages, tools=tools) +response = client.chat.complete(model=model, messages=messages, tools=tools) tool_call = response.choices[0].message.tool_calls[0] function_name = tool_call.function.name function_params = json.loads(tool_call.function.arguments) -print(f"calling function_name: {function_name}, with function_params: {function_params}") +print( + f"calling function_name: {function_name}, with function_params: {function_params}" +) function_result = names_to_functions[function_name](**function_params) messages.append(response.choices[0].message) -messages.append(ChatMessage(role="tool", name=function_name, content=function_result, tool_call_id=tool_call.id)) - -response = client.chat(model=model, messages=messages, tools=tools) +messages.append( + ToolMessage( + name=function_name, + content=function_result, + tool_call_id=tool_call.id, + ) +) + +response = client.chat.complete(model=model, messages=messages, tools=tools) print(f"{response.choices[0].message.content}") diff --git a/examples/gcp/async_chat_no_streaming.py b/examples/gcp/async_chat_no_streaming.py new file mode 100755 index 0000000..178f151 --- /dev/null +++ b/examples/gcp/async_chat_no_streaming.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai_gcp import MistralGoogleCloud +from mistralai_gcp.models.usermessage import UserMessage + + +async def main(): + model = "mistral-large-2407" + + client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) + + chat_response = await client.chat.complete_async( + model=model, + messages=[UserMessage(content="What is the best French cheese?")], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/jobs.py b/examples/jobs.py index 2ba8ae7..246edac 100644 --- a/examples/jobs.py +++ 
b/examples/jobs.py @@ -1,48 +1,52 @@ #!/usr/bin/env python import os -from mistralai.client import MistralClient -from mistralai.models.jobs import TrainingParameters +from mistralai import Mistral +from mistralai.models import File, TrainingParametersIn def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create new files with open("examples/file.jsonl", "rb") as f: - training_file = client.files.create(file=f) + training_file = client.files.upload( + file=File(file_name="file.jsonl", content=f) + ) with open("examples/validation_file.jsonl", "rb") as f: - validation_file = client.files.create(file=f) + validation_file = client.files.upload( + file=File(file_name="validation_file.jsonl", content=f) + ) # Create a new job - created_job = client.jobs.create( + created_job = client.fine_tuning.jobs.create( model="open-mistral-7b", - training_files=[training_file.id], + training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParameters( + hyperparameters=TrainingParametersIn( training_steps=1, learning_rate=0.0001, ), ) print(created_job) - jobs = client.jobs.list(created_after=created_job.created_at - 10) - for job in jobs.data: - print(f"Retrieved job: {job.id}") + # List jobs + jobs = client.fine_tuning.jobs.list(page=0, page_size=5) + print(jobs) # Retrieve a job - retrieved_job = client.jobs.retrieve(created_job.id) + retrieved_job = client.fine_tuning.jobs.get(job_id=created_job.id) print(retrieved_job) # Cancel a job - canceled_job = client.jobs.cancel(created_job.id) + canceled_job = client.fine_tuning.jobs.cancel(job_id=created_job.id) print(canceled_job) # Delete files - client.files.delete(training_file.id) - client.files.delete(validation_file.id) + client.files.delete(file_id=training_file.id) + client.files.delete(file_id=validation_file.id) if __name__ == "__main__": diff --git a/examples/json_format.py b/examples/json_format.py index 749965b..23c3868 100755 --- a/examples/json_format.py +++ b/examples/json_format.py @@ -2,20 +2,24 @@ import os -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-large-latest" - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - chat_response = client.chat( + chat_response = client.chat.complete( model=model, response_format={"type": "json_object"}, - messages=[ChatMessage(role="user", content="What is the best French cheese? Answer shortly in JSON.")], + messages=[ + UserMessage( + content="What is the best French cheese? 
Answer shortly in JSON.", + ) + ], ) print(chat_response.choices[0].message.content) diff --git a/examples/list_models.py b/examples/list_models.py index b21dcd1..c6c0c85 100755 --- a/examples/list_models.py +++ b/examples/list_models.py @@ -2,15 +2,15 @@ import os -from mistralai.client import MistralClient +from mistralai import Mistral def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - list_models_response = client.list_models() + list_models_response = client.models.list() print(list_models_response) diff --git a/packages/mistralai_azure/.genignore b/packages/mistralai_azure/.genignore new file mode 100644 index 0000000..513646d --- /dev/null +++ b/packages/mistralai_azure/.genignore @@ -0,0 +1,4 @@ +src/mistralai_azure/sdk.py +README.md +USAGE.md +docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_azure/.gitattributes b/packages/mistralai_azure/.gitattributes new file mode 100644 index 0000000..4d75d59 --- /dev/null +++ b/packages/mistralai_azure/.gitattributes @@ -0,0 +1,2 @@ +# This allows generated code to be indexed correctly +*.py linguist-generated=false \ No newline at end of file diff --git a/packages/mistralai_azure/.gitignore b/packages/mistralai_azure/.gitignore new file mode 100644 index 0000000..477b772 --- /dev/null +++ b/packages/mistralai_azure/.gitignore @@ -0,0 +1,8 @@ +.venv/ +venv/ +src/*.egg-info/ +__pycache__/ +.pytest_cache/ +.python-version +.DS_Store +pyrightconfig.json diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock new file mode 100644 index 0000000..047a649 --- /dev/null +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -0,0 +1,133 @@ +lockVersion: 2.0.0 +id: dc40fa48-2c4d-46ad-ac8b-270749770f34 +management: + docChecksum: f04749e097bb06d5fb8850400b089250 + docVersion: 0.0.2 + speakeasyVersion: 1.356.0 + generationVersion: 2.388.1 + releaseVersion: 1.0.0-rc.2 + configChecksum: 98e9cf39c9535097961a0ca73dbac10b + published: true +features: + python: + additionalDependencies: 1.0.0 + constsAndDefaults: 1.0.2 + core: 5.3.4 + defaultEnabledRetries: 0.2.0 + envVarSecurityUsage: 0.3.1 + examples: 3.0.0 + flatRequests: 1.0.1 + globalSecurity: 3.0.1 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.0.0 + nameOverrides: 3.0.0 + nullables: 1.0.0 + responseFormat: 1.0.0 + retries: 3.0.0 + sdkHooks: 1.0.0 + serverEvents: 1.0.2 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.0.1 +generatedFiles: + - src/mistralai_azure/sdkconfiguration.py + - src/mistralai_azure/chat.py + - .vscode/settings.json + - poetry.toml + - py.typed + - pylintrc + - pyproject.toml + - scripts/compile.sh + - scripts/publish.sh + - src/mistralai_azure/__init__.py + - src/mistralai_azure/basesdk.py + - src/mistralai_azure/httpclient.py + - src/mistralai_azure/py.typed + - src/mistralai_azure/types/__init__.py + - src/mistralai_azure/types/basemodel.py + - src/mistralai_azure/utils/__init__.py + - src/mistralai_azure/utils/annotations.py + - src/mistralai_azure/utils/enums.py + - src/mistralai_azure/utils/eventstreaming.py + - src/mistralai_azure/utils/forms.py + - src/mistralai_azure/utils/headers.py + - src/mistralai_azure/utils/logger.py + - src/mistralai_azure/utils/metadata.py + - src/mistralai_azure/utils/queryparams.py + - src/mistralai_azure/utils/requestbodies.py + - src/mistralai_azure/utils/retries.py + - src/mistralai_azure/utils/security.py + - 
src/mistralai_azure/utils/serializers.py + - src/mistralai_azure/utils/url.py + - src/mistralai_azure/utils/values.py + - src/mistralai_azure/models/sdkerror.py + - src/mistralai_azure/models/completionevent.py + - src/mistralai_azure/models/completionchunk.py + - src/mistralai_azure/models/completionresponsestreamchoice.py + - src/mistralai_azure/models/deltamessage.py + - src/mistralai_azure/models/toolcall.py + - src/mistralai_azure/models/functioncall.py + - src/mistralai_azure/models/usageinfo.py + - src/mistralai_azure/models/httpvalidationerror.py + - src/mistralai_azure/models/validationerror.py + - src/mistralai_azure/models/chatcompletionstreamrequest.py + - src/mistralai_azure/models/tool.py + - src/mistralai_azure/models/function.py + - src/mistralai_azure/models/responseformat.py + - src/mistralai_azure/models/systemmessage.py + - src/mistralai_azure/models/contentchunk.py + - src/mistralai_azure/models/usermessage.py + - src/mistralai_azure/models/textchunk.py + - src/mistralai_azure/models/assistantmessage.py + - src/mistralai_azure/models/toolmessage.py + - src/mistralai_azure/models/chatcompletionresponse.py + - src/mistralai_azure/models/chatcompletionchoice.py + - src/mistralai_azure/models/chatcompletionrequest.py + - src/mistralai_azure/models/security.py + - src/mistralai_azure/models/__init__.py + - docs/models/completionevent.md + - docs/models/completionchunk.md + - docs/models/finishreason.md + - docs/models/completionresponsestreamchoice.md + - docs/models/deltamessage.md + - docs/models/toolcall.md + - docs/models/arguments.md + - docs/models/functioncall.md + - docs/models/usageinfo.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/validationerror.md + - docs/models/stop.md + - docs/models/messages.md + - docs/models/toolchoice.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/tool.md + - docs/models/function.md + - docs/models/responseformats.md + - docs/models/responseformat.md + - docs/models/content.md + - docs/models/role.md + - docs/models/systemmessage.md + - docs/models/contentchunk.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/usermessage.md + - docs/models/textchunk.md + - docs/models/assistantmessagerole.md + - docs/models/assistantmessage.md + - docs/models/toolmessagerole.md + - docs/models/toolmessage.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/security.md + - docs/models/utils/retryconfig.md + - .gitattributes + - src/mistralai_azure/_hooks/sdkhooks.py + - src/mistralai_azure/_hooks/types.py + - src/mistralai_azure/_hooks/__init__.py diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml new file mode 100644 index 0000000..b36d96f --- /dev/null +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -0,0 +1,41 @@ +configVersion: 2.0.0 +generation: + sdkClassName: MistralAzure + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + auth: + oAuth2ClientCredentialsEnabled: true +python: + version: 1.0.0-rc.2 + additionalDependencies: + 
dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + authors: + - Mistral + clientServerStatusCodesAsErrors: true + description: Python Client SDK for the Mistral AI API in Azure. + enumFormat: union + flattenGlobalSecurity: true + flattenRequests: true + imports: + option: openapi + paths: + callbacks: "" + errors: "" + operations: "" + shared: "" + webhooks: "" + inputModelSuffix: input + maxMethodParams: 4 + methodArguments: infer-optional-args + outputModelSuffix: output + packageName: mistralai_azure + responseFormat: flat + templateVersion: v2 diff --git a/packages/mistralai_azure/.vscode/settings.json b/packages/mistralai_azure/.vscode/settings.json new file mode 100644 index 0000000..8d79f0a --- /dev/null +++ b/packages/mistralai_azure/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "python.testing.pytestArgs": ["tests", "-vv"], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "pylint.args": ["--rcfile=pylintrc"] +} diff --git a/packages/mistralai_azure/CONTRIBUTING.md b/packages/mistralai_azure/CONTRIBUTING.md new file mode 100644 index 0000000..d585717 --- /dev/null +++ b/packages/mistralai_azure/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to This Repository + +Thank you for your interest in contributing to this repository. Please note that this repository contains generated code. As such, we do not accept direct changes or pull requests. Instead, we encourage you to follow the guidelines below to report issues and suggest improvements. + +## How to Report Issues + +If you encounter any bugs or have suggestions for improvements, please open an issue on GitHub. When reporting an issue, please provide as much detail as possible to help us reproduce the problem. This includes: + +- A clear and descriptive title +- Steps to reproduce the issue +- Expected and actual behavior +- Any relevant logs, screenshots, or error messages +- Information about your environment (e.g., operating system, software versions) + - For example can be collected using the `npx envinfo` command from your terminal if you have Node.js installed + +## Issue Triage and Upstream Fixes + +We will review and triage issues as quickly as possible. Our goal is to address bugs and incorporate improvements in the upstream source code. Fixes will be included in the next generation of the generated code. + +## Contact + +If you have any questions or need further assistance, please feel free to reach out by opening an issue. + +Thank you for your understanding and cooperation! + +The Maintainers diff --git a/packages/mistralai_azure/README.md b/packages/mistralai_azure/README.md new file mode 100644 index 0000000..65bc2e4 --- /dev/null +++ b/packages/mistralai_azure/README.md @@ -0,0 +1,430 @@ +# Mistral on Azure Python Client + +## SDK Installation + +PIP +```bash +pip install mistralai +``` + +Poetry +```bash +poetry add mistralai +``` + +**Prerequisites** + +Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). + + +## SDK Example Usage + +### Create Chat Completions + +This example shows how to create chat completions. 
+
+```python
+# Synchronous Example
+from mistralai_azure import MistralAzure
+import os
+
+s = MistralAzure(
+    azure_api_key=os.getenv("AZURE_API_KEY", ""),
+    azure_endpoint=os.getenv("AZURE_ENDPOINT", "")
+)
+
+
+res = s.chat.complete(
+    messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ],
+    model="azureai"
+)
+
+if res is not None:
+    # handle response
+    pass
+```
+
+</br>
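The snippet above leaves response handling as a stub. As a minimal sketch (assuming the call succeeded and the server returned at least one choice), the completion text from `res` can be printed like this:

```python
# Illustrative only: read the first choice of the ChatCompletionResponse above.
# Assumes `res` comes from the synchronous example and that `choices` is non-empty.
if res is not None and res.choices:
    print(res.choices[0].message.content)
```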
+
+The same SDK client can also be used to make asynchronous requests by importing asyncio.
+```python
+# Asynchronous Example
+import asyncio
+from mistralai_azure import MistralAzure
+import os
+
+async def main():
+    s = MistralAzure(
+        azure_api_key=os.getenv("AZURE_API_KEY", ""),
+        azure_endpoint=os.getenv("AZURE_ENDPOINT", "")
+    )
+    res = await s.chat.complete_async(
+        messages=[
+            {
+                "content": "Who is the best French painter? Answer in one short sentence.",
+                "role": "user",
+            },
+        ],
+        model="azureai"
+    )
+    if res is not None:
+        # handle response
+        pass
+
+asyncio.run(main())
+```
+
+
+
+## Available Resources and Operations
+
+### [chat](docs/sdks/chat/README.md)
+
+* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion
+* [create](docs/sdks/chat/README.md#create) - Chat Completion
+
+
+
+## Server-sent event streaming
+
+[Server-sent events][mdn-sse] are used to stream content from certain
+operations. These operations will expose the stream as [Generator][generator] that
+can be consumed using a simple `for` loop. The loop will
+terminate when the server no longer has any events to send and closes the
+underlying connection.
+
+```python
+from mistralai_azure import MistralAzure
+import os
+
+s = MistralAzure(
+    azure_api_key=os.getenv("AZURE_API_KEY", ""),
+    azure_endpoint=os.getenv("AZURE_ENDPOINT", "")
+)
+
+
+res = s.chat.stream(
+    messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ],
+    model="azureai"
+)
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events
+[generator]: https://wiki.python.org/moin/Generators
+
+
+
+## Retries
+
+Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK.
+
+To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call:
+```python
+from mistralai_azure import MistralAzure
+from mistralai_azure.utils import BackoffStrategy, RetryConfig
+import os
+
+s = MistralAzure(
+    azure_api_key=os.getenv("AZURE_API_KEY", ""),
+    azure_endpoint=os.getenv("AZURE_ENDPOINT", "")
+)
+
+
+res = s.chat.stream(messages=[
+    {
+        "content": "Who is the best French painter? Answer in one short sentence.",
+        "role": "user",
+    },
+], model="azureai",
+    retries=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False))
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK:
+```python
+from mistralai_azure import MistralAzure
+from mistralai_azure.utils import BackoffStrategy, RetryConfig
+import os
+
+s = MistralAzure(
+    retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False),
+    azure_api_key=os.getenv("AZURE_API_KEY", ""),
+    azure_endpoint=os.getenv("AZURE_ENDPOINT", "")
+)
+
+
+res = s.chat.stream(messages=[
+    {
+        "content": "Who is the best French painter?
Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Error Handling + +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +### Example + +```python +from mistralai_azure import MistralAzure, models +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + +res = None +try: + res = s.chat.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +except models.HTTPValidationError as e: + # handle exception + raise(e) +except models.SDKError as e: + # handle exception + raise(e) + +if res is not None: + # handle response + pass + +``` + + + +## Server Selection + +### Select Server by Name + +You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: + +| Name | Server | Variables | +| ------ | ------------------------ | --------- | +| `prod` | `https://api.mistral.ai` | None | + +#### Example + +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + server="prod", + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + +### Override Server URL Per-Client + +The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + server_url="https://api.mistral.ai", + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Custom HTTP Client + +The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. +Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. 
+This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly.
+
+For example, you could specify a header for every request that this SDK makes as follows:
+```python
+from mistralai_azure import MistralAzure
+import httpx
+
+http_client = httpx.Client(headers={"x-custom-header": "someValue"})
+s = MistralAzure(client=http_client)
+```
+
+or you could wrap the client with your own custom logic:
+```python
+from typing import Any, Optional, Union
+
+from mistralai_azure import MistralAzure
+from mistralai_azure.httpclient import AsyncHttpClient
+import httpx
+
+class CustomClient(AsyncHttpClient):
+    client: AsyncHttpClient
+
+    def __init__(self, client: AsyncHttpClient):
+        self.client = client
+
+    async def send(
+        self,
+        request: httpx.Request,
+        *,
+        stream: bool = False,
+        auth: Union[
+            httpx._types.AuthTypes, httpx._client.UseClientDefault, None
+        ] = httpx.USE_CLIENT_DEFAULT,
+        follow_redirects: Union[
+            bool, httpx._client.UseClientDefault
+        ] = httpx.USE_CLIENT_DEFAULT,
+    ) -> httpx.Response:
+        request.headers["Client-Level-Header"] = "added by client"
+
+        return await self.client.send(
+            request, stream=stream, auth=auth, follow_redirects=follow_redirects
+        )
+
+    def build_request(
+        self,
+        method: str,
+        url: httpx._types.URLTypes,
+        *,
+        content: Optional[httpx._types.RequestContent] = None,
+        data: Optional[httpx._types.RequestData] = None,
+        files: Optional[httpx._types.RequestFiles] = None,
+        json: Optional[Any] = None,
+        params: Optional[httpx._types.QueryParamTypes] = None,
+        headers: Optional[httpx._types.HeaderTypes] = None,
+        cookies: Optional[httpx._types.CookieTypes] = None,
+        timeout: Union[
+            httpx._types.TimeoutTypes, httpx._client.UseClientDefault
+        ] = httpx.USE_CLIENT_DEFAULT,
+        extensions: Optional[httpx._types.RequestExtensions] = None,
+    ) -> httpx.Request:
+        return self.client.build_request(
+            method,
+            url,
+            content=content,
+            data=data,
+            files=files,
+            json=json,
+            params=params,
+            headers=headers,
+            cookies=cookies,
+            timeout=timeout,
+            extensions=extensions,
+        )
+
+s = MistralAzure(async_client=CustomClient(httpx.AsyncClient()))
+```
+
+
+
+## Authentication
+
+### Per-Client Security Schemes
+
+This SDK supports the following security scheme globally:
+
+| Name | Type | Scheme |
+| --------- | ---- | ----------- |
+| `api_key` | http | HTTP Bearer |
+
+To authenticate with the API the `azure_api_key` parameter must be set when initializing the SDK client instance. For example:
+```python
+from mistralai_azure import MistralAzure
+import os
+
+s = MistralAzure(
+    azure_api_key=os.getenv("AZURE_API_KEY", ""),
+    azure_endpoint=os.getenv("AZURE_ENDPOINT", "")
+)
+
+
+res = s.chat.stream(messages=[
+    {
+        "content": "Who is the best French painter? Answer in one short sentence.",
+        "role": "user",
+    },
+], model="azureai")
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+
+
+
+# Development
+
+## Contributions
+
+While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation.
+We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release.
diff --git a/packages/mistralai_azure/USAGE.md b/packages/mistralai_azure/USAGE.md new file mode 100644 index 0000000..0ccf3d7 --- /dev/null +++ b/packages/mistralai_azure/USAGE.md @@ -0,0 +1,55 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +```python +# Synchronous Example +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai_azure import MistralAzure +import os + +async def main(): + s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + ) + res = await s.chat.complete_async(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], model="azureai") + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/arguments.md b/packages/mistralai_azure/docs/models/arguments.md new file mode 100644 index 0000000..2e54e27 --- /dev/null +++ b/packages/mistralai_azure/docs/models/arguments.md @@ -0,0 +1,17 @@ +# Arguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md new file mode 100644 index 0000000..0c36cde --- /dev/null +++ b/packages/mistralai_azure/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagerole.md b/packages/mistralai_azure/docs/models/assistantmessagerole.md new file mode 100644 index 0000000..658229e --- /dev/null +++ b/packages/mistralai_azure/docs/models/assistantmessagerole.md @@ -0,0 +1,8 @@ +# AssistantMessageRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoice.md b/packages/mistralai_azure/docs/models/chatcompletionchoice.md new file mode 100644 index 0000000..6fa839b --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionChoice + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | +| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md b/packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md new file mode 100644 index 0000000..b2f15ec --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md @@ -0,0 +1,12 @@ +# ChatCompletionChoiceFinishReason + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md new file mode 100644 index 0000000..3df1e28 --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -0,0 +1,20 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md b/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md new file mode 100644 index 0000000..bc7708a --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequeststop.md b/packages/mistralai_azure/docs/models/chatcompletionrequeststop.md new file mode 100644 index 0000000..749296d --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 0000000..ed32b75 --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionRequestToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionresponse.md b/packages/mistralai_azure/docs/models/chatcompletionresponse.md new file mode 100644 index 0000000..ad37615 --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 0000000..1fc3470 --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,20 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionchunk.md b/packages/mistralai_azure/docs/models/completionchunk.md new file mode 100644 index 0000000..b8ae6a0 --- /dev/null +++ b/packages/mistralai_azure/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionevent.md b/packages/mistralai_azure/docs/models/completionevent.md new file mode 100644 index 0000000..7a66e8f --- /dev/null +++ b/packages/mistralai_azure/docs/models/completionevent.md @@ -0,0 +1,8 @@ +# CompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `data` | [models.CompletionChunk](../models/completionchunk.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md b/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md new file mode 100644 index 0000000..c807dac --- /dev/null +++ b/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/content.md b/packages/mistralai_azure/docs/models/content.md new file mode 100644 index 0000000..a833dc2 --- /dev/null +++ b/packages/mistralai_azure/docs/models/content.md @@ -0,0 +1,17 @@ +# Content + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/contentchunk.md b/packages/mistralai_azure/docs/models/contentchunk.md new file mode 100644 index 0000000..64fc80d --- /dev/null +++ b/packages/mistralai_azure/docs/models/contentchunk.md @@ -0,0 +1,9 @@ +# 
ContentChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/deltamessage.md b/packages/mistralai_azure/docs/models/deltamessage.md new file mode 100644 index 0000000..4cb9e91 --- /dev/null +++ b/packages/mistralai_azure/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | [OptionalNullable[models.ToolCall]](../models/toolcall.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/finishreason.md b/packages/mistralai_azure/docs/models/finishreason.md new file mode 100644 index 0000000..45a5aed --- /dev/null +++ b/packages/mistralai_azure/docs/models/finishreason.md @@ -0,0 +1,11 @@ +# FinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md new file mode 100644 index 0000000..8af398f --- /dev/null +++ b/packages/mistralai_azure/docs/models/function.md @@ -0,0 +1,10 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/functioncall.md b/packages/mistralai_azure/docs/models/functioncall.md new file mode 100644 index 0000000..7ccd90d --- /dev/null +++ b/packages/mistralai_azure/docs/models/functioncall.md @@ -0,0 +1,9 @@ +# FunctionCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.Arguments](../models/arguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/httpvalidationerror.md b/packages/mistralai_azure/docs/models/httpvalidationerror.md new file mode 100644 index 0000000..6389243 --- /dev/null +++ b/packages/mistralai_azure/docs/models/httpvalidationerror.md @@ -0,0 +1,10 @@ +# HTTPValidationError + +Validation Error + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `detail` | 
List[[models.ValidationError](../models/validationerror.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/loc.md b/packages/mistralai_azure/docs/models/loc.md new file mode 100644 index 0000000..d6094ac --- /dev/null +++ b/packages/mistralai_azure/docs/models/loc.md @@ -0,0 +1,17 @@ +# Loc + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/messages.md b/packages/mistralai_azure/docs/models/messages.md new file mode 100644 index 0000000..1d39450 --- /dev/null +++ b/packages/mistralai_azure/docs/models/messages.md @@ -0,0 +1,29 @@ +# Messages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md new file mode 100644 index 0000000..2704eab --- /dev/null +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -0,0 +1,8 @@ +# ResponseFormat + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md new file mode 100644 index 0000000..ce35fbb --- /dev/null +++ b/packages/mistralai_azure/docs/models/responseformats.md @@ -0,0 +1,11 @@ +# ResponseFormats + +An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
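For illustration only, here is a minimal sketch of enabling JSON mode with the chat client shown later in this package's README. The plain-dict forms of `messages` and `response_format` are assumed to be coerced into the corresponding models by the SDK, and the prompt text is purely illustrative:

```python
import os

from mistralai_azure import MistralAzure

s = MistralAzure(
    azure_api_key=os.getenv("AZURE_API_KEY", ""),
    azure_endpoint=os.getenv("AZURE_ENDPOINT", ""),
)

# Enable JSON mode and, as the description above requires, also ask for JSON
# explicitly in a system or user message.
res = s.chat.complete(
    messages=[
        {
            "role": "user",
            "content": "List three French painters as a JSON object with a 'painters' array.",
        },
    ],
    model="azureai",
    response_format={"type": "json_object"},
)

if res is not None:
    # The assistant message content should be a JSON string; parse it downstream.
    print(res)
```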
+ + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/role.md b/packages/mistralai_azure/docs/models/role.md new file mode 100644 index 0000000..affca78 --- /dev/null +++ b/packages/mistralai_azure/docs/models/role.md @@ -0,0 +1,8 @@ +# Role + + +## Values + +| Name | Value | +| -------- | -------- | +| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/security.md b/packages/mistralai_azure/docs/models/security.md new file mode 100644 index 0000000..c698674 --- /dev/null +++ b/packages/mistralai_azure/docs/models/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `api_key` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/stop.md b/packages/mistralai_azure/docs/models/stop.md new file mode 100644 index 0000000..ba40ca8 --- /dev/null +++ b/packages/mistralai_azure/docs/models/stop.md @@ -0,0 +1,19 @@ +# Stop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/systemmessage.md b/packages/mistralai_azure/docs/models/systemmessage.md new file mode 100644 index 0000000..7f82798 --- /dev/null +++ b/packages/mistralai_azure/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/textchunk.md b/packages/mistralai_azure/docs/models/textchunk.md new file mode 100644 index 0000000..34e4dd6 --- /dev/null +++ b/packages/mistralai_azure/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tool.md b/packages/mistralai_azure/docs/models/tool.md new file mode 100644 index 0000000..291394c --- /dev/null +++ b/packages/mistralai_azure/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md new file mode 100644 index 0000000..bd2dc9f --- /dev/null 
+++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -0,0 +1,10 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolchoice.md b/packages/mistralai_azure/docs/models/toolchoice.md new file mode 100644 index 0000000..b84f51f --- /dev/null +++ b/packages/mistralai_azure/docs/models/toolchoice.md @@ -0,0 +1,10 @@ +# ToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessage.md b/packages/mistralai_azure/docs/models/toolmessage.md new file mode 100644 index 0000000..364339e --- /dev/null +++ b/packages/mistralai_azure/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessagerole.md b/packages/mistralai_azure/docs/models/toolmessagerole.md new file mode 100644 index 0000000..c24e59c --- /dev/null +++ b/packages/mistralai_azure/docs/models/toolmessagerole.md @@ -0,0 +1,8 @@ +# ToolMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usageinfo.md b/packages/mistralai_azure/docs/models/usageinfo.md new file mode 100644 index 0000000..9f56a3a --- /dev/null +++ b/packages/mistralai_azure/docs/models/usageinfo.md @@ -0,0 +1,10 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | +| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | +| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessage.md b/packages/mistralai_azure/docs/models/usermessage.md new file mode 100644 index 0000000..3d96f1c --- /dev/null +++ b/packages/mistralai_azure/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| 
`content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessagecontent.md b/packages/mistralai_azure/docs/models/usermessagecontent.md new file mode 100644 index 0000000..86ebd18 --- /dev/null +++ b/packages/mistralai_azure/docs/models/usermessagecontent.md @@ -0,0 +1,17 @@ +# UserMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/usermessagerole.md b/packages/mistralai_azure/docs/models/usermessagerole.md new file mode 100644 index 0000000..171124e --- /dev/null +++ b/packages/mistralai_azure/docs/models/usermessagerole.md @@ -0,0 +1,8 @@ +# UserMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/utils/retryconfig.md b/packages/mistralai_azure/docs/models/utils/retryconfig.md new file mode 100644 index 0000000..69dd549 --- /dev/null +++ b/packages/mistralai_azure/docs/models/utils/retryconfig.md @@ -0,0 +1,24 @@ +# RetryConfig + +Allows customizing the default retry configuration. Only usable with methods that mention they support retries. + +## Fields + +| Name | Type | Description | Example | +| ------------------------- | ----------------------------------- | --------------------------------------- | --------- | +| `strategy` | `*str*` | The retry strategy to use. | `backoff` | +| `backoff` | [BackoffStrategy](#backoffstrategy) | Configuration for the backoff strategy. | | +| `retry_connection_errors` | `*bool*` | Whether to retry on connection errors. | `true` | + +## BackoffStrategy + +The backoff strategy allows retrying a request with an exponential backoff between each retry. + +### Fields + +| Name | Type | Description | Example | +| ------------------ | --------- | ----------------------------------------- | -------- | +| `initial_interval` | `*int*` | The initial interval in milliseconds. | `500` | +| `max_interval` | `*int*` | The maximum interval in milliseconds. | `60000` | +| `exponent` | `*float*` | The exponent to use for the backoff. | `1.5` | +| `max_elapsed_time` | `*int*` | The maximum elapsed time in milliseconds. 
| `300000` | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/validationerror.md b/packages/mistralai_azure/docs/models/validationerror.md new file mode 100644 index 0000000..7a1654a --- /dev/null +++ b/packages/mistralai_azure/docs/models/validationerror.md @@ -0,0 +1,10 @@ +# ValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/sdks/chat/README.md b/packages/mistralai_azure/docs/sdks/chat/README.md new file mode 100644 index 0000000..26d20bb --- /dev/null +++ b/packages/mistralai_azure/docs/sdks/chat/README.md @@ -0,0 +1,129 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. + +### Available Operations + +* [stream](#stream) - Stream chat completion +* [create](#create) - Chat Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## create + +Chat Completion + +### Example Usage + +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.complete(messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_azure/docs/sdks/mistralazure/README.md b/packages/mistralai_azure/docs/sdks/mistralazure/README.md new file mode 100644 index 0000000..381000a --- /dev/null +++ b/packages/mistralai_azure/docs/sdks/mistralazure/README.md @@ -0,0 +1,9 @@ +# MistralAzure SDK + + +## Overview + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. 
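As a quick orientation, the sketch below mirrors the chat examples shown above in `docs/sdks/chat/README.md` and additionally overrides the default retry behavior documented under `docs/models/utils/retryconfig.md`. The `mistralai_azure.utils` import path and the positional `RetryConfig`/`BackoffStrategy` constructors are assumptions inferred from the documented fields; the interval values are the documented examples:

```python
import os

from mistralai_azure import MistralAzure
from mistralai_azure.utils import BackoffStrategy, RetryConfig  # assumed import path

s = MistralAzure(
    azure_api_key=os.getenv("AZURE_API_KEY", ""),
    azure_endpoint=os.getenv("AZURE_ENDPOINT", ""),
)

# Per-call retry override: exponential backoff starting at 500 ms, capped at 60 s,
# exponent 1.5, giving up after 300 s; connection errors are retried as well.
res = s.chat.complete(
    messages=[
        {
            "role": "user",
            "content": "Who is the best French painter? Answer in one short sentence.",
        },
    ],
    model="azureai",
    retries=RetryConfig("backoff", BackoffStrategy(500, 60000, 1.5, 300000), True),
)

if res is not None:
    # handle response
    print(res)
```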
+ +### Available Operations + diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock new file mode 100644 index 0000000..477ecfd --- /dev/null +++ b/packages/mistralai_azure/poetry.lock @@ -0,0 +1,638 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "anyio" +version = "4.4.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] + +[[package]] +name = "astroid" +version = "3.2.4" +description = "An abstract syntax tree for Python with inference support." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, + {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "certifi" +version = "2024.7.4" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.5" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.26.0)"] + +[[package]] +name = "httpx" +version = "0.27.0" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "idna" +version = "3.7" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jsonpath-python" +version = "1.0.6" +description = "A more powerful JSONPath implementation in modern python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, + {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mypy" +version = "1.10.1" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.1.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = 
"1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "platformdirs" +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pydantic" +version = "2.8.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.20.1" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.20.1" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = 
"pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = 
"pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pylint" +version = "3.2.3" +description = "python code static checker" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, + {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, +] + +[package.dependencies] +astroid = ">=3.2.2,<=3.3.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pyright" +version = "1.1.374" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyright-1.1.374-py3-none-any.whl", hash = "sha256:55752bcf7a3646d293cd76710a983b71e16f6128aab2d42468e6eb7e46c0a70d"}, + {file = "pyright-1.1.374.tar.gz", hash = "sha256:d01b2daf864ba5e0362e56b844984865970d7204158e61eb685e2dab7804cb82"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" + +[package.extras] +all = ["twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] + +[[package]] +name = "pytest" +version = "8.3.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, + {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to 
the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tomlkit" +version = "0.13.0" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, + {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20240316" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, + {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
+optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + +[metadata] +lock-version = "2.0" +python-versions = "^3.8" +content-hash = "1478d3764c93fadedc6a94a2b911eb59eb142cd4b127d65deb7120a378e07c45" diff --git a/packages/mistralai_azure/poetry.toml b/packages/mistralai_azure/poetry.toml new file mode 100644 index 0000000..ab1033b --- /dev/null +++ b/packages/mistralai_azure/poetry.toml @@ -0,0 +1,2 @@ +[virtualenvs] +in-project = true diff --git a/packages/mistralai_azure/py.typed b/packages/mistralai_azure/py.typed new file mode 100644 index 0000000..3e38f1a --- /dev/null +++ b/packages/mistralai_azure/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc new file mode 100644 index 0000000..5080038 --- /dev/null +++ b/packages/mistralai_azure/pylintrc @@ -0,0 +1,658 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. 
+ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.8 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. 
+#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. 
+defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. 
+known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. 
+never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. 
The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml new file mode 100644 index 0000000..15aea3d --- /dev/null +++ b/packages/mistralai_azure/pyproject.toml @@ -0,0 +1,56 @@ +[tool.poetry] +name = "mistralai_azure" +version = "1.0.0-rc.2" +description = "Python Client SDK for the Mistral AI API in Azure." 
+authors = ["Mistral",] +readme = "README.md" +packages = [ + { include = "mistralai_azure", from = "src" } +] +include = ["py.typed", "src/mistralai_azure/py.typed"] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai_azure/py.typed"] + +[virtualenvs] +in-project = true + +[tool.poetry.dependencies] +python = "^3.8" +httpx = "^0.27.0" +jsonpath-python = "^1.0.6" +pydantic = "~2.8.2" +python-dateutil = "^2.9.0.post0" +typing-inspect = "^0.9.0" + +[tool.poetry.group.dev.dependencies] +mypy = "==1.10.1" +pylint = "==3.2.3" +pyright = "==1.1.374" +pytest = "^8.2.2" +pytest-asyncio = "^0.23.7" +types-python-dateutil = "^2.9.0.20240316" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true + +[tool.pyright] +venvPath = "." +venv = ".venv" + + diff --git a/packages/mistralai_azure/scripts/compile.sh b/packages/mistralai_azure/scripts/compile.sh new file mode 100755 index 0000000..aa49772 --- /dev/null +++ b/packages/mistralai_azure/scripts/compile.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +set -o pipefail # Ensure pipeline failures are propagated + +# Use temporary files to store outputs and exit statuses +declare -A output_files +declare -A status_files + +# Function to run a command with temporary output and status files +run_command() { + local cmd="$1" + local key="$2" + local output_file="$3" + local status_file="$4" + + # Run the command and store output and exit status + { + eval "$cmd" + echo $? > "$status_file" + } &> "$output_file" & +} + +# Create temporary files for outputs and statuses +for cmd in compileall pylint mypy pyright; do + output_files[$cmd]=$(mktemp) + status_files[$cmd]=$(mktemp) +done + +# Collect PIDs for background processes +declare -a pids + +# Run commands in parallel using temporary files +echo "Running python -m compileall" +run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" +pids+=($!) + +echo "Running pylint" +run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" +pids+=($!) + +echo "Running mypy" +run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" +pids+=($!) + +echo "Running pyright (optional)" +run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" +pids+=($!) + +# Wait for all processes to complete +echo "Waiting for processes to complete" +for pid in "${pids[@]}"; do + wait "$pid" +done + +# Print output sequentially and check for failures +failed=false +for key in "${!output_files[@]}"; do + echo "--- Output from Command: $key ---" + echo + cat "${output_files[$key]}" + echo # Empty line for separation + echo "--- End of Output from Command: $key ---" + echo + + exit_status=$(cat "${status_files[$key]}") + if [ "$exit_status" -ne 0 ]; then + echo "Command $key failed with exit status $exit_status" >&2 + failed=true + fi +done + +# Clean up temporary files +for tmp_file in "${output_files[@]}" "${status_files[@]}"; do + rm -f "$tmp_file" +done + +if $failed; then + echo "One or more commands failed." 
>&2 + exit 1 +else + echo "All commands completed successfully." + exit 0 +fi diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh new file mode 100755 index 0000000..1ee7194 --- /dev/null +++ b/packages/mistralai_azure/scripts/publish.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} + +poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py new file mode 100644 index 0000000..68138c4 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdk import * +from .sdkconfiguration import * +from .models import * diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py new file mode 100644 index 0000000..2ee66cd --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py new file mode 100644 index 0000000..b03549c --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py @@ -0,0 +1,16 @@ +# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py +from typing import Union + +import httpx + +from .types import BeforeRequestContext, BeforeRequestHook + + +class CustomUserAgentHook(BeforeRequestHook): + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + request.headers["user-agent"] = ( + "mistral-client-python/" + request.headers["user-agent"].split(" ")[1] + ) + return request diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py new file mode 100644 index 0000000..304edfa --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py @@ -0,0 +1,15 @@ +from .custom_user_agent import CustomUserAgentHook +from .types import Hooks + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. + + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + hooks.register_before_request_hook(CustomUserAgentHook()) diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py new file mode 100644 index 0000000..c8e9631 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai_azure.httpclient import HttpClient + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py new file mode 100644 index 0000000..3076b41 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + + +from abc import ABC, abstractmethod +import httpx +from mistralai_azure.httpclient import HttpClient +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py new file mode 100644 index 0000000..1f22dbc --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -0,0 +1,253 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
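For orientation, the sketch below shows how `_hooks/registration.py` could be extended with an additional hook, using the `BeforeRequestHook` interface and `Hooks` registry defined in `_hooks/types.py` above. It is illustrative only and not part of this patch; `RequestIDHook` and the `x-request-id` header are hypothetical.

# Hypothetical edit to src/mistralai_azure/_hooks/registration.py -- not part of this diff.
from typing import Union

import httpx

from .custom_user_agent import CustomUserAgentHook
from .types import BeforeRequestContext, BeforeRequestHook, Hooks


class RequestIDHook(BeforeRequestHook):
    """Hypothetical hook: tag each outgoing request with the operation id."""

    def before_request(
        self, hook_ctx: BeforeRequestContext, request: httpx.Request
    ) -> Union[httpx.Request, Exception]:
        # Returning the (possibly mutated) request lets SDKHooks.before_request
        # keep chaining hooks; returning an Exception aborts the request instead.
        request.headers["x-request-id"] = hook_ctx.operation_id
        return request


def init_hooks(hooks: Hooks):
    # Hooks run in registration order and are valid for the lifetime of the SDK instance.
    hooks.register_before_request_hook(CustomUserAgentHook())
    hooks.register_before_request_hook(RequestIDHook())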
DO NOT EDIT.""" + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai_azure import models, utils +from mistralai_azure._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai_azure.utils import RetryConfig, SerializedRequestBody, get_body_content +from typing import Callable, List, Optional, Tuple + +class BaseSDK: + sdk_configuration: SDKConfiguration + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self.sdk_configuration = sdk_config + + def get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self.get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if request_has_query_params else None, + ) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody("application/octet-stream") + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = self.sdk_configuration.debug_logger + + def do(): + http_res = None + try: + req = self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req) + ) + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = 
self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + async def do(): + http_res = None + try: + req = self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req) + ) + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py new file mode 100644 index 0000000..a5e172d --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -0,0 +1,470 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
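Before the generated `chat.py` below, a brief usage sketch may help tie the pieces together: the chat methods it defines accept per-call overrides (`retries`, `server_url`, `timeout_ms`) that `BaseSDK.do_request` plumbs through. This sketch is illustrative and not part of the patch; the `MistralAzure` entry point, its constructor arguments, the environment variables, and the backoff values are assumptions rather than lines shown in this diff.

# Illustrative usage sketch -- not part of the generated files.
import os

from mistralai_azure import MistralAzure                        # assumed SDK entry point
from mistralai_azure.utils import BackoffStrategy, RetryConfig  # see docs/models/utils/retryconfig.md

client = MistralAzure(
    azure_api_key=os.getenv("AZUREAI_API_KEY", ""),      # placeholder credentials
    azure_endpoint=os.getenv("AZUREAI_ENDPOINT", ""),    # placeholder endpoint
)

# Non-streaming completion; the retries override maps to the retry_config tuple
# that complete() builds for status codes 429/500/502/503/504.
res = client.chat.complete(
    messages=[{"role": "user", "content": "Hello"}],
    retries=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False),
)
if res is not None and res.choices:
    print(res.choices[0].message.content)

# Streaming variant: stream() yields CompletionEvent objects until the
# data: [DONE] sentinel closes the server-sent event stream.
events = client.chat.stream(messages=[{"role": "user", "content": "Hello"}])
if events is not None:
    for event in events:
        if event.data.choices:
            print(event.data.choices[0].delta.content or "", end="")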
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai_azure import models, utils +from mistralai_azure._hooks import HookContext +from mistralai_azure.types import OptionalNullable, UNSET +from mistralai_azure.utils import eventstreaming +from typing import Any, AsyncGenerator, Generator, List, Optional, Union + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + + def stream( + self, *, + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + model: OptionalNullable[str] = "azureai", + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + model: OptionalNullable[str] = "azureai", + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = 
UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def complete( + self, *, + messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + model: OptionalNullable[str] = "azureai", + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: 
Optional[models.ChatCompletionRequestToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + model: OptionalNullable[str] = "azureai", + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto", + 
safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param model: The ID of the model to use for this request. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py new file mode 100644 index 0000000..36b642a --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
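[Editor's note] For orientation, a minimal usage sketch for the complete/complete_async methods generated above. It is illustrative only, not part of the generated sources; the environment variable names and the prompt are assumptions.

    import os

    from mistralai_azure import MistralAzure

    client = MistralAzure(
        azure_api_key=os.environ["AZURE_API_KEY"],    # assumed variable name
        azure_endpoint=os.environ["AZURE_ENDPOINT"],  # assumed variable name
    )

    # messages is a list of dicts with "role" and "content"; timeout_ms and
    # server_url are the per-call overrides documented in the docstring above.
    res = client.chat.complete(
        messages=[{"role": "user", "content": "Say hello in one word."}],
        max_tokens=16,
        timeout_ms=30_000,
    )
    if res is not None and res.choices:
        print(res.choices[0].message.content)
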
DO NOT EDIT.""" + +# pyright: reportReturnType = false +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py new file mode 100644 index 0000000..a102b13 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
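[Editor's note] Because httpx.Client and httpx.AsyncClient already satisfy the HttpClient/AsyncHttpClient protocols defined above, a pre-configured httpx client can be injected through the constructor's client/async_client parameters. A hypothetical sketch; key and endpoint are placeholders.

    import httpx

    from mistralai_azure import MistralAzure

    # Any object implementing the HttpClient protocol is accepted; a plain
    # httpx.Client with a custom timeout is the simplest example.
    http_client = httpx.Client(timeout=httpx.Timeout(60.0))

    client = MistralAzure(
        azure_api_key="my-api-key",                        # placeholder
        azure_endpoint="https://my-endpoint.example.com",  # placeholder
        client=http_client,
    )
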
DO NOT EDIT.""" + +from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict +from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestMessages, ChatCompletionRequestMessagesTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestTypedDict +from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict +from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from .completionevent import CompletionEvent, CompletionEventTypedDict +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict, FinishReason +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .function import Function, FunctionTypedDict +from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict +from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats +from .sdkerror import SDKError +from .security import Security, SecurityTypedDict +from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .tool import Tool, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict +from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict + +__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", 
"UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py new file mode 100644 index 0000000..c7bc4b4 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +AssistantMessageRole = Literal["assistant"] + +class AssistantMessageTypedDict(TypedDict): + content: NotRequired[Nullable[str]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: NotRequired[AssistantMessageRole] + + +class AssistantMessage(BaseModel): + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: Optional[AssistantMessageRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["content", "tool_calls", "prefix", "role"] + nullable_fields = ["content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py new file mode 100644 index 0000000..acfd5bb --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai_azure.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + finish_reason: ChatCompletionChoiceFinishReason + message: NotRequired[AssistantMessageTypedDict] + + +class ChatCompletionChoice(BaseModel): + index: int + finish_reason: ChatCompletionChoiceFinishReason + message: Optional[AssistantMessage] = None + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py new file mode 100644 index 0000000..352e883 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ChatCompletionRequestToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionRequestTypedDict(TypedDict): + messages: List[ChatCompletionRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: NotRequired[Nullable[str]] + r"""The ID of the model to use for this request.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[ChatCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ChatCompletionRequestToolChoice] + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionRequest(BaseModel): + messages: List[ChatCompletionRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: OptionalNullable[str] = "azureai" + r"""The ID of the model to use for this request.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[ChatCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionRequestToolChoice] = "auto" + safe_prompt: Optional[bool] = False + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +ChatCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py new file mode 100644 index 0000000..8859121 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_azure.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class ChatCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py new file mode 100644 index 0000000..85276b1 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
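[Editor's note] A hypothetical construction sketch for the ChatCompletionRequest model above: messages is a role-discriminated union, so the typed message models (or plain dicts carrying a role key) can be passed directly, and the generated serialize_model hook omits fields left unset while keeping fields explicitly set to None.

    from mistralai_azure import models

    req = models.ChatCompletionRequest(
        messages=[
            models.SystemMessage(content="Answer in one short sentence."),
            models.UserMessage(content="What does this SDK wrap?"),
        ],
        max_tokens=64,
    )

    # Unset nullable fields (e.g. random_seed, tools) are dropped from the
    # payload; defaults such as model="azureai" and temperature=0.7 are kept.
    print(req.model_dump_json())
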
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionStreamRequestTypedDict(TypedDict): + messages: List[MessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: NotRequired[Nullable[str]] + r"""The ID of the model to use for this request.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[StopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ToolChoice] + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionStreamRequest(BaseModel): + messages: List[Messages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: OptionalNullable[str] = "azureai" + r"""The ID of the model to use for this request.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[Stop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ToolChoice] = "auto" + safe_prompt: Optional[bool] = False + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +StopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py new file mode 100644 index 0000000..f51aca3 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_azure.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py new file mode 100644 index 0000000..2f8f4b9 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai_azure.types import BaseModel +from typing import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py new file mode 100644 index 0000000..76f7fce --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, TypedDict + + +FinishReason = Literal["stop", "length", "error", "tool_calls"] + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[FinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + delta: DeltaMessage + finish_reason: Nullable[FinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["finish_reason"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py new file mode 100644 index 0000000..a45f2bd --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
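[Editor's note] These are the models yielded by the streaming methods: stream_async returns an async generator of CompletionEvent values, each wrapping a CompletionChunk whose choices carry a DeltaMessage. A hypothetical consumption sketch; the environment variable names are assumptions.

    import asyncio
    import os

    from mistralai_azure import MistralAzure

    async def main() -> None:
        client = MistralAzure(
            azure_api_key=os.environ["AZURE_API_KEY"],    # assumed variable name
            azure_endpoint=os.environ["AZURE_ENDPOINT"],  # assumed variable name
        )
        events = await client.chat.stream_async(
            messages=[{"role": "user", "content": "Stream a short haiku."}],
        )
        if events is None:
            return
        async for event in events:                 # models.CompletionEvent
            delta = event.data.choices[0].delta    # models.DeltaMessage
            if delta.content:
                print(delta.content, end="", flush=True)
        print()

    asyncio.run(main())
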
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ContentChunkTypedDict(TypedDict): + text: str + + +class ContentChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py new file mode 100644 index 0000000..68d0221 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[str] + content: NotRequired[str] + tool_calls: NotRequired[Nullable[ToolCallTypedDict]] + + +class DeltaMessage(BaseModel): + role: Optional[str] = None + content: Optional[str] = None + tool_calls: OptionalNullable[ToolCall] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role", "content", "tool_calls"] + nullable_fields = ["tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py new file mode 100644 index 0000000..6ffcacf --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing import Any, Dict, Optional, TypedDict +from typing_extensions import NotRequired + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + + +class Function(BaseModel): + name: str + parameters: Dict[str, Any] + description: Optional[str] = "" + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py new file mode 100644 index 0000000..2a9bc80 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing import Any, Dict, TypedDict, Union + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + arguments: Arguments + + +ArgumentsTypedDict = Union[Dict[str, Any], str] + + +Arguments = Union[Dict[str, Any], str] + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py new file mode 100644 index 0000000..de07a3d --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .validationerror import ValidationError +from mistralai_azure import utils +from mistralai_azure.types import BaseModel +from typing import List, Optional + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[ValidationError]] = None + + + +class HTTPValidationError(Exception): + r"""Validation Error""" + data: HTTPValidationErrorData + + def __init__(self, data: HTTPValidationErrorData): + self.data = data + + def __str__(self) -> str: + return utils.marshal_json(self.data, HTTPValidationErrorData) + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py new file mode 100644 index 0000000..0dac0f6 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ResponseFormats = Literal["text", "json_object"] +r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + +class ResponseFormatTypedDict(TypedDict): + type: NotRequired[ResponseFormats] + + +class ResponseFormat(BaseModel): + type: Optional[ResponseFormats] = "text" + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py b/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py new file mode 100644 index 0000000..03216cb --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
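[Editor's note] As the ResponseFormats docstring above states, JSON mode must be paired with an explicit instruction to produce JSON, and 422 responses are raised as HTTPValidationError (other non-2xx responses raise the SDKError defined next). A hypothetical sketch; environment variable names are assumptions.

    import os

    from mistralai_azure import MistralAzure, models

    client = MistralAzure(
        azure_api_key=os.environ["AZURE_API_KEY"],    # assumed variable name
        azure_endpoint=os.environ["AZURE_ENDPOINT"],  # assumed variable name
    )

    try:
        res = client.chat.complete(
            messages=[
                {"role": "system", "content": "Reply only with a JSON object."},
                {"role": "user", "content": "Return a JSON object with keys city and temp."},
            ],
            response_format={"type": "json_object"},  # enables JSON mode
        )
        if res is not None and res.choices:
            print(res.choices[0].message.content)
    except models.HTTPValidationError as err:
        print("validation error:", err.data.detail)   # HTTPValidationErrorData
    except models.SDKError as err:
        print("API error:", err.status_code, err.message)
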
DO NOT EDIT.""" + +from dataclasses import dataclass +from typing import Optional +import httpx + + +@dataclass +class SDKError(Exception): + """Represents an error returned by the API.""" + + message: str + status_code: int = -1 + body: str = "" + raw_response: Optional[httpx.Response] = None + + def __str__(self): + body = "" + if len(self.body) > 0: + body = f"\n{self.body}" + + return f"{self.message}: Status {self.status_code}{body}" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/security.py b/packages/mistralai_azure/src/mistralai_azure/models/security.py new file mode 100644 index 0000000..94d9e64 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/security.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from mistralai_azure.utils import FieldMetadata, SecurityMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class SecurityTypedDict(TypedDict): + api_key: str + + +class Security(BaseModel): + api_key: Annotated[str, FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py new file mode 100644 index 0000000..1ed8a75 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai_azure.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +Role = Literal["system"] + +class SystemMessageTypedDict(TypedDict): + content: ContentTypedDict + role: NotRequired[Role] + + +class SystemMessage(BaseModel): + content: Content + role: Optional[Role] = "system" + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py new file mode 100644 index 0000000..12f2e78 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class TextChunkTypedDict(TypedDict): + text: str + + +class TextChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/mistralai_azure/src/mistralai_azure/models/tool.py new file mode 100644 index 0000000..e77c77d --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/tool.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai_azure.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + + +class Tool(BaseModel): + function: Function + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py new file mode 100644 index 0000000..f15bee9 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from mistralai_azure.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + + +class ToolCall(BaseModel): + function: FunctionCall + id: Optional[str] = "null" + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py new file mode 100644 index 0000000..e845297 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ToolMessageRole = Literal["tool"] + +class ToolMessageTypedDict(TypedDict): + content: str + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + role: NotRequired[ToolMessageRole] + + +class ToolMessage(BaseModel): + content: str + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET + role: Optional[ToolMessageRole] = "tool" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["tool_call_id", "name", "role"] + nullable_fields = ["tool_call_id", "name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py new file mode 100644 index 0000000..f30c1eb --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
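[Editor's note] The three models above cover the function-calling round trip: a Tool wraps a Function schema, the assistant can respond with ToolCalls whose arguments may arrive as a dict or as a JSON string (the Arguments union shown earlier), and the tool result is sent back as a ToolMessage. A hypothetical client-side handler with placeholder tool logic:

    import json

    from mistralai_azure import models

    # A tool definition the model is allowed to call.
    weather_tool = models.Tool(
        function=models.Function(
            name="get_weather",
            description="Return the current weather for a city.",
            parameters={
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        )
    )

    def handle_tool_call(call: models.ToolCall) -> models.ToolMessage:
        args = call.function.arguments
        if isinstance(args, str):            # arguments may be JSON-encoded
            args = json.loads(args)
        result = f"Sunny in {args['city']}"  # placeholder implementation
        return models.ToolMessage(
            content=result,
            tool_call_id=call.id,
            name=call.function.name,
        )
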
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing import TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class UsageInfo(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py new file mode 100644 index 0000000..8ddc8c8 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai_azure.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +UserMessageRole = Literal["user"] + +class UserMessageTypedDict(TypedDict): + content: UserMessageContentTypedDict + role: NotRequired[UserMessageRole] + + +class UserMessage(BaseModel): + content: UserMessageContent + role: Optional[UserMessageRole] = "user" + + +UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] + + +UserMessageContent = Union[str, List[TextChunk]] + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py new file mode 100644 index 0000000..626e9c4 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing import List, TypedDict, Union + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + msg: str + type: str + + +LocTypedDict = Union[str, int] + + +Loc = Union[str, int] + diff --git a/packages/mistralai_azure/src/mistralai_azure/py.typed b/packages/mistralai_azure/src/mistralai_azure/py.typed new file mode 100644 index 0000000..3e38f1a --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/mistralai_azure/src/mistralai_azure/sdk.py new file mode 100644 index 0000000..a83faa7 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/sdk.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from typing import Any, Callable, Dict, Optional, Union + +import httpx +from mistralai_azure import models, utils +from mistralai_azure._hooks import SDKHooks +from mistralai_azure.chat import Chat +from mistralai_azure.types import Nullable + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, HttpClient +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, NoOpLogger +from .utils.retries import RetryConfig + + +class MistralAzure(BaseSDK): + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + + chat: Chat + r"""Chat Completion API""" + + def __init__( + self, + azure_api_key: Union[str, Callable[[], str]], + azure_endpoint: str, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: Optional[Nullable[RetryConfig]] = None, + debug_logger: Optional[Logger] = None, + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. + + :param azure_api_key: The azure_api_key required for authentication + :param azure_endpoint: The Azure AI endpoint URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + """ + # if azure_endpoint doesn't end with `/v1` add it + if not azure_endpoint.endswith("/"): + azure_endpoint += "/" + if not azure_endpoint.endswith("v1/"): + azure_endpoint += "v1/" + server_url = azure_endpoint + + if client is None: + client = httpx.Client() + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + if async_client is None: + async_client = httpx.AsyncClient() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." + + if debug_logger is None: + debug_logger = NoOpLogger() + + security: Any = None + if callable(azure_api_key): + security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment + api_key=azure_api_key() + ) + else: + security = models.Security(api_key=azure_api_key) + + if server_url is not None: + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + async_client=async_client, + security=security, + server_url=server_url, + server=None, + retry_config=retry_config, + debug_logger=debug_logger, + ), + ) + + hooks = SDKHooks() + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, self.sdk_configuration.client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + + self._init_sdks() + + def _init_sdks(self): + self.chat = Chat(self.sdk_configuration) diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py new file mode 100644 index 0000000..5ba1c4c --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
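[Editor's note] Two behaviours of the MistralAzure constructor above are easy to miss: the endpoint is normalized so that it always ends with v1/, and azure_api_key may be a zero-argument callable so the key can be resolved lazily (for example from a secrets manager). A hypothetical sketch with placeholder values:

    from mistralai_azure import MistralAzure

    def fetch_key() -> str:
        # Placeholder for a secrets-manager lookup.
        return "my-rotating-key"

    client = MistralAzure(
        azure_api_key=fetch_key,  # callable form; a plain string also works
        # ".../v1/" is appended automatically if missing:
        azure_endpoint="https://my-deployment.example.com",
    )
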
DO NOT EDIT.""" + + +from ._hooks import SDKHooks +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai_azure import models +from mistralai_azure.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_PROD = "prod" +r"""Production server""" +SERVERS = { + SERVER_PROD: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: HttpClient + async_client: AsyncHttpClient + debug_logger: Logger + security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = "0.0.2" + sdk_version: str = "1.0.0-rc.2" + gen_version: str = "2.388.1" + user_agent: str = "speakeasy-sdk/python 1.0.0-rc.2 2.388.1 0.0.2 mistralai_azure" + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def __post_init__(self): + self._hooks = SDKHooks() + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_PROD + + if self.server not in SERVERS: + raise ValueError(f"Invalid server \"{self.server}\"") + + return SERVERS[self.server], {} + + + def get_hooks(self) -> SDKHooks: + return self._hooks diff --git a/packages/mistralai_azure/src/mistralai_azure/types/__init__.py b/packages/mistralai_azure/src/mistralai_azure/types/__init__.py new file mode 100644 index 0000000..fc76fe0 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/types/__init__.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basemodel import ( + BaseModel, + Nullable, + OptionalNullable, + UnrecognizedInt, + UnrecognizedStr, + UNSET, + UNSET_SENTINEL, +) + +__all__ = [ + "BaseModel", + "Nullable", + "OptionalNullable", + "UnrecognizedInt", + "UnrecognizedStr", + "UNSET", + "UNSET_SENTINEL", +] diff --git a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py b/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py new file mode 100644 index 0000000..a6187ef --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from pydantic import ConfigDict, model_serializer +from pydantic import BaseModel as PydanticBaseModel +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing_extensions import TypeAliasType, TypeAlias + + +class BaseModel(PydanticBaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() + ) + + +class Unset(BaseModel): + @model_serializer(mode="plain") + def serialize_model(self): + return UNSET_SENTINEL + + def __bool__(self) -> Literal[False]: + return False + + +UNSET = Unset() +UNSET_SENTINEL = "~?~unset~?~sentinel~?~" + + +T = TypeVar("T") +if TYPE_CHECKING: + Nullable: TypeAlias = Union[T, None] + OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] +else: + Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) + OptionalNullable = TypeAliasType( + "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) + ) + +UnrecognizedInt = NewType("UnrecognizedInt", int) +UnrecognizedStr = NewType("UnrecognizedStr", str) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py new file mode 100644 index 0000000..95aa1b6 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .annotations import get_discriminator +from .enums import OpenEnumMeta +from .headers import get_headers, get_response_headers +from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, +) +from .queryparams import get_query_params +from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig +from .requestbodies import serialize_request_body, SerializedRequestBody +from .security import get_security +from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, +) +from .url import generate_url, template_url, remove_suffix +from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .logger import Logger, get_body_content, NoOpLogger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_discriminator", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "HeaderMetadata", + "Logger", + "marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "NoOpLogger", + "OpenEnumMeta", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + "serialize_request_body", + "SerializedRequestBody", + "stream_to_text", + "template_url", + "unmarshal", + "unmarshal_json", + "validate_decimal", + "validate_float", + "validate_int", + "validate_open_enum", +] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py new 
file mode 100644 index 0000000..0d17472 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any + +def get_discriminator(model: Any, fieldname: str, key: str) -> str: + if isinstance(model, dict): + try: + return f'{model.get(key)}' + except AttributeError as e: + raise ValueError(f'Could not find discriminator key {key} in {model}') from e + + if hasattr(model, fieldname): + return f'{getattr(model, fieldname)}' + + fieldname = fieldname.upper() + if hasattr(model, fieldname): + return f'{getattr(model, fieldname)}' + + raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py b/packages/mistralai_azure/src/mistralai_azure/utils/enums.py new file mode 100644 index 0000000..c650b10 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/enums.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import enum + + +class OpenEnumMeta(enum.EnumMeta): + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py b/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py new file mode 100644 index 0000000..553b386 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py @@ -0,0 +1,178 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import json +from typing import Callable, TypeVar, Optional, Generator, AsyncGenerator, Tuple +import httpx + +T = TypeVar("T") + + +class ServerEvent: + id: Optional[str] = None + event: Optional[str] = None + data: Optional[str] = None + retry: Optional[int] = None + + +MESSAGE_BOUNDARIES = [ + b"\r\n\r\n", + b"\n\n", + b"\r\r", +] + + +async def stream_events_async( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> AsyncGenerator[T, None]: + buffer = bytearray() + position = 0 + discard = False + async for chunk in response.aiter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. 
+ if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + discard = False + for chunk in response.iter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. + if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def _parse_event( + raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None +) -> Tuple[Optional[T], bool]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim <= 0: + continue + + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == "id": + event.id = value + publish = True + elif field == "retry": + event.retry = int(value) if value.isdigit() else None + publish = True + + if sentinel and data == f"{sentinel}\n": + return None, True + + if data: + data = data[:-1] + event.data = data + + data_is_primitive = ( + data.isnumeric() or data == "true" or data == "false" or data == "null" + ) + data_is_json = ( + data.startswith("{") or data.startswith("[") or data.startswith('"') + ) + + if data_is_primitive or data_is_json: + try: + event.data = json.loads(data) + except Exception: + pass + + out = None + if publish: + out = decoder(json.dumps(event.__dict__)) + + return out, False + + +def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py new file mode 100644 index 0000000..07f9b23 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py @@ -0,0 +1,207 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ( + FormMetadata, + MultipartFormMetadata, + find_field_metadata, +) +from .values import _val_to_string + + +def _populate_form( + field_name: str, + explode: bool, + obj: Any, + delimiter: str, + form: Dict[str, List[str]], +): + if obj is None: + return form + + if isinstance(obj, BaseModel): + items = [] + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_field_name = obj_field.alias if obj_field.alias is not None else name + if obj_field_name == "": + continue + + val = getattr(obj, name) + if val is None: + continue + + if explode: + form[obj_field_name] = [_val_to_string(val)] + else: + items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, Dict): + items = [] + for key, value in obj.items(): + if value is None: + continue + + if explode: + form[key] = [_val_to_string(value)] + else: + items.append(f"{key}{delimiter}{_val_to_string(value)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + if explode: + if not field_name in form: + form[field_name] = [] + form[field_name].append(_val_to_string(value)) + else: + items.append(_val_to_string(value)) + + if len(items) > 0: + form[field_name] = [delimiter.join([str(item) for item in items])] + else: + form[field_name] = [_val_to_string(obj)] + + return form + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: + form: Dict[str, Any] = {} + files: Dict[str, Any] = {} + + if not isinstance(request, BaseModel): + raise TypeError("invalid request body type") + + request_fields: Dict[str, FieldInfo] = request.__class__.model_fields + request_field_types = get_type_hints(request.__class__) + + for name in request_fields: + field = request_fields[name] + + val = getattr(request, name) + if val is None: + continue + + field_metadata = find_field_metadata(field, MultipartFormMetadata) + if not field_metadata: + continue + + f_name = field.alias if field.alias is not None else name + + if field_metadata.file: + file_fields: Dict[str, FieldInfo] = val.__class__.model_fields + + file_name = "" + field_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(val, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(val, file_field_name, None) + else: + field_name = ( + file_field.alias + if file_field.alias is not None + else file_field_name + ) + file_name = getattr(val, file_field_name) + + if field_name == "" or file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + if content_type is not None: + files[field_name] = (file_name, content, content_type) + else: + files[field_name] = (file_name, content) + elif field_metadata.json: + files[f_name] = ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ) + else: + if isinstance(val, 
List): + values = [] + + for value in val: + if value is None: + continue + values.append(_val_to_string(value)) + + form[f_name + "[]"] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if val is None: + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/headers.py b/packages/mistralai_azure/src/mistralai_azure/utils/headers.py new file mode 100644 index 0000000..e14a0f4 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/headers.py @@ -0,0 +1,136 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + List, + Optional, +) +from httpx import Headers +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + HeaderMetadata, + find_field_metadata, +) + +from .values import _populate_from_globals, _val_to_string + + +def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: + headers: Dict[str, str] = {} + + globals_already_populated = [] + if headers_params is not None: + globals_already_populated = _populate_headers(headers_params, gbls, headers, []) + if gbls is not None: + _populate_headers(gbls, None, headers, globals_already_populated) + + return headers + + +def _populate_headers( + headers_params: Any, + gbls: Any, + header_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(headers_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + f_name = field.alias if field.alias is not None else name + + metadata = find_field_metadata(field, HeaderMetadata) + if metadata is None: + continue + + value, global_found = _populate_from_globals( + name, getattr(headers_params, name), HeaderMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + value = _serialize_header(metadata.explode, value) + + if value != "": + header_values[f_name] = value + + return globals_already_populated + + +def _serialize_header(explode: bool, obj: Any) -> str: + if obj is None: + return "" + + if isinstance(obj, BaseModel): + items = [] + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) + + if not obj_param_metadata: + continue + + f_name = obj_field.alias if 
obj_field.alias is not None else name + + val = getattr(obj, name) + if val is None: + continue + + if explode: + items.append(f"{f_name}={_val_to_string(val)}") + else: + items.append(f_name) + items.append(_val_to_string(val)) + + if len(items) > 0: + return ",".join(items) + elif isinstance(obj, Dict): + items = [] + + for key, value in obj.items(): + if value is None: + continue + + if explode: + items.append(f"{key}={_val_to_string(value)}") + else: + items.append(key) + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join([str(item) for item in items]) + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join(items) + else: + return f"{_val_to_string(obj)}" + + return "" + + +def get_response_headers(headers: Headers) -> Dict[str, List[str]]: + res: Dict[str, List[str]] = {} + for k, v in headers.items(): + if not k in res: + res[k] = [] + + res[k].append(v) + return res diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/logger.py b/packages/mistralai_azure/src/mistralai_azure/utils/logger.py new file mode 100644 index 0000000..7e4bbea --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/logger.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Any, Protocol + +class Logger(Protocol): + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + +class NoOpLogger: + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + +def get_body_content(req: httpx.Request) -> str: + return "" if not hasattr(req, "_content") else str(req.content) + diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/metadata.py b/packages/mistralai_azure/src/mistralai_azure/utils/metadata.py new file mode 100644 index 0000000..173b3e5 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/metadata.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Optional, Type, TypeVar, Union +from dataclasses import dataclass +from pydantic.fields import FieldInfo + + +T = TypeVar("T") + + +@dataclass +class SecurityMetadata: + option: bool = False + scheme: bool = False + scheme_type: Optional[str] = None + sub_type: Optional[str] = None + field_name: Optional[str] = None + + def get_field_name(self, default: str) -> str: + return self.field_name or default + + +@dataclass +class ParamMetadata: + serialization: Optional[str] = None + style: str = "simple" + explode: bool = False + + +@dataclass +class PathParamMetadata(ParamMetadata): + pass + + +@dataclass +class QueryParamMetadata(ParamMetadata): + style: str = "form" + explode: bool = True + + +@dataclass +class HeaderMetadata(ParamMetadata): + pass + + +@dataclass +class RequestMetadata: + media_type: str = "application/octet-stream" + + +@dataclass +class MultipartFormMetadata: + file: bool = False + content: bool = False + json: bool = False + + +@dataclass +class FormMetadata: + json: bool = False + style: str = "form" + explode: bool = True + + +class FieldMetadata: + security: Optional[SecurityMetadata] = None + path: Optional[PathParamMetadata] = None + query: Optional[QueryParamMetadata] = None + header: Optional[HeaderMetadata] = None + request: Optional[RequestMetadata] = None + form: Optional[FormMetadata] = None + multipart: Optional[MultipartFormMetadata] = None + + def __init__( + self, + security: Optional[SecurityMetadata] = None, + path: Optional[Union[PathParamMetadata, bool]] = None, + query: Optional[Union[QueryParamMetadata, bool]] = None, + header: Optional[Union[HeaderMetadata, bool]] = None, + request: Optional[Union[RequestMetadata, bool]] = None, + form: Optional[Union[FormMetadata, bool]] = None, + multipart: Optional[Union[MultipartFormMetadata, bool]] = None, + ): + self.security = security + self.path = PathParamMetadata() if isinstance(path, bool) else path + self.query = QueryParamMetadata() if isinstance(query, bool) else query + self.header = HeaderMetadata() if isinstance(header, bool) else header + self.request = RequestMetadata() if isinstance(request, bool) else request + self.form = FormMetadata() if isinstance(form, bool) else form + self.multipart = ( + MultipartFormMetadata() if isinstance(multipart, bool) else multipart + ) + + +def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = find_metadata(field_info, FieldMetadata) + if not metadata: + return None + + fields = metadata.__dict__ + + for field in fields: + if isinstance(fields[field], metadata_type): + return fields[field] + + return None + + +def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = field_info.metadata + if not metadata: + return None + + for md in metadata: + if isinstance(md, metadata_type): + return md + + return None diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py new file mode 100644 index 0000000..1c8c583 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py @@ -0,0 +1,203 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, +) + +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + QueryParamMetadata, + find_field_metadata, +) +from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .forms import _populate_form + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, []) + if gbls is not None: + _populate_query_params(gbls, None, params, globals_already_populated) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(query_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields + param_field_types = get_type_hints(query_params.__class__) + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + + metadata = find_field_metadata(field, QueryParamMetadata) + if not metadata: + continue + + value = getattr(query_params, name) if query_params is not None else None + + value, global_found = _populate_from_globals( + name, value, QueryParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + f_name = field.alias if field.alias is not None else name + serialization = metadata.serialization + if serialization is not None: + serialized_parms = _get_serialized_params( + metadata, f_name, value, param_field_types[name] + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.style + if style == "deepObject": + _populate_deep_object_query_params(f_name, value, query_param_values) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise NotImplementedError( + f"query param style {style} not yet supported" + ) + + return globals_already_populated + + +def _populate_deep_object_query_params( + field_name: str, + obj: Any, + params: Dict[str, List[str]], +): + if obj is None: + return + + if isinstance(obj, BaseModel): + _populate_deep_object_query_params_basemodel(field_name, obj, params) + elif isinstance(obj, Dict): + _populate_deep_object_query_params_dict(field_name, obj, params) + + +def _populate_deep_object_query_params_basemodel( + prior_params_key: str, + obj: Any, + params: Dict[str, List[str]], +): + if obj is None: + return + + if not isinstance(obj, BaseModel): + return + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + + f_name = obj_field.alias if obj_field.alias is not None else name + + params_key = f"{prior_params_key}[{f_name}]" + + obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) + if obj_param_metadata is None: + continue + + obj_val = getattr(obj, name) + if obj_val is None: + continue + + if isinstance(obj_val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif 
isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if value is None: + return + + for key, val in value.items(): + if val is None: + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if value is None: + return + + for val in value: + if val is None: + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def _populate_delimited_query_params( + metadata: QueryParamMetadata, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py b/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py new file mode 100644 index 0000000..4f586ae --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: str + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type {type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py new file mode 100644 index 0000000..a06f927 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import random +import time +from typing import List + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + + def __init__(self, response: httpx.Response): + self.response = response + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + 
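A minimal usage sketch of the retry helpers above, assuming the `BackoffStrategy` and `RetryConfig` constructors shown in this file, their re-export from `mistralai_azure.utils`, and the `MistralAzure` client from sdk.py; the package-root import path and the endpoint/key values are illustrative placeholders:

from mistralai_azure import MistralAzure  # NOTE: root import path assumed
from mistralai_azure.utils import BackoffStrategy, RetryConfig

# Back off from 500 ms up to 60 s per attempt, growing 1.5x each retry,
# for at most one hour of total elapsed time.
backoff = BackoffStrategy(
    initial_interval=500,
    max_interval=60000,
    exponent=1.5,
    max_elapsed_time=3600000,
)

# "backoff" is the only strategy retry()/retry_async() act on; any other
# value makes them call the wrapped function once with no retries.
sdk = MistralAzure(
    azure_api_key="AZURE_API_KEY",  # placeholder
    azure_endpoint="https://example-endpoint.example.com",  # placeholder
    retry_config=RetryConfig("backoff", backoff, retry_connection_errors=True),
)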
+def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + sleep = min(sleep, max_interval / 1000) + time.sleep(sleep) + retries += 1 + + +async def retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + sleep = min(sleep, max_interval / 1000) + time.sleep(sleep) + retries += 1 diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/security.py b/packages/mistralai_azure/src/mistralai_azure/utils/security.py new file mode 100644 index 0000000..aab4cb6 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/security.py @@ -0,0 +1,168 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import base64 +from typing import ( + Any, + Dict, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) + + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth which could be a flattened model + if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def 
_parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http" and sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError(f"sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + else: + raise ValueError(f"sub type {sub_type} not supported") + else: + raise ValueError(f"scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py new file mode 100644 index 0000000..a98998a --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -0,0 +1,181 @@ +"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" + +from decimal import Decimal +import json +from typing import Any, Dict, List, Union, get_args +import httpx +from typing_extensions import get_origin +from pydantic import ConfigDict, create_model +from pydantic_core import from_json +from typing_inspect import is_optional_type + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable + + +def serialize_decimal(as_str: bool): + def serialize(d): + if is_optional_type(type(d)) and d is None: + return None + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, Decimal): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + if is_optional_type(type(f)) and f is None: + return None + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, float): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(b): + if is_optional_type(type(b)) and b is None: + return None + + if not isinstance(b, int): + raise ValueError("Expected int") + + return str(b) if as_str else b + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, int): + return b + + if not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_open_enum(is_int: bool): + def validate(e): + if e is None: + return None + + if is_int: + if not isinstance(e, int): + raise ValueError("Expected int") + else: + if not isinstance(e, str): + raise ValueError("Expected string") + + return e + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return 
any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/url.py b/packages/mistralai_azure/src/mistralai_azure/utils/url.py new file mode 100644 index 0000000..b201bfa --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/url.py @@ -0,0 +1,150 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from decimal import Decimal +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, + Union, + get_args, + get_origin, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + PathParamMetadata, + find_field_metadata, +) +from .values import _get_serialized_params, _populate_from_globals, _val_to_string + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if gbls is not None: + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(path_params, BaseModel): + return globals_already_populated + + path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields + path_param_field_types = get_type_hints(path_params.__class__) + for name in path_param_fields: + if name in skip_fields: + continue + + field = path_param_fields[name] + + param_metadata = find_field_metadata(field, PathParamMetadata) + if param_metadata is None: + continue + + param = getattr(path_params, name) if path_params is not None else None + param, global_found = _populate_from_globals( + name, param, PathParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + if param is None: + continue + + f_name = field.alias if field.alias is not None else name + serialization = param_metadata.serialization + if serialization is not None: + serialized_params = _get_serialized_params( + param_metadata, f_name, param, path_param_field_types[name] + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + pp_vals: List[str] = [] + if param_metadata.style == "simple": + if isinstance(param, List): + for pp_val in param: + if pp_val is None: + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[f_name] = ",".join(pp_vals) + elif isinstance(param, Dict): + for pp_key in param: + if param[pp_key] is None: + continue + if param_metadata.explode: + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[f_name] = ",".join(pp_vals) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + param_fields: Dict[str, FieldInfo] = param.__class__.model_fields + for name in param_fields: + param_field = param_fields[name] + + param_value_metadata = find_field_metadata( + param_field, PathParamMetadata + ) + if param_value_metadata is None: + continue + + param_name = ( + param_field.alias if 
param_field.alias is not None else name + ) + + param_field_val = getattr(param, name) + if param_field_val is None: + continue + if param_metadata.explode: + pp_vals.append( + f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[f_name] = ",".join(pp_vals) + else: + path_param_values[f_name] = _val_to_string(param) + + return globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def remove_suffix(input_string, suffix): + if suffix and input_string.endswith(suffix): + return input_string[: -len(suffix)] + return input_string diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/mistralai_azure/src/mistralai_azure/utils/values.py new file mode 100644 index 0000000..24ccae3 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/values.py @@ -0,0 +1,128 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +from enum import Enum +from email.message import Message +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: + codes = code if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: + if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + 
param_metadata = find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def _val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params diff --git a/packages/mistralai_gcp/.genignore b/packages/mistralai_gcp/.genignore new file mode 100644 index 0000000..ea10bc8 --- /dev/null +++ b/packages/mistralai_gcp/.genignore @@ -0,0 +1,4 @@ +src/mistralai_gcp/sdk.py +README.md +USAGE.md +docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_gcp/.gitattributes b/packages/mistralai_gcp/.gitattributes new file mode 100644 index 0000000..4d75d59 --- /dev/null +++ b/packages/mistralai_gcp/.gitattributes @@ -0,0 +1,2 @@ +# This allows generated code to be indexed correctly +*.py linguist-generated=false \ No newline at end of file diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore new file mode 100644 index 0000000..477b772 --- /dev/null +++ b/packages/mistralai_gcp/.gitignore @@ -0,0 +1,8 @@ +.venv/ +venv/ +src/*.egg-info/ +__pycache__/ +.pytest_cache/ +.python-version +.DS_Store +pyrightconfig.json diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock new file mode 100644 index 0000000..ab48393 --- /dev/null +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -0,0 +1,142 @@ +lockVersion: 2.0.0 +id: ec60f2d8-7869-45c1-918e-773d41a8cf74 +management: + docChecksum: 5daa3767285068a2f496f5fd41eb7a01 + docVersion: 0.0.2 + speakeasyVersion: 1.356.0 + generationVersion: 2.388.1 + releaseVersion: 1.0.0-rc.2 + configChecksum: 68063242d77238d1f19a7d7b0a39c381 + published: true +features: + python: + additionalDependencies: 1.0.0 + constsAndDefaults: 1.0.2 + core: 5.3.4 + defaultEnabledRetries: 0.2.0 + envVarSecurityUsage: 0.3.1 + examples: 3.0.0 + flatRequests: 1.0.1 + globalSecurity: 3.0.1 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.0.0 + nameOverrides: 3.0.0 + nullables: 1.0.0 + responseFormat: 1.0.0 + retries: 3.0.0 + sdkHooks: 1.0.0 + serverEvents: 1.0.2 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.0.1 +generatedFiles: + - src/mistralai_gcp/sdkconfiguration.py + - src/mistralai_gcp/chat.py + - src/mistralai_gcp/fim.py + - .vscode/settings.json + - poetry.toml + - py.typed + - pylintrc + - pyproject.toml + - scripts/compile.sh + - scripts/publish.sh + - src/mistralai_gcp/__init__.py + - src/mistralai_gcp/basesdk.py + - src/mistralai_gcp/httpclient.py + - src/mistralai_gcp/py.typed + - src/mistralai_gcp/types/__init__.py + - src/mistralai_gcp/types/basemodel.py + - src/mistralai_gcp/utils/__init__.py + - src/mistralai_gcp/utils/annotations.py + - src/mistralai_gcp/utils/enums.py + - src/mistralai_gcp/utils/eventstreaming.py + - src/mistralai_gcp/utils/forms.py + - src/mistralai_gcp/utils/headers.py + - src/mistralai_gcp/utils/logger.py + - src/mistralai_gcp/utils/metadata.py + - src/mistralai_gcp/utils/queryparams.py + - src/mistralai_gcp/utils/requestbodies.py + - 
src/mistralai_gcp/utils/retries.py + - src/mistralai_gcp/utils/security.py + - src/mistralai_gcp/utils/serializers.py + - src/mistralai_gcp/utils/url.py + - src/mistralai_gcp/utils/values.py + - src/mistralai_gcp/models/sdkerror.py + - src/mistralai_gcp/models/completionevent.py + - src/mistralai_gcp/models/completionchunk.py + - src/mistralai_gcp/models/completionresponsestreamchoice.py + - src/mistralai_gcp/models/deltamessage.py + - src/mistralai_gcp/models/toolcall.py + - src/mistralai_gcp/models/functioncall.py + - src/mistralai_gcp/models/usageinfo.py + - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/validationerror.py + - src/mistralai_gcp/models/chatcompletionstreamrequest.py + - src/mistralai_gcp/models/tool.py + - src/mistralai_gcp/models/function.py + - src/mistralai_gcp/models/responseformat.py + - src/mistralai_gcp/models/systemmessage.py + - src/mistralai_gcp/models/contentchunk.py + - src/mistralai_gcp/models/usermessage.py + - src/mistralai_gcp/models/textchunk.py + - src/mistralai_gcp/models/assistantmessage.py + - src/mistralai_gcp/models/toolmessage.py + - src/mistralai_gcp/models/chatcompletionresponse.py + - src/mistralai_gcp/models/chatcompletionchoice.py + - src/mistralai_gcp/models/chatcompletionrequest.py + - src/mistralai_gcp/models/fimcompletionstreamrequest.py + - src/mistralai_gcp/models/fimcompletionresponse.py + - src/mistralai_gcp/models/fimcompletionrequest.py + - src/mistralai_gcp/models/security.py + - src/mistralai_gcp/models/__init__.py + - docs/models/completionevent.md + - docs/models/completionchunk.md + - docs/models/finishreason.md + - docs/models/completionresponsestreamchoice.md + - docs/models/deltamessage.md + - docs/models/toolcall.md + - docs/models/arguments.md + - docs/models/functioncall.md + - docs/models/usageinfo.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/validationerror.md + - docs/models/stop.md + - docs/models/messages.md + - docs/models/toolchoice.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/tool.md + - docs/models/function.md + - docs/models/responseformats.md + - docs/models/responseformat.md + - docs/models/content.md + - docs/models/role.md + - docs/models/systemmessage.md + - docs/models/contentchunk.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/usermessage.md + - docs/models/textchunk.md + - docs/models/assistantmessagerole.md + - docs/models/assistantmessage.md + - docs/models/toolmessagerole.md + - docs/models/toolmessage.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionrequest.md + - docs/models/security.md + - docs/models/utils/retryconfig.md + - .gitattributes + - src/mistralai_gcp/_hooks/sdkhooks.py + - src/mistralai_gcp/_hooks/types.py + - src/mistralai_gcp/_hooks/__init__.py diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml new file mode 100644 index 0000000..2a5993b --- /dev/null +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -0,0 +1,44 @@ +configVersion: 
2.0.0 +generation: + sdkClassName: MistralGCP + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + auth: + oAuth2ClientCredentialsEnabled: true +python: + version: 1.0.0-rc.2 + additionalDependencies: + dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + main: + google-auth: ^2.31.0 + requests: ^2.32.3 + authors: + - Mistral + clientServerStatusCodesAsErrors: true + description: Python Client SDK for the Mistral AI API in GCP. + enumFormat: union + flattenGlobalSecurity: true + flattenRequests: true + imports: + option: openapi + paths: + callbacks: "" + errors: "" + operations: "" + shared: "" + webhooks: "" + inputModelSuffix: input + maxMethodParams: 4 + methodArguments: infer-optional-args + outputModelSuffix: output + packageName: mistralai-gcp + responseFormat: flat + templateVersion: v2 diff --git a/packages/mistralai_gcp/.vscode/settings.json b/packages/mistralai_gcp/.vscode/settings.json new file mode 100644 index 0000000..8d79f0a --- /dev/null +++ b/packages/mistralai_gcp/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "python.testing.pytestArgs": ["tests", "-vv"], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "pylint.args": ["--rcfile=pylintrc"] +} diff --git a/packages/mistralai_gcp/CONTRIBUTING.md b/packages/mistralai_gcp/CONTRIBUTING.md new file mode 100644 index 0000000..d585717 --- /dev/null +++ b/packages/mistralai_gcp/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to This Repository + +Thank you for your interest in contributing to this repository. Please note that this repository contains generated code. As such, we do not accept direct changes or pull requests. Instead, we encourage you to follow the guidelines below to report issues and suggest improvements. + +## How to Report Issues + +If you encounter any bugs or have suggestions for improvements, please open an issue on GitHub. When reporting an issue, please provide as much detail as possible to help us reproduce the problem. This includes: + +- A clear and descriptive title +- Steps to reproduce the issue +- Expected and actual behavior +- Any relevant logs, screenshots, or error messages +- Information about your environment (e.g., operating system, software versions) + - For example can be collected using the `npx envinfo` command from your terminal if you have Node.js installed + +## Issue Triage and Upstream Fixes + +We will review and triage issues as quickly as possible. Our goal is to address bugs and incorporate improvements in the upstream source code. Fixes will be included in the next generation of the generated code. + +## Contact + +If you have any questions or need further assistance, please feel free to reach out by opening an issue. + +Thank you for your understanding and cooperation! + +The Maintainers diff --git a/packages/mistralai_gcp/README.md b/packages/mistralai_gcp/README.md new file mode 100644 index 0000000..a423324 --- /dev/null +++ b/packages/mistralai_gcp/README.md @@ -0,0 +1,425 @@ +# Mistral on GCP Python Client + + +**Prerequisites** + +Before you begin, you will need to create a Google Cloud project and enable the Mistral API. To do this, follow the instructions [here](https://docs.mistral.ai/deployment/cloud/vertex/). + +To run this locally you will also need to ensure you are authenticated with Google Cloud. 
You can do this by running
+
+```bash
+gcloud auth application-default login
+```
+
+## SDK Installation
+
+Install the extra dependencies specific to Google Cloud:
+
+```bash
+pip install mistralai[gcp]
+```
+
+
+## SDK Example Usage
+
+### Create Chat Completions
+
+This example shows how to create chat completions.
+
+```python
+# Synchronous Example
+from mistralai_gcp import MistralGCP
+import os
+
+s = MistralGCP(
+    api_key=os.getenv("API_KEY", ""),
+)
+
+
+res = s.chat.complete(messages=[
+    {
+        "content": "Who is the best French painter? Answer in one short sentence.",
+        "role": "user",
+    },
+], model="mistral-small-latest")
+
+if res is not None:
+    # handle response
+    pass
+```
+
+<br /><br />
+
+The same SDK client can also be used to make asynchronous requests by importing asyncio.
+```python
+# Asynchronous Example
+import asyncio
+from mistralai_gcp import MistralGCP
+import os
+
+async def main():
+    s = MistralGCP(
+        api_key=os.getenv("API_KEY", ""),
+    )
+    res = await s.chat.complete_async(messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ], model="mistral-small-latest")
+    if res is not None:
+        # handle response
+        pass
+
+asyncio.run(main())
+```
+
+
+
+## Available Resources and Operations
+
+### [chat](docs/sdks/chat/README.md)
+
+* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion
+* [create](docs/sdks/chat/README.md#create) - Chat Completion
+
+### [fim](docs/sdks/fim/README.md)
+
+* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion
+* [create](docs/sdks/fim/README.md#create) - Fim Completion
+
+
+
+## Server-sent event streaming
+
+[Server-sent events][mdn-sse] are used to stream content from certain
+operations. These operations will expose the stream as a [Generator][generator] that
+can be consumed using a simple `for` loop. The loop will
+terminate when the server no longer has any events to send and closes the
+underlying connection.
+
+```python
+from mistralai_gcp import MistralGCP
+import os
+
+s = MistralGCP()
+
+
+res = s.chat.stream(messages=[
+    {
+        "content": "Who is the best French painter? Answer in one short sentence.",
+        "role": "user",
+    },
+], model="mistral-small-latest")
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events
+[generator]: https://wiki.python.org/moin/Generators
+
+
+
+## Retries
+
+Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK.
+
+To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call:
+```python
+from mistralai_gcp import MistralGCP
+from mistralai_gcp.utils import BackoffStrategy, RetryConfig
+import os
+
+s = MistralGCP()
+
+
+res = s.chat.stream(
+    messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ],
+    model="mistral-small-latest",
+    retries=RetryConfig(
+        "backoff",
+        BackoffStrategy(1, 50, 1.1, 100),
+        False
+    )
+)
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK:
+```python
+from mistralai_gcp import MistralGCP
+from mistralai_gcp.utils import BackoffStrategy, RetryConfig
+import os
+
+s = MistralGCP(
+    retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False),
+)
+
+
+res = s.chat.stream(
+    messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ],
+    model="mistral-small-latest"
+)
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+
+
+## Error Handling
+
+Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error.
If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type.
+
+| Error Object               | Status Code | Content Type     |
+| -------------------------- | ----------- | ---------------- |
+| models.HTTPValidationError | 422         | application/json |
+| models.SDKError            | 4xx-5xx     | */*              |
+
+### Example
+
+```python
+from mistralai_gcp import MistralGCP, models
+import os
+
+s = MistralGCP()
+
+res = None
+try:
+    res = s.chat.complete(
+        messages=[
+            {
+                "content": "Who is the best French painter? Answer in one short sentence.",
+                "role": "user",
+            },
+        ],
+        model="mistral-small-latest"
+    )
+
+except models.HTTPValidationError as e:
+    # handle exception
+    raise e
+except models.SDKError as e:
+    # handle exception
+    raise e
+
+if res is not None:
+    # handle response
+    pass
+
+```
+
+
+
+## Server Selection
+
+### Select Server by Name
+
+You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers:
+
+| Name   | Server                   | Variables |
+| ------ | ------------------------ | --------- |
+| `prod` | `https://api.mistral.ai` | None      |
+
+#### Example
+
+```python
+from mistralai_gcp import MistralGCP
+import os
+
+s = MistralGCP(server="prod")
+
+
+res = s.chat.stream(
+    messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ],
+    model="mistral-small-latest"
+)
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+
+### Override Server URL Per-Client
+
+The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example:
+```python
+from mistralai_gcp import MistralGCP
+import os
+
+s = MistralGCP(
+    server_url="https://api.mistral.ai",
+)
+
+
+res = s.chat.stream(
+    messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ],
+    model="mistral-small-latest"
+)
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+
+
+## Custom HTTP Client
+
+The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance.
+Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocols ensuring that the client has the necessary methods to make API calls.
+This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly.
+
+For example, you could specify a header for every request that this SDK makes as follows:
+```python
+from mistralai_gcp import MistralGCP
+import httpx
+
+http_client = httpx.Client(headers={"x-custom-header": "someValue"})
+s = MistralGCP(client=http_client)
+```
+
+or you could wrap the client with your own custom logic:
+```python
+from typing import Any, Optional, Union
+
+from mistralai_gcp import MistralGCP
+from mistralai_gcp.httpclient import AsyncHttpClient
+import httpx
+
+class CustomClient(AsyncHttpClient):
+    client: AsyncHttpClient
+
+    def __init__(self, client: AsyncHttpClient):
+        self.client = client
+
+    async def send(
+        self,
+        request: httpx.Request,
+        *,
+        stream: bool = False,
+        auth: Union[
+            httpx._types.AuthTypes, httpx._client.UseClientDefault, None
+        ] = httpx.USE_CLIENT_DEFAULT,
+        follow_redirects: Union[
+            bool, httpx._client.UseClientDefault
+        ] = httpx.USE_CLIENT_DEFAULT,
+    ) -> httpx.Response:
+        request.headers["Client-Level-Header"] = "added by client"
+
+        return await self.client.send(
+            request, stream=stream, auth=auth, follow_redirects=follow_redirects
+        )
+
+    def build_request(
+        self,
+        method: str,
+        url: httpx._types.URLTypes,
+        *,
+        content: Optional[httpx._types.RequestContent] = None,
+        data: Optional[httpx._types.RequestData] = None,
+        files: Optional[httpx._types.RequestFiles] = None,
+        json: Optional[Any] = None,
+        params: Optional[httpx._types.QueryParamTypes] = None,
+        headers: Optional[httpx._types.HeaderTypes] = None,
+        cookies: Optional[httpx._types.CookieTypes] = None,
+        timeout: Union[
+            httpx._types.TimeoutTypes, httpx._client.UseClientDefault
+        ] = httpx.USE_CLIENT_DEFAULT,
+        extensions: Optional[httpx._types.RequestExtensions] = None,
+    ) -> httpx.Request:
+        return self.client.build_request(
+            method,
+            url,
+            content=content,
+            data=data,
+            files=files,
+            json=json,
+            params=params,
+            headers=headers,
+            cookies=cookies,
+            timeout=timeout,
+            extensions=extensions,
+        )
+
+s = MistralGCP(async_client=CustomClient(httpx.AsyncClient()))
+```
+
+
+
+## Authentication
+
+### Per-Client Security Schemes
+
+This SDK supports the following security scheme globally:
+
+| Name      | Type | Scheme      |
+| --------- | ---- | ----------- |
+| `api_key` | http | HTTP Bearer |
+
+To authenticate with the API, the `api_key` parameter must be set when initializing the SDK client instance. For example:
+```python
+from mistralai_gcp import MistralGCP
+import os
+
+s = MistralGCP(
+    api_key=os.getenv("API_KEY", ""),
+)
+
+
+res = s.chat.stream(
+    messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ],
+    model="mistral-small-latest"
+)
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+
+
+
+# Development
+
+## Contributions
+
+While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation.
+We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release.
diff --git a/packages/mistralai_gcp/USAGE.md b/packages/mistralai_gcp/USAGE.md
new file mode 100644
index 0000000..30fa08a
--- /dev/null
+++ b/packages/mistralai_gcp/USAGE.md
@@ -0,0 +1,51 @@
+
+### Create Chat Completions
+
+This example shows how to create chat completions.
+
+```python
+# Synchronous Example
+from mistralai_gcp import MistralGCP
+import os
+
+s = MistralGCP()
+
+
+res = s.chat.complete(messages=[
+    {
+        "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +], model="mistral-small-latest") + +if res is not None: + # handle response + pass +``` + +
+
+The same SDK client can also be used to make asynchronous requests by importing asyncio.
+```python
+# Asynchronous Example
+import asyncio
+from mistralai_gcp import MistralGCP
+import os
+
+async def main():
+    s = MistralGCP(
+        api_key=os.getenv("API_KEY", ""),
+    )
+    res = await s.chat.complete_async(messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ], model="mistral-small-latest")
+    if res is not None:
+        # handle response
+        pass
+
+asyncio.run(main())
+```
+ 
\ No newline at end of file
diff --git a/packages/mistralai_gcp/docs/models/arguments.md b/packages/mistralai_gcp/docs/models/arguments.md
new file mode 100644
index 0000000..2e54e27
--- /dev/null
+++ b/packages/mistralai_gcp/docs/models/arguments.md
@@ -0,0 +1,17 @@
+# Arguments
+
+
+## Supported Types
+
+### `Dict[str, Any]`
+
+```python
+value: Dict[str, Any] = /* values here */
+```
+
+### `str`
+
+```python
+value: str = /* values here */
+```
+
diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md
new file mode 100644
index 0000000..0c36cde
--- /dev/null
+++ b/packages/mistralai_gcp/docs/models/assistantmessage.md
@@ -0,0 +1,11 @@
+# AssistantMessage
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------ | ---------------------------------------------- | ------------------ | ----------- |
+| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A |
+| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagerole.md b/packages/mistralai_gcp/docs/models/assistantmessagerole.md new file mode 100644 index 0000000..658229e --- /dev/null +++ b/packages/mistralai_gcp/docs/models/assistantmessagerole.md @@ -0,0 +1,8 @@ +# AssistantMessageRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionchoice.md new file mode 100644 index 0000000..6fa839b --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionChoice + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | +| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md b/packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md new file mode 100644 index 0000000..b2f15ec --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md @@ -0,0 +1,12 @@ +# ChatCompletionChoiceFinishReason + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md new file mode 100644 index 0000000..3e30c64 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -0,0 +1,19 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md b/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md new file mode 100644 index 0000000..bc7708a --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md b/packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md new file mode 100644 index 0000000..749296d --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 0000000..ed32b75 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionRequestToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md new file mode 100644 index 0000000..ad37615 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 0000000..adc7ff9 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../models/toolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionchunk.md b/packages/mistralai_gcp/docs/models/completionchunk.md new file mode 100644 index 0000000..b8ae6a0 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionevent.md b/packages/mistralai_gcp/docs/models/completionevent.md new file mode 100644 index 0000000..7a66e8f --- /dev/null +++ b/packages/mistralai_gcp/docs/models/completionevent.md @@ -0,0 +1,8 @@ +# CompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `data` | [models.CompletionChunk](../models/completionchunk.md) | 
:heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md b/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md new file mode 100644 index 0000000..c807dac --- /dev/null +++ b/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/content.md b/packages/mistralai_gcp/docs/models/content.md new file mode 100644 index 0000000..a833dc2 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/content.md @@ -0,0 +1,17 @@ +# Content + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/contentchunk.md b/packages/mistralai_gcp/docs/models/contentchunk.md new file mode 100644 index 0000000..64fc80d --- /dev/null +++ b/packages/mistralai_gcp/docs/models/contentchunk.md @@ -0,0 +1,9 @@ +# ContentChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/deltamessage.md b/packages/mistralai_gcp/docs/models/deltamessage.md new file mode 100644 index 0000000..4cb9e91 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | [OptionalNullable[models.ToolCall]](../models/toolcall.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md new file mode 100644 index 0000000..b4b024e --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -0,0 +1,17 @@ +# FIMCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md b/packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md new file mode 100644 index 0000000..a0dbb00 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md new file mode 100644 index 0000000..da786a1 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md @@ -0,0 +1,13 @@ +# FIMCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md new file mode 100644 index 0000000..acffb53 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -0,0 +1,17 @@ +# FIMCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md new file mode 100644 index 0000000..5a9e2ff --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/finishreason.md b/packages/mistralai_gcp/docs/models/finishreason.md new file mode 100644 index 0000000..45a5aed --- /dev/null +++ b/packages/mistralai_gcp/docs/models/finishreason.md @@ -0,0 +1,11 @@ +# FinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md new file mode 100644 index 0000000..8af398f --- /dev/null +++ b/packages/mistralai_gcp/docs/models/function.md @@ -0,0 +1,10 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/functioncall.md b/packages/mistralai_gcp/docs/models/functioncall.md new file mode 100644 index 0000000..7ccd90d --- /dev/null +++ b/packages/mistralai_gcp/docs/models/functioncall.md @@ -0,0 +1,9 @@ +# FunctionCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.Arguments](../models/arguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/httpvalidationerror.md b/packages/mistralai_gcp/docs/models/httpvalidationerror.md new file mode 100644 index 0000000..6389243 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/httpvalidationerror.md @@ -0,0 +1,10 @@ +# HTTPValidationError + +Validation Error + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `detail` | List[[models.ValidationError](../models/validationerror.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/loc.md b/packages/mistralai_gcp/docs/models/loc.md new file mode 100644 index 0000000..d6094ac --- /dev/null +++ b/packages/mistralai_gcp/docs/models/loc.md @@ -0,0 +1,17 @@ +# Loc + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/messages.md b/packages/mistralai_gcp/docs/models/messages.md new file mode 100644 index 0000000..1d39450 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/messages.md @@ -0,0 +1,29 @@ +# Messages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values 
here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md new file mode 100644 index 0000000..2704eab --- /dev/null +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -0,0 +1,8 @@ +# ResponseFormat + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md new file mode 100644 index 0000000..ce35fbb --- /dev/null +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -0,0 +1,11 @@ +# ResponseFormats + +An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/role.md b/packages/mistralai_gcp/docs/models/role.md new file mode 100644 index 0000000..affca78 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/role.md @@ -0,0 +1,8 @@ +# Role + + +## Values + +| Name | Value | +| -------- | -------- | +| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/security.md b/packages/mistralai_gcp/docs/models/security.md new file mode 100644 index 0000000..c698674 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `api_key` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/stop.md b/packages/mistralai_gcp/docs/models/stop.md new file mode 100644 index 0000000..ba40ca8 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/stop.md @@ -0,0 +1,19 @@ +# Stop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/systemmessage.md b/packages/mistralai_gcp/docs/models/systemmessage.md new file mode 100644 index 0000000..7f82798 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/textchunk.md b/packages/mistralai_gcp/docs/models/textchunk.md new file mode 100644 index 0000000..34e4dd6 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md new file mode 100644 index 0000000..291394c --- /dev/null +++ b/packages/mistralai_gcp/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md new file mode 100644 index 0000000..bd2dc9f --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -0,0 +1,10 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/mistralai_gcp/docs/models/toolchoice.md new file mode 100644 index 0000000..b84f51f --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolchoice.md @@ -0,0 +1,10 @@ +# ToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessage.md b/packages/mistralai_gcp/docs/models/toolmessage.md new file mode 100644 index 0000000..364339e --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | 
Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessagerole.md b/packages/mistralai_gcp/docs/models/toolmessagerole.md new file mode 100644 index 0000000..c24e59c --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolmessagerole.md @@ -0,0 +1,8 @@ +# ToolMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usageinfo.md b/packages/mistralai_gcp/docs/models/usageinfo.md new file mode 100644 index 0000000..9f56a3a --- /dev/null +++ b/packages/mistralai_gcp/docs/models/usageinfo.md @@ -0,0 +1,10 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | +| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | +| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessage.md b/packages/mistralai_gcp/docs/models/usermessage.md new file mode 100644 index 0000000..3d96f1c --- /dev/null +++ b/packages/mistralai_gcp/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessagecontent.md b/packages/mistralai_gcp/docs/models/usermessagecontent.md new file mode 100644 index 0000000..86ebd18 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/usermessagecontent.md @@ -0,0 +1,17 @@ +# UserMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/usermessagerole.md b/packages/mistralai_gcp/docs/models/usermessagerole.md new file mode 100644 index 0000000..171124e --- /dev/null +++ b/packages/mistralai_gcp/docs/models/usermessagerole.md @@ -0,0 +1,8 @@ +# UserMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/utils/retryconfig.md b/packages/mistralai_gcp/docs/models/utils/retryconfig.md new file mode 100644 index 0000000..69dd549 --- /dev/null +++ 
b/packages/mistralai_gcp/docs/models/utils/retryconfig.md @@ -0,0 +1,24 @@ +# RetryConfig + +Allows customizing the default retry configuration. Only usable with methods that mention they support retries. + +## Fields + +| Name | Type | Description | Example | +| ------------------------- | ----------------------------------- | --------------------------------------- | --------- | +| `strategy` | `*str*` | The retry strategy to use. | `backoff` | +| `backoff` | [BackoffStrategy](#backoffstrategy) | Configuration for the backoff strategy. | | +| `retry_connection_errors` | `*bool*` | Whether to retry on connection errors. | `true` | + +## BackoffStrategy + +The backoff strategy allows retrying a request with an exponential backoff between each retry. + +### Fields + +| Name | Type | Description | Example | +| ------------------ | --------- | ----------------------------------------- | -------- | +| `initial_interval` | `*int*` | The initial interval in milliseconds. | `500` | +| `max_interval` | `*int*` | The maximum interval in milliseconds. | `60000` | +| `exponent` | `*float*` | The exponent to use for the backoff. | `1.5` | +| `max_elapsed_time` | `*int*` | The maximum elapsed time in milliseconds. | `300000` | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/validationerror.md b/packages/mistralai_gcp/docs/models/validationerror.md new file mode 100644 index 0000000..7a1654a --- /dev/null +++ b/packages/mistralai_gcp/docs/models/validationerror.md @@ -0,0 +1,10 @@ +# ValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/sdks/chat/README.md b/packages/mistralai_gcp/docs/sdks/chat/README.md new file mode 100644 index 0000000..6f5f197 --- /dev/null +++ b/packages/mistralai_gcp/docs/sdks/chat/README.md @@ -0,0 +1,121 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. + +### Available Operations + +* [stream](#stream) - Stream chat completion +* [create](#create) - Chat Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +], model="mistral-small-latest") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## create + +Chat Completion + +### Example Usage + +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.chat.complete(messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +], model="mistral-small-latest") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_gcp/docs/sdks/fim/README.md b/packages/mistralai_gcp/docs/sdks/fim/README.md new file mode 100644 index 0000000..b997fab --- /dev/null +++ b/packages/mistralai_gcp/docs/sdks/fim/README.md @@ -0,0 +1,107 @@ +# Fim +(*fim*) + +## Overview + +Fill-in-the-middle API. + +### Available Operations + +* [stream](#stream) - Stream fim completion +* [create](#create) - Fim Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.fim.stream(prompt="def", model="codestral-2405", suffix="return a+b") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------- | ------------------------------------------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## create + +FIM completion. + +### Example Usage + +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.fim.complete(prompt="def", model="codestral-2405", suffix="return a+b") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------- | ------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.FIMCompletionResponse](../../models/fimcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_gcp/docs/sdks/mistralgcp/README.md b/packages/mistralai_gcp/docs/sdks/mistralgcp/README.md new file mode 100644 index 0000000..84963a9 --- /dev/null +++ b/packages/mistralai_gcp/docs/sdks/mistralgcp/README.md @@ -0,0 +1,9 @@ +# MistralGCP SDK + + +## Overview + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + +### Available Operations + diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock new file mode 100644 index 0000000..a3d5456 --- /dev/null +++ b/packages/mistralai_gcp/poetry.lock @@ -0,0 +1,848 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
+ +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "anyio" +version = "4.4.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] + +[[package]] +name = "astroid" +version = "3.2.4" +description = "An abstract syntax tree for Python with inference support." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, + {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "cachetools" +version = "5.4.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, + {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, +] + +[[package]] +name = "certifi" +version = "2024.7.4" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "google-auth" +version = "2.32.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, + {file = "google_auth-2.32.0.tar.gz", hash = "sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = 
"1.0.5" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.26.0)"] + +[[package]] +name = "httpx" +version = "0.27.0" +description = "The next generation HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "idna" +version = "3.7" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jsonpath-python" +version = "1.0.6" +description = "A more powerful JSONPath implementation in modern python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, + {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mypy" +version = "1.10.1" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.1.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "platformdirs" +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. 
a `user data dir`." +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pyasn1" +version = "0.6.0" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, + {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.0" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, + {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + +[[package]] +name = "pydantic" +version = "2.8.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.20.1" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.20.1" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = 
"pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = 
"pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pylint" +version = "3.2.3" +description = "python code static checker" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, + 
{file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, +] + +[package.dependencies] +astroid = ">=3.2.2,<=3.3.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pyright" +version = "1.1.374" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyright-1.1.374-py3-none-any.whl", hash = "sha256:55752bcf7a3646d293cd76710a983b71e16f6128aab2d42468e6eb7e46c0a70d"}, + {file = "pyright-1.1.374.tar.gz", hash = "sha256:d01b2daf864ba5e0362e56b844984865970d7204158e61eb685e2dab7804cb82"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" + +[package.extras] +all = ["twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] + +[[package]] +name = "pytest" +version = "8.3.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, + {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tomlkit" +version = "0.13.0" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, + {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20240316" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, + {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = 
"sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." +optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "urllib3" +version = "2.2.2" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.8" +content-hash = "a68027cf5e3c64af190addf2b94014fb7eeb47d41cdd5c7f0ae2fb87305f83d0" diff --git a/packages/mistralai_gcp/poetry.toml b/packages/mistralai_gcp/poetry.toml new file mode 100644 index 0000000..ab1033b --- /dev/null +++ b/packages/mistralai_gcp/poetry.toml @@ -0,0 +1,2 @@ +[virtualenvs] +in-project = true diff --git a/packages/mistralai_gcp/py.typed b/packages/mistralai_gcp/py.typed new file mode 100644 index 0000000..3e38f1a --- /dev/null +++ b/packages/mistralai_gcp/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc new file mode 100644 index 0000000..5080038 --- /dev/null +++ b/packages/mistralai_gcp/pylintrc @@ -0,0 +1,658 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) 
+extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.8 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. 
If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. 
+property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. 
+allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. 
+enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. 
+check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. 
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml new file mode 100644 index 0000000..48841e4 --- /dev/null +++ b/packages/mistralai_gcp/pyproject.toml @@ -0,0 +1,58 @@ +[tool.poetry] +name = "mistralai-gcp" +version = "1.0.0-rc.2" +description = "Python Client SDK for the Mistral AI API in GCP." +authors = ["Mistral",] +readme = "README.md" +packages = [ + { include = "mistralai_gcp", from = "src" } +] +include = ["py.typed", "src/mistralai_gcp/py.typed"] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai_gcp/py.typed"] + +[virtualenvs] +in-project = true + +[tool.poetry.dependencies] +python = "^3.8" +google-auth = "^2.31.0" +httpx = "^0.27.0" +jsonpath-python = "^1.0.6" +pydantic = "~2.8.2" +python-dateutil = "^2.9.0.post0" +requests = "^2.32.3" +typing-inspect = "^0.9.0" + +[tool.poetry.group.dev.dependencies] +mypy = "==1.10.1" +pylint = "==3.2.3" +pyright = "==1.1.374" +pytest = "^8.2.2" +pytest-asyncio = "^0.23.7" +types-python-dateutil = "^2.9.0.20240316" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true + +[tool.pyright] +venvPath = "." +venv = ".venv" + + diff --git a/packages/mistralai_gcp/scripts/compile.sh b/packages/mistralai_gcp/scripts/compile.sh new file mode 100755 index 0000000..aa49772 --- /dev/null +++ b/packages/mistralai_gcp/scripts/compile.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +set -o pipefail # Ensure pipeline failures are propagated + +# Use temporary files to store outputs and exit statuses +declare -A output_files +declare -A status_files + +# Function to run a command with temporary output and status files +run_command() { + local cmd="$1" + local key="$2" + local output_file="$3" + local status_file="$4" + + # Run the command and store output and exit status + { + eval "$cmd" + echo $? > "$status_file" + } &> "$output_file" & +} + +# Create temporary files for outputs and statuses +for cmd in compileall pylint mypy pyright; do + output_files[$cmd]=$(mktemp) + status_files[$cmd]=$(mktemp) +done + +# Collect PIDs for background processes +declare -a pids + +# Run commands in parallel using temporary files +echo "Running python -m compileall" +run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" +pids+=($!) + +echo "Running pylint" +run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" +pids+=($!) + +echo "Running mypy" +run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" +pids+=($!) + +echo "Running pyright (optional)" +run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" +pids+=($!) 
+ +# Wait for all processes to complete +echo "Waiting for processes to complete" +for pid in "${pids[@]}"; do + wait "$pid" +done + +# Print output sequentially and check for failures +failed=false +for key in "${!output_files[@]}"; do + echo "--- Output from Command: $key ---" + echo + cat "${output_files[$key]}" + echo # Empty line for separation + echo "--- End of Output from Command: $key ---" + echo + + exit_status=$(cat "${status_files[$key]}") + if [ "$exit_status" -ne 0 ]; then + echo "Command $key failed with exit status $exit_status" >&2 + failed=true + fi +done + +# Clean up temporary files +for tmp_file in "${output_files[@]}" "${status_files[@]}"; do + rm -f "$tmp_file" +done + +if $failed; then + echo "One or more commands failed." >&2 + exit 1 +else + echo "All commands completed successfully." + exit 0 +fi diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh new file mode 100755 index 0000000..1ee7194 --- /dev/null +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} + +poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py new file mode 100644 index 0000000..68138c4 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdk import * +from .sdkconfiguration import * +from .models import * diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py new file mode 100644 index 0000000..2ee66cd --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py new file mode 100644 index 0000000..b03549c --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py @@ -0,0 +1,16 @@ +# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py +from typing import Union + +import httpx + +from .types import BeforeRequestContext, BeforeRequestHook + + +class CustomUserAgentHook(BeforeRequestHook): + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + request.headers["user-agent"] = ( + "mistral-client-python/" + request.headers["user-agent"].split(" ")[1] + ) + return request diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py new file mode 100644 index 0000000..304edfa --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py @@ -0,0 +1,15 @@ +from .custom_user_agent import CustomUserAgentHook +from .types import Hooks + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. 
+ + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + hooks.register_before_request_hook(CustomUserAgentHook()) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py new file mode 100644 index 0000000..ca3b7b3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai_gcp.httpclient import HttpClient + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py new file mode 100644 index 0000000..f4ee7f3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + + +from abc import ABC, abstractmethod +import httpx +from mistralai_gcp.httpclient import HttpClient +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py new file mode 100644 index 0000000..fd4854f --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -0,0 +1,253 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai_gcp import models, utils +from mistralai_gcp._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content +from typing import Callable, List, Optional, Tuple + +class BaseSDK: + sdk_configuration: SDKConfiguration + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self.sdk_configuration = sdk_config + + def get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self.get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if request_has_query_params else None, + ) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody("application/octet-stream") + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = self.sdk_configuration.debug_logger + + def do(): + http_res = None + try: + req = self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req) + ) + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = 
self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + async def do(): + http_res = None + try: + req = self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req) + ) + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py new file mode 100644 index 0000000..d9ad7bc --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -0,0 +1,458 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai_gcp import models, utils +from mistralai_gcp._hooks import HookContext +from mistralai_gcp.types import Nullable, OptionalNullable, UNSET +from mistralai_gcp.utils import eventstreaming +from typing import Any, AsyncGenerator, Generator, List, Optional, Union + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + + def stream( + self, *, + model: Nullable[str], + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + ) + + req = self.build_request( + method="POST", + path="/streamRawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: 
Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
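+
+        Example (illustrative sketch only, not part of the generated code; assumes an
+        already-configured SDK instance named `sdk` used inside a running asyncio event loop):
+
+        .. code-block:: python
+
+            events = await sdk.chat.stream_async(
+                model="mistral-large",  # placeholder model ID
+                messages=[{"role": "user", "content": "Write a haiku about the sea."}],
+            )
+            async for event in events:
+                delta = event.data.choices[0].delta.content
+                if delta is not None:
+                    print(delta, end="")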
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + ) + + req = self.build_request( + method="POST", + path="/streamRawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def complete( + self, *, + model: Nullable[str], + messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: 
Optional[models.ChatCompletionRequestToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
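+
+        Example (illustrative sketch only, not part of the generated code; assumes an
+        already-configured SDK instance named `sdk` and a model ID valid for your GCP
+        project — `mistral-large` is a placeholder):
+
+        .. code-block:: python
+
+            res = sdk.chat.complete(
+                model="mistral-large",  # placeholder model ID
+                messages=[{"role": "user", "content": "What is the capital of France?"}],
+            )
+            if res is not None:
+                # ChatCompletionResponse.choices holds one entry per returned completion.
+                print(res.choices[0].message.content)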
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + ) + + req = self.build_request( + method="POST", + path="/rawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + model: Nullable[str], + messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto", 
+ retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
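+
+        Example (illustrative sketch only, not part of the generated code; assumes an
+        already-configured SDK instance named `sdk` used inside a running asyncio event loop):
+
+        .. code-block:: python
+
+            res = await sdk.chat.complete_async(
+                model="mistral-large",  # placeholder model ID
+                messages=[{"role": "user", "content": "What is the capital of France?"}],
+            )
+            if res is not None:
+                print(res.choices[0].message.content)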
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + ) + + req = self.build_request( + method="POST", + path="/rawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py new file mode 100644 index 0000000..47d8c9a --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -0,0 +1,438 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai_gcp import models, utils +from mistralai_gcp._hooks import HookContext +from mistralai_gcp.types import Nullable, OptionalNullable, UNSET +from mistralai_gcp.utils import eventstreaming +from typing import Any, AsyncGenerator, Generator, Optional, Union + +class Fim(BaseSDK): + r"""Fill-in-the-middle API.""" + + + def stream( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/streamRawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/streamRawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), 
sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def complete( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FIMCompletionResponse]: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/rawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FIMCompletionResponse]: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
+ :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/rawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", 
http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py new file mode 100644 index 0000000..36b642a --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +# pyright: reportReturnType = false +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py new file mode 100644 index 0000000..79fb7c9 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict +from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestMessages, ChatCompletionRequestMessagesTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestTypedDict +from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict +from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from .completionevent import CompletionEvent, CompletionEventTypedDict +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict, FinishReason +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict +from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict +from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict +from .function import Function, FunctionTypedDict +from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict +from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats +from .sdkerror import SDKError +from .security import Security, SecurityTypedDict +from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .tool import Tool, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict +from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict + +__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", 
"FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py new file mode 100644 index 0000000..f4e94f3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +AssistantMessageRole = Literal["assistant"] + +class AssistantMessageTypedDict(TypedDict): + content: NotRequired[Nullable[str]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: NotRequired[AssistantMessageRole] + + +class AssistantMessage(BaseModel): + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: Optional[AssistantMessageRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["content", "tool_calls", "prefix", "role"] + nullable_fields = ["content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py new file mode 100644 index 0000000..d868422 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai_gcp.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + finish_reason: ChatCompletionChoiceFinishReason + message: NotRequired[AssistantMessageTypedDict] + + +class ChatCompletionChoice(BaseModel): + index: int + finish_reason: ChatCompletionChoiceFinishReason + message: Optional[AssistantMessage] = None + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py new file mode 100644 index 0000000..759aa1e --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -0,0 +1,105 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ChatCompletionRequestToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. 
You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[ChatCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ChatCompletionRequestToolChoice] + + +class ChatCompletionRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[ChatCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionRequestToolChoice] = "auto" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +ChatCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py new file mode 100644 index 0000000..c8ccdfc --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
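Illustrative sketch, not part of the generated patch: how the request/message models above might be used together. The `MistralGoogleCloud` client and its `chat.complete` method are assumed from elsewhere in this package (not shown in this hunk); the project ID and model name are placeholders. The trailing `AssistantMessage` uses the `prefix` flag documented above to force the reply to continue from the given text.

from mistralai_gcp import MistralGoogleCloud
from mistralai_gcp.models import AssistantMessage, UserMessage

# Placeholder region/project; credentials come from Application Default Credentials.
client = MistralGoogleCloud(region="europe-west4", project_id="my-gcp-project")

res = client.chat.complete(
    model="mistral-large-2407",  # placeholder; split into <name>-<version> by the request hook later in this patch
    messages=[
        UserMessage(content="Write one sentence about the sea."),
        # prefix=True: the model's answer will start with this content.
        AssistantMessage(content="The sea", prefix=True),
    ],
    temperature=0.3,
    max_tokens=64,
)
print(res.choices[0].message.content)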
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class ChatCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py new file mode 100644 index 0000000..ad0fc79 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -0,0 +1,103 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[MessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[StopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ToolChoice] + + +class ChatCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[Messages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[Stop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ToolChoice] = "auto" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +StopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py new file mode 100644 index 0000000..52266f4 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py new file mode 100644 index 0000000..5a6e3c2 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai_gcp.types import BaseModel +from typing import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py new file mode 100644 index 0000000..83a0b02 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, TypedDict + + +FinishReason = Literal["stop", "length", "error", "tool_calls"] + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[FinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + delta: DeltaMessage + finish_reason: Nullable[FinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["finish_reason"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py new file mode 100644 index 0000000..9adcb95 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ContentChunkTypedDict(TypedDict): + text: str + + +class ContentChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py new file mode 100644 index 0000000..34cc346 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
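Hedged usage sketch, not generated code: consuming the streaming models above (`CompletionEvent`, `CompletionChunk`, `CompletionResponseStreamChoice`). `chat.stream` is assumed from the package's Chat client (not shown in this hunk), and `client` is reused from the earlier sketch; each yielded event carries a `CompletionChunk` in its `data` field.

stream = client.chat.stream(
    model="mistral-large-2407",  # placeholder
    messages=[UserMessage(content="Count to three.")],
)
for event in stream:
    chunk = event.data                 # CompletionEvent.data -> CompletionChunk
    delta = chunk.choices[0].delta     # CompletionResponseStreamChoice.delta -> DeltaMessage
    if delta.content:
        print(delta.content, end="", flush=True)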
DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[str] + content: NotRequired[str] + tool_calls: NotRequired[Nullable[ToolCallTypedDict]] + + +class DeltaMessage(BaseModel): + role: Optional[str] = None + content: Optional[str] = None + tool_calls: OptionalNullable[ToolCall] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role", "content", "tool_calls"] + nullable_fields = ["tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py new file mode 100644 index 0000000..15e36cc --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class FIMCompletionRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class FIMCompletionRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +FIMCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py new file mode 100644 index 0000000..27fcc4f --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class FIMCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py new file mode 100644 index 0000000..3888846 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -0,0 +1,92 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
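For illustration only (not from the patch): a fill-in-the-middle call built from `FIMCompletionRequest`'s fields. `fim.complete` is assumed from the package's Fim client (not shown here) and `client` is reused from the earlier sketch; per the docstring above, only the `codestral` models are supported.

res = client.fim.complete(
    model="codestral-2405",
    prompt="def fibonacci(n: int) -> int:\n",
    suffix="\n    return result\n",
    max_tokens=128,
)
# The completion is the code the model fills in between prompt and suffix.
print(res.choices[0].message.content)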
We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py new file mode 100644 index 0000000..235eb34 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import Any, Dict, Optional, TypedDict +from typing_extensions import NotRequired + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + + +class Function(BaseModel): + name: str + parameters: Dict[str, Any] + description: Optional[str] = "" + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py new file mode 100644 index 0000000..c188ad4 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import Any, Dict, TypedDict, Union + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + arguments: Arguments + + +ArgumentsTypedDict = Union[Dict[str, Any], str] + + +Arguments = Union[Dict[str, Any], str] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py new file mode 100644 index 0000000..0347dc1 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .validationerror import ValidationError +from mistralai_gcp import utils +from mistralai_gcp.types import BaseModel +from typing import List, Optional + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[ValidationError]] = None + + + +class HTTPValidationError(Exception): + r"""Validation Error""" + data: HTTPValidationErrorData + + def __init__(self, data: HTTPValidationErrorData): + self.data = data + + def __str__(self) -> str: + return utils.marshal_json(self.data, HTTPValidationErrorData) + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py new file mode 100644 index 0000000..5c3e9b7 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ResponseFormats = Literal["text", "json_object"] +r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + +class ResponseFormatTypedDict(TypedDict): + type: NotRequired[ResponseFormats] + + +class ResponseFormat(BaseModel): + type: Optional[ResponseFormats] = "text" + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py new file mode 100644 index 0000000..03216cb --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from dataclasses import dataclass +from typing import Optional +import httpx + + +@dataclass +class SDKError(Exception): + """Represents an error returned by the API.""" + + message: str + status_code: int = -1 + body: str = "" + raw_response: Optional[httpx.Response] = None + + def __str__(self): + body = "" + if len(self.body) > 0: + body = f"\n{self.body}" + + return f"{self.message}: Status {self.status_code}{body}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py new file mode 100644 index 0000000..cd4d8f3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
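A minimal sketch of the JSON mode described in the `ResponseFormats` docstring above; not part of the generated code. As that docstring notes, the model must also be instructed to emit JSON via a system or user message. `chat.complete` and `client` are assumed as in the earlier sketches.

from mistralai_gcp.models import ResponseFormat, SystemMessage, UserMessage

res = client.chat.complete(
    model="mistral-large-2407",  # placeholder
    messages=[
        SystemMessage(content="Reply only with a JSON object."),
        UserMessage(content='Give {"city": ..., "country": ...} for Paris.'),
    ],
    # Guarantees the reply is valid JSON.
    response_format=ResponseFormat(type="json_object"),
)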
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from mistralai_gcp.utils import FieldMetadata, SecurityMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class SecurityTypedDict(TypedDict): + api_key: str + + +class Security(BaseModel): + api_key: Annotated[str, FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py new file mode 100644 index 0000000..461a4cc --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +Role = Literal["system"] + +class SystemMessageTypedDict(TypedDict): + content: ContentTypedDict + role: NotRequired[Role] + + +class SystemMessage(BaseModel): + content: Content + role: Optional[Role] = "system" + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py new file mode 100644 index 0000000..ecf2741 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class TextChunkTypedDict(TypedDict): + text: str + + +class TextChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py new file mode 100644 index 0000000..b4e0645 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai_gcp.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + + +class Tool(BaseModel): + function: Function + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py new file mode 100644 index 0000000..5ea87fd --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from mistralai_gcp.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + + +class ToolCall(BaseModel): + function: FunctionCall + id: Optional[str] = "null" + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py new file mode 100644 index 0000000..e36f803 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ToolMessageRole = Literal["tool"] + +class ToolMessageTypedDict(TypedDict): + content: str + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + role: NotRequired[ToolMessageRole] + + +class ToolMessage(BaseModel): + content: str + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET + role: Optional[ToolMessageRole] = "tool" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["tool_call_id", "name", "role"] + nullable_fields = ["tool_call_id", "name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py new file mode 100644 index 0000000..43877c9 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class UsageInfo(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py new file mode 100644 index 0000000..9e82ff3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
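Hedged sketch tying together the tool-calling models above (`Function`, `Tool`, `ToolCall`, `ToolMessage`); the weather tool and its result are invented for illustration, and `chat.complete`/`client` are assumed as before.

from mistralai_gcp.models import Function, Tool, ToolMessage, UserMessage

weather_tool = Tool(function=Function(
    name="get_weather",
    description="Look up current weather for a city.",
    parameters={
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
))

res = client.chat.complete(
    model="mistral-large-2407",  # placeholder
    messages=[UserMessage(content="What's the weather in Paris?")],
    tools=[weather_tool],
    tool_choice="any",
)

call = res.choices[0].message.tool_calls[0]  # ToolCall carrying a FunctionCall
follow_up = ToolMessage(
    content='{"temperature_c": 18}',          # invented tool result
    tool_call_id=call.id,
    name=call.function.name,
)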
DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +UserMessageRole = Literal["user"] + +class UserMessageTypedDict(TypedDict): + content: UserMessageContentTypedDict + role: NotRequired[UserMessageRole] + + +class UserMessage(BaseModel): + content: UserMessageContent + role: Optional[UserMessageRole] = "user" + + +UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] + + +UserMessageContent = Union[str, List[TextChunk]] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py new file mode 100644 index 0000000..4eee48c --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import List, TypedDict, Union + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + msg: str + type: str + + +LocTypedDict = Union[str, int] + + +Loc = Union[str, int] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/py.typed b/packages/mistralai_gcp/src/mistralai_gcp/py.typed new file mode 100644 index 0000000..3e38f1a --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py new file mode 100644 index 0000000..3c530c8 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -0,0 +1,174 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +import json +from typing import Optional, Union + +import google.auth +import google.auth.credentials +import google.auth.transport +import google.auth.transport.requests +import httpx +from mistralai_gcp import models +from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks +from mistralai_gcp.chat import Chat +from mistralai_gcp.fim import Fim +from mistralai_gcp.types import Nullable + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, HttpClient +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, NoOpLogger +from .utils.retries import RetryConfig + + +class MistralGoogleCloud(BaseSDK): + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + + chat: Chat + fim: Fim + r"""Chat Completion API""" + + def __init__( + self, + region: str = "europe-west4", + project_id: Optional[str] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: Optional[Nullable[RetryConfig]] = None, + debug_logger: Optional[Logger] = None, + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. 
+ + :param region: The Google Cloud region to use for all methods + :param project_id: The project ID to use for all methods + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + """ + + credentials, loaded_project_id = google.auth.default( + scopes=["https://www.googleapis.com/auth/cloud-platform"], + ) + + if not isinstance(credentials, google.auth.credentials.Credentials): + raise models.SDKError( + "credentials must be an instance of google.auth.credentials.Credentials" + ) + + project_id = project_id or loaded_project_id + if project_id is None: + raise models.SDKError("project_id must be provided") + + def auth_token() -> str: + if credentials.expired: + credentials.refresh(google.auth.transport.requests.Request()) + token = credentials.token + if not token: + raise models.SDKError("Failed to get token from credentials") + return token + + if client is None: + client = httpx.Client() + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + if async_client is None: + async_client = httpx.AsyncClient() + + if debug_logger is None: + debug_logger = NoOpLogger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." + + security = None + if callable(auth_token): + security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment + api_key=auth_token() + ) + else: + security = models.Security(api_key=auth_token) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + async_client=async_client, + security=security, + server_url=f"https://{region}-aiplatform.googleapis.com", + server=None, + retry_config=retry_config, + debug_logger=debug_logger, + ), + ) + + hooks = SDKHooks() + + hook = GoogleCloudBeforeRequestHook(region, project_id) + hooks.register_before_request_hook(hook) + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, self.sdk_configuration.client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + + self._init_sdks() + + def _init_sdks(self): + self.chat = Chat(self.sdk_configuration) + self.fim = Fim(self.sdk_configuration) + + +class GoogleCloudBeforeRequestHook(BeforeRequestHook): + + def __init__(self, region: str, project_id: str): + self.region = region + self.project_id = project_id + + def before_request( + self, hook_ctx, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + # The goal of this function is to template in the region, project, model, and model_version into the URL path + # We do this here so that the API remains more user-friendly + model = None + model_version = None + new_content = None + if request.content: + parsed = json.loads(request.content.decode("utf-8")) + model_raw = parsed.get("model") + model = "-".join(model_raw.split("-")[:-1]) + model_version = model_raw.split("-")[-1] + parsed["model"] = model + new_content = json.dumps(parsed).encode("utf-8") + + if model == "": + raise models.SDKError("model must be provided") + + if model_version is None: + raise models.SDKError("model_version must be provided") + + stream = "streamRawPredict" in 
request.url.path + specifier = "streamRawPredict" if stream else "rawPredict" + url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model}@{model_version}:{specifier}" + + headers = dict(request.headers) + # Delete content-length header as it will need to be recalculated + headers.pop("content-length", None) + + next_request = httpx.Request( + method=request.method, + url=request.url.copy_with(path=url), + headers=headers, + content=new_content, + stream=None, + ) + + return next_request diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py new file mode 100644 index 0000000..65d3c75 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + + +from ._hooks import SDKHooks +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai_gcp import models +from mistralai_gcp.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_PROD = "prod" +r"""Production server""" +SERVERS = { + SERVER_PROD: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: HttpClient + async_client: AsyncHttpClient + debug_logger: Logger + security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = "0.0.2" + sdk_version: str = "1.0.0-rc.2" + gen_version: str = "2.388.1" + user_agent: str = "speakeasy-sdk/python 1.0.0-rc.2 2.388.1 0.0.2 mistralai-gcp" + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def __post_init__(self): + self._hooks = SDKHooks() + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_PROD + + if self.server not in SERVERS: + raise ValueError(f"Invalid server \"{self.server}\"") + + return SERVERS[self.server], {} + + + def get_hooks(self) -> SDKHooks: + return self._hooks diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py new file mode 100644 index 0000000..fc76fe0 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basemodel import ( + BaseModel, + Nullable, + OptionalNullable, + UnrecognizedInt, + UnrecognizedStr, + UNSET, + UNSET_SENTINEL, +) + +__all__ = [ + "BaseModel", + "Nullable", + "OptionalNullable", + "UnrecognizedInt", + "UnrecognizedStr", + "UNSET", + "UNSET_SENTINEL", +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py new file mode 100644 index 0000000..a6187ef --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
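Worked example (not part of the patch) of the path templating performed by `GoogleCloudBeforeRequestHook.before_request` earlier in this file, using placeholder project and region values.

model_raw = "mistral-large-2407"                 # taken from the request body's "model" field
model = "-".join(model_raw.split("-")[:-1])      # "mistral-large"
model_version = model_raw.split("-")[-1]         # "2407"

# Non-streaming requests use rawPredict; streaming requests use streamRawPredict.
path = (
    "/v1/projects/my-gcp-project/locations/europe-west4"
    f"/publishers/mistralai/models/{model}@{model_version}:rawPredict"
)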
DO NOT EDIT.""" + +from pydantic import ConfigDict, model_serializer +from pydantic import BaseModel as PydanticBaseModel +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing_extensions import TypeAliasType, TypeAlias + + +class BaseModel(PydanticBaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() + ) + + +class Unset(BaseModel): + @model_serializer(mode="plain") + def serialize_model(self): + return UNSET_SENTINEL + + def __bool__(self) -> Literal[False]: + return False + + +UNSET = Unset() +UNSET_SENTINEL = "~?~unset~?~sentinel~?~" + + +T = TypeVar("T") +if TYPE_CHECKING: + Nullable: TypeAlias = Union[T, None] + OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] +else: + Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) + OptionalNullable = TypeAliasType( + "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) + ) + +UnrecognizedInt = NewType("UnrecognizedInt", int) +UnrecognizedStr = NewType("UnrecognizedStr", str) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py new file mode 100644 index 0000000..95aa1b6 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .annotations import get_discriminator +from .enums import OpenEnumMeta +from .headers import get_headers, get_response_headers +from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, +) +from .queryparams import get_query_params +from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig +from .requestbodies import serialize_request_body, SerializedRequestBody +from .security import get_security +from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, +) +from .url import generate_url, template_url, remove_suffix +from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .logger import Logger, get_body_content, NoOpLogger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_discriminator", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "HeaderMetadata", + "Logger", + "marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "NoOpLogger", + "OpenEnumMeta", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + "serialize_request_body", + "SerializedRequestBody", + "stream_to_text", + "template_url", + "unmarshal", + "unmarshal_json", + "validate_decimal", + "validate_float", + "validate_int", + "validate_open_enum", +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py new file mode 100644 index 
0000000..0d17472 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any + +def get_discriminator(model: Any, fieldname: str, key: str) -> str: + if isinstance(model, dict): + try: + return f'{model.get(key)}' + except AttributeError as e: + raise ValueError(f'Could not find discriminator key {key} in {model}') from e + + if hasattr(model, fieldname): + return f'{getattr(model, fieldname)}' + + fieldname = fieldname.upper() + if hasattr(model, fieldname): + return f'{getattr(model, fieldname)}' + + raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py new file mode 100644 index 0000000..c650b10 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import enum + + +class OpenEnumMeta(enum.EnumMeta): + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py new file mode 100644 index 0000000..553b386 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py @@ -0,0 +1,178 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import json +from typing import Callable, TypeVar, Optional, Generator, AsyncGenerator, Tuple +import httpx + +T = TypeVar("T") + + +class ServerEvent: + id: Optional[str] = None + event: Optional[str] = None + data: Optional[str] = None + retry: Optional[int] = None + + +MESSAGE_BOUNDARIES = [ + b"\r\n\r\n", + b"\n\n", + b"\r\r", +] + + +async def stream_events_async( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> AsyncGenerator[T, None]: + buffer = bytearray() + position = 0 + discard = False + async for chunk in response.aiter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. 
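+        # (annotation, not generated) The scan below walks `buffer` from `position`
+        # looking for one of MESSAGE_BOUNDARIES, parses each complete block into an
+        # event via _parse_event, and flips `discard` once the sentinel is observed.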
+ if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + discard = False + for chunk in response.iter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. + if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def _parse_event( + raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None +) -> Tuple[Optional[T], bool]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim <= 0: + continue + + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == "id": + event.id = value + publish = True + elif field == "retry": + event.retry = int(value) if value.isdigit() else None + publish = True + + if sentinel and data == f"{sentinel}\n": + return None, True + + if data: + data = data[:-1] + event.data = data + + data_is_primitive = ( + data.isnumeric() or data == "true" or data == "false" or data == "null" + ) + data_is_json = ( + data.startswith("{") or data.startswith("[") or data.startswith('"') + ) + + if data_is_primitive or data_is_json: + try: + event.data = json.loads(data) + except Exception: + pass + + out = None + if publish: + out = decoder(json.dumps(event.__dict__)) + + return out, False + + +def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py new file mode 100644 index 0000000..07f9b23 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py @@ -0,0 +1,207 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ( + FormMetadata, + MultipartFormMetadata, + find_field_metadata, +) +from .values import _val_to_string + + +def _populate_form( + field_name: str, + explode: bool, + obj: Any, + delimiter: str, + form: Dict[str, List[str]], +): + if obj is None: + return form + + if isinstance(obj, BaseModel): + items = [] + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_field_name = obj_field.alias if obj_field.alias is not None else name + if obj_field_name == "": + continue + + val = getattr(obj, name) + if val is None: + continue + + if explode: + form[obj_field_name] = [_val_to_string(val)] + else: + items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, Dict): + items = [] + for key, value in obj.items(): + if value is None: + continue + + if explode: + form[key] = [_val_to_string(value)] + else: + items.append(f"{key}{delimiter}{_val_to_string(value)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + if explode: + if not field_name in form: + form[field_name] = [] + form[field_name].append(_val_to_string(value)) + else: + items.append(_val_to_string(value)) + + if len(items) > 0: + form[field_name] = [delimiter.join([str(item) for item in items])] + else: + form[field_name] = [_val_to_string(obj)] + + return form + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: + form: Dict[str, Any] = {} + files: Dict[str, Any] = {} + + if not isinstance(request, BaseModel): + raise TypeError("invalid request body type") + + request_fields: Dict[str, FieldInfo] = request.__class__.model_fields + request_field_types = get_type_hints(request.__class__) + + for name in request_fields: + field = request_fields[name] + + val = getattr(request, name) + if val is None: + continue + + field_metadata = find_field_metadata(field, MultipartFormMetadata) + if not field_metadata: + continue + + f_name = field.alias if field.alias is not None else name + + if field_metadata.file: + file_fields: Dict[str, FieldInfo] = val.__class__.model_fields + + file_name = "" + field_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(val, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(val, file_field_name, None) + else: + field_name = ( + file_field.alias + if file_field.alias is not None + else file_field_name + ) + file_name = getattr(val, file_field_name) + + if field_name == "" or file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + if content_type is not None: + files[field_name] = (file_name, content, content_type) + else: + files[field_name] = (file_name, content) + elif field_metadata.json: + files[f_name] = ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ) + else: + if isinstance(val, 
List): + values = [] + + for value in val: + if value is None: + continue + values.append(_val_to_string(value)) + + form[f_name + "[]"] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if val is None: + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py new file mode 100644 index 0000000..e14a0f4 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py @@ -0,0 +1,136 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + List, + Optional, +) +from httpx import Headers +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + HeaderMetadata, + find_field_metadata, +) + +from .values import _populate_from_globals, _val_to_string + + +def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: + headers: Dict[str, str] = {} + + globals_already_populated = [] + if headers_params is not None: + globals_already_populated = _populate_headers(headers_params, gbls, headers, []) + if gbls is not None: + _populate_headers(gbls, None, headers, globals_already_populated) + + return headers + + +def _populate_headers( + headers_params: Any, + gbls: Any, + header_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(headers_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + f_name = field.alias if field.alias is not None else name + + metadata = find_field_metadata(field, HeaderMetadata) + if metadata is None: + continue + + value, global_found = _populate_from_globals( + name, getattr(headers_params, name), HeaderMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + value = _serialize_header(metadata.explode, value) + + if value != "": + header_values[f_name] = value + + return globals_already_populated + + +def _serialize_header(explode: bool, obj: Any) -> str: + if obj is None: + return "" + + if isinstance(obj, BaseModel): + items = [] + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) + + if not obj_param_metadata: + continue + + f_name = obj_field.alias if obj_field.alias is 
not None else name + + val = getattr(obj, name) + if val is None: + continue + + if explode: + items.append(f"{f_name}={_val_to_string(val)}") + else: + items.append(f_name) + items.append(_val_to_string(val)) + + if len(items) > 0: + return ",".join(items) + elif isinstance(obj, Dict): + items = [] + + for key, value in obj.items(): + if value is None: + continue + + if explode: + items.append(f"{key}={_val_to_string(value)}") + else: + items.append(key) + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join([str(item) for item in items]) + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join(items) + else: + return f"{_val_to_string(obj)}" + + return "" + + +def get_response_headers(headers: Headers) -> Dict[str, List[str]]: + res: Dict[str, List[str]] = {} + for k, v in headers.items(): + if not k in res: + res[k] = [] + + res[k].append(v) + return res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py new file mode 100644 index 0000000..7e4bbea --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Any, Protocol + +class Logger(Protocol): + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + +class NoOpLogger: + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + +def get_body_content(req: httpx.Request) -> str: + return "" if not hasattr(req, "_content") else str(req.content) + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py new file mode 100644 index 0000000..173b3e5 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Optional, Type, TypeVar, Union +from dataclasses import dataclass +from pydantic.fields import FieldInfo + + +T = TypeVar("T") + + +@dataclass +class SecurityMetadata: + option: bool = False + scheme: bool = False + scheme_type: Optional[str] = None + sub_type: Optional[str] = None + field_name: Optional[str] = None + + def get_field_name(self, default: str) -> str: + return self.field_name or default + + +@dataclass +class ParamMetadata: + serialization: Optional[str] = None + style: str = "simple" + explode: bool = False + + +@dataclass +class PathParamMetadata(ParamMetadata): + pass + + +@dataclass +class QueryParamMetadata(ParamMetadata): + style: str = "form" + explode: bool = True + + +@dataclass +class HeaderMetadata(ParamMetadata): + pass + + +@dataclass +class RequestMetadata: + media_type: str = "application/octet-stream" + + +@dataclass +class MultipartFormMetadata: + file: bool = False + content: bool = False + json: bool = False + + +@dataclass +class FormMetadata: + json: bool = False + style: str = "form" + explode: bool = True + + +class FieldMetadata: + security: Optional[SecurityMetadata] = None + path: Optional[PathParamMetadata] = None + query: Optional[QueryParamMetadata] = None + header: Optional[HeaderMetadata] = None + request: Optional[RequestMetadata] = None + form: Optional[FormMetadata] = None + multipart: Optional[MultipartFormMetadata] = None + + def __init__( + self, + security: Optional[SecurityMetadata] = None, + path: Optional[Union[PathParamMetadata, bool]] = None, + query: Optional[Union[QueryParamMetadata, bool]] = None, + header: Optional[Union[HeaderMetadata, bool]] = None, + request: Optional[Union[RequestMetadata, bool]] = None, + form: Optional[Union[FormMetadata, bool]] = None, + multipart: Optional[Union[MultipartFormMetadata, bool]] = None, + ): + self.security = security + self.path = PathParamMetadata() if isinstance(path, bool) else path + self.query = QueryParamMetadata() if isinstance(query, bool) else query + self.header = HeaderMetadata() if isinstance(header, bool) else header + self.request = RequestMetadata() if isinstance(request, bool) else request + self.form = FormMetadata() if isinstance(form, bool) else form + self.multipart = ( + MultipartFormMetadata() if isinstance(multipart, bool) else multipart + ) + + +def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = find_metadata(field_info, FieldMetadata) + if not metadata: + return None + + fields = metadata.__dict__ + + for field in fields: + if isinstance(fields[field], metadata_type): + return fields[field] + + return None + + +def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = field_info.metadata + if not metadata: + return None + + for md in metadata: + if isinstance(md, metadata_type): + return md + + return None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py new file mode 100644 index 0000000..1c8c583 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py @@ -0,0 +1,203 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, +) + +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + QueryParamMetadata, + find_field_metadata, +) +from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .forms import _populate_form + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, []) + if gbls is not None: + _populate_query_params(gbls, None, params, globals_already_populated) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(query_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields + param_field_types = get_type_hints(query_params.__class__) + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + + metadata = find_field_metadata(field, QueryParamMetadata) + if not metadata: + continue + + value = getattr(query_params, name) if query_params is not None else None + + value, global_found = _populate_from_globals( + name, value, QueryParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + f_name = field.alias if field.alias is not None else name + serialization = metadata.serialization + if serialization is not None: + serialized_parms = _get_serialized_params( + metadata, f_name, value, param_field_types[name] + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.style + if style == "deepObject": + _populate_deep_object_query_params(f_name, value, query_param_values) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise NotImplementedError( + f"query param style {style} not yet supported" + ) + + return globals_already_populated + + +def _populate_deep_object_query_params( + field_name: str, + obj: Any, + params: Dict[str, List[str]], +): + if obj is None: + return + + if isinstance(obj, BaseModel): + _populate_deep_object_query_params_basemodel(field_name, obj, params) + elif isinstance(obj, Dict): + _populate_deep_object_query_params_dict(field_name, obj, params) + + +def _populate_deep_object_query_params_basemodel( + prior_params_key: str, + obj: Any, + params: Dict[str, List[str]], +): + if obj is None: + return + + if not isinstance(obj, BaseModel): + return + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + + f_name = obj_field.alias if obj_field.alias is not None else name + + params_key = f"{prior_params_key}[{f_name}]" + + obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) + if obj_param_metadata is None: + continue + + obj_val = getattr(obj, name) + if obj_val is None: + continue + + if isinstance(obj_val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif 
isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if value is None: + return + + for key, val in value.items(): + if val is None: + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if value is None: + return + + for val in value: + if val is None: + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def _populate_delimited_query_params( + metadata: QueryParamMetadata, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py new file mode 100644 index 0000000..4f586ae --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: str + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type {type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py new file mode 100644 index 0000000..a06f927 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import random +import time +from typing import List + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + + def __init__(self, response: httpx.Response): + self.response = response + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + 
+def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + sleep = min(sleep, max_interval / 1000) + time.sleep(sleep) + retries += 1 + + +async def retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + sleep = min(sleep, max_interval / 1000) + time.sleep(sleep) + retries += 1 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py new file mode 100644 index 0000000..aab4cb6 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py @@ -0,0 +1,168 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import base64 +from typing import ( + Any, + Dict, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) + + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth which could be a flattened model + if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def 
_parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http" and sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py new file mode 100644 index 0000000..a98998a --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -0,0 +1,181 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +import json +from typing import Any, Dict, List, Union, get_args +import httpx +from typing_extensions import get_origin +from pydantic import ConfigDict, create_model +from pydantic_core import from_json +from typing_inspect import is_optional_type + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable + + +def serialize_decimal(as_str: bool): + def serialize(d): + if is_optional_type(type(d)) and d is None: + return None + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, Decimal): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + if is_optional_type(type(f)) and f is None: + return None + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, float): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(b): + if is_optional_type(type(b)) and b is None: + return None + + if not isinstance(b, int): + raise ValueError("Expected int") + + return str(b) if as_str else b + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, int): + return b + + if not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_open_enum(is_int: bool): + def validate(e): + if e is None: + return None + + if is_int: + if not isinstance(e, int): + raise ValueError("Expected int") + else: + if not isinstance(e, str): + raise ValueError("Expected string") + + return e + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return 
any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py new file mode 100644 index 0000000..b201bfa --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py @@ -0,0 +1,150 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from decimal import Decimal +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, + Union, + get_args, + get_origin, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + PathParamMetadata, + find_field_metadata, +) +from .values import _get_serialized_params, _populate_from_globals, _val_to_string + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if gbls is not None: + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(path_params, BaseModel): + return globals_already_populated + + path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields + path_param_field_types = get_type_hints(path_params.__class__) + for name in path_param_fields: + if name in skip_fields: + continue + + field = path_param_fields[name] + + param_metadata = find_field_metadata(field, PathParamMetadata) + if param_metadata is None: + continue + + param = getattr(path_params, name) if path_params is not None else None + param, global_found = _populate_from_globals( + name, param, PathParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + if param is None: + continue + + f_name = field.alias if field.alias is not None else name + serialization = param_metadata.serialization + if serialization is not None: + serialized_params = _get_serialized_params( + param_metadata, f_name, param, path_param_field_types[name] + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + pp_vals: List[str] = [] + if param_metadata.style == "simple": + if isinstance(param, List): + for pp_val in param: + if pp_val is None: + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[f_name] = ",".join(pp_vals) + elif isinstance(param, Dict): + for pp_key in param: + if param[pp_key] is None: + continue + if param_metadata.explode: + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[f_name] = ",".join(pp_vals) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + param_fields: Dict[str, FieldInfo] = param.__class__.model_fields + for name in param_fields: + param_field = param_fields[name] + + param_value_metadata = find_field_metadata( + param_field, PathParamMetadata + ) + if param_value_metadata is None: + continue + + param_name = ( + param_field.alias if 
param_field.alias is not None else name + ) + + param_field_val = getattr(param, name) + if param_field_val is None: + continue + if param_metadata.explode: + pp_vals.append( + f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[f_name] = ",".join(pp_vals) + else: + path_param_values[f_name] = _val_to_string(param) + + return globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def remove_suffix(input_string, suffix): + if suffix and input_string.endswith(suffix): + return input_string[: -len(suffix)] + return input_string diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py new file mode 100644 index 0000000..24ccae3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py @@ -0,0 +1,128 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +from enum import Enum +from email.message import Message +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: + codes = code if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: + if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + param_metadata 
= find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def _val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params diff --git a/poetry.lock b/poetry.lock index 167827a..f22cde1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,25 +1,28 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "annotated-types" -version = "0.6.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "anyio" -version = "4.3.0" +version = "4.4.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, - {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, ] [package.dependencies] @@ -33,15 +36,139 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] +[[package]] +name = "astroid" +version = "3.2.4" +description = "An abstract syntax tree for Python with inference support." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, + {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "cachetools" +version = "5.4.0" +description = "Extensible memoizing collections and decorators" +optional = true +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, + {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, +] + [[package]] name = "certifi" -version = "2024.2.2" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, - {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = true +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, 
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = 
"charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, ] [[package]] @@ -55,20 +182,58 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", 
hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "google-auth" +version = "2.32.0" +description = "Google Authentication Library" +optional = true +python-versions = ">=3.7" +files = [ + {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, + {file = "google_auth-2.32.0.tar.gz", hash = "sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + [[package]] name = "h11" version = "0.14.0" @@ -82,13 +247,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.4" +version = "1.0.5" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"}, - {file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"}, + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, ] [package.dependencies] @@ -99,17 +264,17 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.25.0)"] +trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" -version = "0.25.2" +version = "0.27.0" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"}, - {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"}, + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, ] [package.dependencies] @@ -127,13 +292,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "idna" -version = "3.6" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -147,40 +312,76 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jsonpath-python" +version = "1.0.6" +description = "A more powerful JSONPath implementation in modern python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, + {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + [[package]] name = "mypy" -version = "1.9.0" +version = "1.10.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = 
"mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = 
"mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = 
"sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, ] [package.dependencies] @@ -206,209 +407,234 @@ files = [ ] [[package]] -name = "orjson" -version = "3.9.15" -description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.9.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d61f7ce4727a9fa7680cd6f3986b0e2c732639f46a5e0156e550e35258aa313a"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4feeb41882e8aa17634b589533baafdceb387e01e117b1ec65534ec724023d04"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fbbeb3c9b2edb5fd044b2a070f127a0ac456ffd079cb82746fc84af01ef021a4"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b66bcc5670e8a6b78f0313bcb74774c8291f6f8aeef10fe70e910b8040f3ab75"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2973474811db7b35c30248d1129c64fd2bdf40d57d84beed2a9a379a6f57d0ab"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fe41b6f72f52d3da4db524c8653e46243c8c92df826ab5ffaece2dba9cccd58"}, - {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4228aace81781cc9d05a3ec3a6d2673a1ad0d8725b4e915f1089803e9efd2b99"}, - {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f7b65bfaf69493c73423ce9db66cfe9138b2f9ef62897486417a8fcb0a92bfe"}, - {file = "orjson-3.9.15-cp310-none-win32.whl", hash = "sha256:2d99e3c4c13a7b0fb3792cc04c2829c9db07838fb6973e578b85c1745e7d0ce7"}, - {file = "orjson-3.9.15-cp310-none-win_amd64.whl", hash = "sha256:b725da33e6e58e4a5d27958568484aa766e825e93aa20c26c91168be58e08cbb"}, - {file = "orjson-3.9.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c8e8fe01e435005d4421f183038fc70ca85d2c1e490f51fb972db92af6e047c2"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87f1097acb569dde17f246faa268759a71a2cb8c96dd392cd25c668b104cad2f"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff0f9913d82e1d1fadbd976424c316fbc4d9c525c81d047bbdd16bd27dd98cfc"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8055ec598605b0077e29652ccfe9372247474375e0e3f5775c91d9434e12d6b1"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6768a327ea1ba44c9114dba5fdda4a214bdb70129065cd0807eb5f010bfcbb5"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12365576039b1a5a47df01aadb353b68223da413e2e7f98c02403061aad34bde"}, - {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:71c6b009d431b3839d7c14c3af86788b3cfac41e969e3e1c22f8a6ea13139404"}, - {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e18668f1bd39e69b7fed19fa7cd1cd110a121ec25439328b5c89934e6d30d357"}, - {file = "orjson-3.9.15-cp311-none-win32.whl", hash = "sha256:62482873e0289cf7313461009bf62ac8b2e54bc6f00c6fabcde785709231a5d7"}, - {file = "orjson-3.9.15-cp311-none-win_amd64.whl", hash = 
"sha256:b3d336ed75d17c7b1af233a6561cf421dee41d9204aa3cfcc6c9c65cd5bb69a8"}, - {file = "orjson-3.9.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:82425dd5c7bd3adfe4e94c78e27e2fa02971750c2b7ffba648b0f5d5cc016a73"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c51378d4a8255b2e7c1e5cc430644f0939539deddfa77f6fac7b56a9784160a"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6ae4e06be04dc00618247c4ae3f7c3e561d5bc19ab6941427f6d3722a0875ef7"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcef128f970bb63ecf9a65f7beafd9b55e3aaf0efc271a4154050fc15cdb386e"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b72758f3ffc36ca566ba98a8e7f4f373b6c17c646ff8ad9b21ad10c29186f00d"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c57bc7b946cf2efa67ac55766e41764b66d40cbd9489041e637c1304400494"}, - {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:946c3a1ef25338e78107fba746f299f926db408d34553b4754e90a7de1d44068"}, - {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2f256d03957075fcb5923410058982aea85455d035607486ccb847f095442bda"}, - {file = "orjson-3.9.15-cp312-none-win_amd64.whl", hash = "sha256:5bb399e1b49db120653a31463b4a7b27cf2fbfe60469546baf681d1b39f4edf2"}, - {file = "orjson-3.9.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b17f0f14a9c0ba55ff6279a922d1932e24b13fc218a3e968ecdbf791b3682b25"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f6cbd8e6e446fb7e4ed5bac4661a29e43f38aeecbf60c4b900b825a353276a1"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76bc6356d07c1d9f4b782813094d0caf1703b729d876ab6a676f3aaa9a47e37c"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdfa97090e2d6f73dced247a2f2d8004ac6449df6568f30e7fa1a045767c69a6"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7413070a3e927e4207d00bd65f42d1b780fb0d32d7b1d951f6dc6ade318e1b5a"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9cf1596680ac1f01839dba32d496136bdd5d8ffb858c280fa82bbfeb173bdd40"}, - {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:809d653c155e2cc4fd39ad69c08fdff7f4016c355ae4b88905219d3579e31eb7"}, - {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:920fa5a0c5175ab14b9c78f6f820b75804fb4984423ee4c4f1e6d748f8b22bc1"}, - {file = "orjson-3.9.15-cp38-none-win32.whl", hash = "sha256:2b5c0f532905e60cf22a511120e3719b85d9c25d0e1c2a8abb20c4dede3b05a5"}, - {file = "orjson-3.9.15-cp38-none-win_amd64.whl", hash = "sha256:67384f588f7f8daf040114337d34a5188346e3fae6c38b6a19a2fe8c663a2f9b"}, - {file = "orjson-3.9.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6fc2fe4647927070df3d93f561d7e588a38865ea0040027662e3e541d592811e"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34cbcd216e7af5270f2ffa63a963346845eb71e174ea530867b7443892d77180"}, - {file = 
"orjson-3.9.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f541587f5c558abd93cb0de491ce99a9ef8d1ae29dd6ab4dbb5a13281ae04cbd"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92255879280ef9c3c0bcb327c5a1b8ed694c290d61a6a532458264f887f052cb"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:05a1f57fb601c426635fcae9ddbe90dfc1ed42245eb4c75e4960440cac667262"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ede0bde16cc6e9b96633df1631fbcd66491d1063667f260a4f2386a098393790"}, - {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e88b97ef13910e5f87bcbc4dd7979a7de9ba8702b54d3204ac587e83639c0c2b"}, - {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57d5d8cf9c27f7ef6bc56a5925c7fbc76b61288ab674eb352c26ac780caa5b10"}, - {file = "orjson-3.9.15-cp39-none-win32.whl", hash = "sha256:001f4eb0ecd8e9ebd295722d0cbedf0748680fb9998d3993abaed2f40587257a"}, - {file = "orjson-3.9.15-cp39-none-win_amd64.whl", hash = "sha256:ea0b183a5fe6b2b45f3b854b0d19c4e932d6f5934ae1f723b07cf9560edd4ec7"}, - {file = "orjson-3.9.15.tar.gz", hash = "sha256:95cae920959d772f30ab36d3b25f83bb0f3be671e986c72ce22f8fa700dae061"}, + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] -name = "packaging" -version = "24.0" -description = "Core utilities for Python packages" +name = "platformdirs" +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, - {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, ] +[package.extras] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] + [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pyasn1" +version = "0.6.0" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = true +python-versions = ">=3.8" +files = [ + {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, + {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.0" +description = "A collection of ASN.1-based protocols modules" +optional = true +python-versions = ">=3.8" +files = [ + {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, + {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + [[package]] name = "pydantic" -version = "2.6.4" +version = "2.8.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"}, - {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"}, + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.16.3" -typing-extensions = ">=4.6.1" +pydantic-core = "2.20.1" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] [package.extras] email = ["email-validator 
(>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.16.3" -description = "" +version = "2.20.1" +description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, - {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, - {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, - {file = 
"pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, - {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, - {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, - {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, - {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, - {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, - {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash 
= "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, - {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, - {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, - {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, - {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, - {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = 
"pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = 
"pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = 
"pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = 
"pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, ] [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pylint" +version = "3.2.3" +description = "python code static checker" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, + {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, +] + +[package.dependencies] +astroid = ">=3.2.2,<=3.3.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomli = {version = 
">=1.1.0", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + [[package]] name = "pytest" -version = "7.4.4" +version = "8.3.2" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, - {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, ] [package.dependencies] @@ -416,21 +642,21 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-asyncio" -version = "0.23.5.post1" +version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-asyncio-0.23.5.post1.tar.gz", hash = "sha256:b9a8806bea78c21276bc34321bbf234ba1b2ea5b30d9f0ce0f2dea45e4685813"}, - {file = "pytest_asyncio-0.23.5.post1-py3-none-any.whl", hash = "sha256:30f54d27774e79ac409778889880242b0403d09cabd65b727ce90fe92dd5d80e"}, + {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, + {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, ] [package.dependencies] @@ -441,29 +667,63 @@ docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] [[package]] -name = "ruff" -version = "0.1.15" -description = "An extremely fast Python linter and code formatter, written in Rust." +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" optional = false -python-versions = ">=3.7" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." 
+optional = true +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = true +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ - {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"}, - {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"}, - {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"}, - {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"}, - {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = 
"sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"}, - {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"}, + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] [[package]] @@ -489,39 +749,62 @@ files = [ ] [[package]] -name = "types-requests" -version = "2.31.0.20240311" -description = "Typing stubs for requests" +name = "tomlkit" +version = "0.13.0" +description = "Style preserving TOML library" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.31.0.20240311.tar.gz", hash = "sha256:b1c1b66abfb7fa79aae09097a811c4aa97130eb8831c60e47aee4ca344731ca5"}, - {file = "types_requests-2.31.0.20240311-py3-none-any.whl", hash = "sha256:47872893d65a38e282ee9f277a4ee50d1b28bd592040df7d1fdaffdf3779937d"}, + {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, + {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, ] -[package.dependencies] -urllib3 = ">=2" +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20240316" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, + {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, +] [[package]] name = "typing-extensions" -version = "4.10.0" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, - {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." +optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, ] +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + [[package]] name = "urllib3" -version = "2.2.1" +version = "2.2.2" description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = false +optional = true python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] @@ -530,7 +813,10 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[extras] +gcp = ["google-auth", "requests"] + [metadata] lock-version = "2.0" -python-versions = ">=3.9,<4.0" -content-hash = "a1b9663d7041a47bc8b6705e4fc9bd4563718a49e492aa8f0edf96fb8afa468b" +python-versions = "^3.8" +content-hash = "a1ca991b0570a5c978745559e8d18354ec04cbd566513cc895346ec1bae01112" diff --git a/poetry.toml b/poetry.toml new file mode 100644 index 0000000..ab1033b --- /dev/null +++ b/poetry.toml @@ -0,0 +1,2 @@ +[virtualenvs] +in-project = true diff --git a/py.typed b/py.typed new file mode 100644 index 0000000..3e38f1a --- /dev/null +++ b/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/pylintrc b/pylintrc new file mode 100644 index 0000000..5080038 --- /dev/null +++ b/pylintrc @@ -0,0 +1,658 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. 
The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.8 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. 
+class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. 
+#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). 
+import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. 
+notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. 
Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/pyproject.toml b/pyproject.toml index 4b115d6..bea3e42 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,42 +1,53 @@ [tool.poetry] name = "mistralai" -version = "0.4.2" -description = "" -authors = ["Bam4d "] +version = "1.0.0-rc.2" +description = "Python Client SDK for the Mistral AI API." 
+authors = ["Mistral"] readme = "README.md" -license = "Apache 2.0 License" - -[tool.ruff] -select = ["E", "F", "W", "Q", "I"] -ignore = ["E203"] -fixable = ["ALL"] -unfixable = [] -line-length = 120 - - -[tool.mypy] -disallow_untyped_defs = true -show_error_codes = true -no_implicit_optional = true -warn_return_any = true -warn_unused_ignores = true -exclude = ["docs", "tests", "examples", "tools", "build"] +packages = [ + { include = "mistralai", from = "src" }, + { include = "mistralai_azure", from = "packages/mistralai_azure/src" }, + { include = "mistralai_gcp", from = "packages/mistralai_gcp/src" }, +] +include = ["py.typed", "src/mistralai/py.typed"] +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai/py.typed"] [tool.poetry.dependencies] -python = ">=3.9,<4.0" -orjson = ">=3.9.10,<3.11" -pydantic = ">=2.5.2,<3" -httpx = ">=0.25,<1" - +python = "^3.8" +httpx = "^0.27.0" +jsonpath-python = "^1.0.6" +pydantic = "~2.8.2" +python-dateutil = "^2.9.0.post0" +typing-inspect = "^0.9.0" +google-auth = { version = "^2.31.0", optional = true } +requests = { version = "^2.32.3", optional = true } [tool.poetry.group.dev.dependencies] -ruff = "^0.1.6" -mypy = "^1.7.1" -types-requests = "^2.31.0.10" -pytest = "^7.4.3" -pytest-asyncio = "^0.23.2" +mypy = "==1.10.1" +pylint = "==3.2.3" +pytest = "^8.2.2" +pytest-asyncio = "^0.23.7" +types-python-dateutil = "^2.9.0.20240316" + +[tool.poetry.extras] +gcp = ["google-auth", "requests"] [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true diff --git a/scripts/compile.sh b/scripts/compile.sh new file mode 100755 index 0000000..aa49772 --- /dev/null +++ b/scripts/compile.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +set -o pipefail # Ensure pipeline failures are propagated + +# Use temporary files to store outputs and exit statuses +declare -A output_files +declare -A status_files + +# Function to run a command with temporary output and status files +run_command() { + local cmd="$1" + local key="$2" + local output_file="$3" + local status_file="$4" + + # Run the command and store output and exit status + { + eval "$cmd" + echo $? > "$status_file" + } &> "$output_file" & +} + +# Create temporary files for outputs and statuses +for cmd in compileall pylint mypy pyright; do + output_files[$cmd]=$(mktemp) + status_files[$cmd]=$(mktemp) +done + +# Collect PIDs for background processes +declare -a pids + +# Run commands in parallel using temporary files +echo "Running python -m compileall" +run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" +pids+=($!) + +echo "Running pylint" +run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" +pids+=($!) + +echo "Running mypy" +run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" +pids+=($!) + +echo "Running pyright (optional)" +run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" +pids+=($!) 
+ +# Wait for all processes to complete +echo "Waiting for processes to complete" +for pid in "${pids[@]}"; do + wait "$pid" +done + +# Print output sequentially and check for failures +failed=false +for key in "${!output_files[@]}"; do + echo "--- Output from Command: $key ---" + echo + cat "${output_files[$key]}" + echo # Empty line for separation + echo "--- End of Output from Command: $key ---" + echo + + exit_status=$(cat "${status_files[$key]}") + if [ "$exit_status" -ne 0 ]; then + echo "Command $key failed with exit status $exit_status" >&2 + failed=true + fi +done + +# Clean up temporary files +for tmp_file in "${output_files[@]}" "${status_files[@]}"; do + rm -f "$tmp_file" +done + +if $failed; then + echo "One or more commands failed." >&2 + exit 1 +else + echo "All commands completed successfully." + exit 0 +fi diff --git a/scripts/publish.sh b/scripts/publish.sh new file mode 100755 index 0000000..1ee7194 --- /dev/null +++ b/scripts/publish.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} + +poetry publish --build --skip-existing diff --git a/src/mistralai/__init__.py b/src/mistralai/__init__.py index e69de29..68138c4 100644 --- a/src/mistralai/__init__.py +++ b/src/mistralai/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdk import * +from .sdkconfiguration import * +from .models import * diff --git a/src/mistralai/_hooks/__init__.py b/src/mistralai/_hooks/__init__.py new file mode 100644 index 0000000..2ee66cd --- /dev/null +++ b/src/mistralai/_hooks/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/src/mistralai/_hooks/custom_user_agent.py b/src/mistralai/_hooks/custom_user_agent.py new file mode 100644 index 0000000..59506ea --- /dev/null +++ b/src/mistralai/_hooks/custom_user_agent.py @@ -0,0 +1,16 @@ +# MAKE SURE YOU UPDATE THE COPIES OF THIS FILES IN THE PROVIDERS'S PACKAGES WHEN YOU MAKE CHANGES HERE +from typing import Union + +import httpx + +from .types import BeforeRequestContext, BeforeRequestHook + + +class CustomUserAgentHook(BeforeRequestHook): + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + request.headers["user-agent"] = ( + "mistral-client-python/" + request.headers["user-agent"].split(" ")[1] + ) + return request diff --git a/src/mistralai/_hooks/deprecation_warning.py b/src/mistralai/_hooks/deprecation_warning.py new file mode 100644 index 0000000..8de2968 --- /dev/null +++ b/src/mistralai/_hooks/deprecation_warning.py @@ -0,0 +1,26 @@ +import logging +from typing import Union + +import httpx + +from .types import AfterSuccessContext, AfterSuccessHook + +logger = logging.getLogger(__name__) + +HEADER_MODEL_DEPRECATION_TIMESTAMP = "x-model-deprecation-timestamp" + + +class DeprecationWarningHook(AfterSuccessHook): + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: + if HEADER_MODEL_DEPRECATION_TIMESTAMP in response.headers: + model = response.json()["model"] + # pylint: disable=logging-fstring-interpolation + logger.warning( + "WARNING: The model %s is deprecated and will be removed on %s. 
Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.", + model, + response.headers[HEADER_MODEL_DEPRECATION_TIMESTAMP], + ) + return response diff --git a/src/mistralai/_hooks/registration.py b/src/mistralai/_hooks/registration.py new file mode 100644 index 0000000..fc3ae79 --- /dev/null +++ b/src/mistralai/_hooks/registration.py @@ -0,0 +1,17 @@ +from .custom_user_agent import CustomUserAgentHook +from .deprecation_warning import DeprecationWarningHook +from .types import Hooks + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. + + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + hooks.register_before_request_hook(CustomUserAgentHook()) + hooks.register_after_success_hook(DeprecationWarningHook()) diff --git a/src/mistralai/_hooks/sdkhooks.py b/src/mistralai/_hooks/sdkhooks.py new file mode 100644 index 0000000..24b0d08 --- /dev/null +++ b/src/mistralai/_hooks/sdkhooks.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai.httpclient import HttpClient + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, 
error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py new file mode 100644 index 0000000..e9391f3 --- /dev/null +++ b/src/mistralai/_hooks/types.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + + +from abc import ABC, abstractmethod +import httpx +from mistralai.httpclient import HttpClient +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py new file mode 100644 index 0000000..12ea575 --- /dev/null +++ b/src/mistralai/agents.py @@ -0,0 +1,434 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import eventstreaming, get_security_from_env +from typing import Any, AsyncGenerator, Generator, List, Optional, Union + +class Agents(BaseSDK): + r"""Agents API.""" + + + def complete( + self, *, + messages: Union[List[models.AgentsCompletionRequestMessages], List[models.AgentsCompletionRequestMessagesTypedDict]], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.AgentsCompletionRequestStop, models.AgentsCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.AgentsCompletionRequestToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + agent_id=agent_id, + ) + + req = self.build_request( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + messages: Union[List[models.AgentsCompletionRequestMessages], List[models.AgentsCompletionRequestMessagesTypedDict]], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.AgentsCompletionRequestStop, models.AgentsCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.AgentsCompletionRequestToolChoice] = "auto", + retries: 
OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + agent_id=agent_id, + ) + + req = self.build_request( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = 
utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def stream( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.AgentsCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.AgentsCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: 
utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index abe8654..f9522a2 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -1,423 +1,15 @@ -import asyncio -import posixpath -from json import JSONDecodeError -from typing import Any, AsyncGenerator, Callable, Dict, List, Optional, Union +from typing import Optional -from httpx import ( - AsyncClient, - AsyncHTTPTransport, - ConnectError, - Limits, - RequestError, - Response, -) +from .client import MIGRATION_MESSAGE -from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES -from mistralai.exceptions import ( - MistralAPIException, - MistralAPIStatusException, - MistralConnectionException, - MistralException, -) -from mistralai.files import FilesAsyncClient -from mistralai.jobs import JobsAsyncClient -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ResponseFormat, - ToolChoice, -) -from mistralai.models.embeddings import EmbeddingResponse -from mistralai.models.models import ModelDeleted, ModelList - -class MistralAsyncClient(ClientBase): +class MistralAsyncClient: def __init__( self, api_key: Optional[str] = None, - endpoint: str = ENDPOINT, + endpoint: str = "", max_retries: int = 5, timeout: int = 120, max_concurrent_requests: int = 64, ): - super().__init__(endpoint, api_key, max_retries, timeout) - - self._client = AsyncClient( - follow_redirects=True, - timeout=timeout, - limits=Limits(max_connections=max_concurrent_requests), - transport=AsyncHTTPTransport(retries=max_retries), - ) - self.files = FilesAsyncClient(self) - self.jobs = JobsAsyncClient(self) - - async def close(self) -> None: - await self._client.aclose() - - async def _check_response_status_codes(self, response: Response) -> None: - if response.status_code in RETRY_STATUS_CODES: - raise MistralAPIStatusException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif 400 <= response.status_code < 500: - if response.stream: - await response.aread() - raise MistralAPIException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif response.status_code >= 500: - if response.stream: - await response.aread() - raise MistralException( - message=f"Status: {response.status_code}. 
Message: {response.text}", - ) - - async def _check_streaming_response(self, response: Response) -> None: - await self._check_response_status_codes(response) - - async def _check_response(self, response: Response) -> Dict[str, Any]: - await self._check_response_status_codes(response) - - json_response: Dict[str, Any] = response.json() - - if "object" not in json_response: - raise MistralException(message=f"Unexpected response: {json_response}") - if "error" == json_response["object"]: # has errors - raise MistralAPIException.from_response( - response, - message=json_response["message"], - ) - - return json_response - - async def _request( - self, - method: str, - json: Optional[Dict[str, Any]], - path: str, - stream: bool = False, - attempt: int = 1, - data: Optional[Dict[str, Any]] = None, - check_model_deprecation_headers_callback: Optional[Callable] = None, - **kwargs: Any, - ) -> AsyncGenerator[Dict[str, Any], None]: - accept_header = "text/event-stream" if stream else "application/json" - headers = { - "Accept": accept_header, - "User-Agent": f"mistral-client-python/{self._version}", - "Authorization": f"Bearer {self._api_key}", - } - - if json is not None: - headers["Content-Type"] = "application/json" - - url = posixpath.join(self._endpoint, path) - - self._logger.debug(f"Sending request: {method} {url} {json}") - - response: Response - - try: - if stream: - async with self._client.stream( - method, - url, - headers=headers, - json=json, - data=data, - **kwargs, - ) as response: - if check_model_deprecation_headers_callback: - check_model_deprecation_headers_callback(response.headers) - await self._check_streaming_response(response) - - async for line in response.aiter_lines(): - json_streamed_response = self._process_line(line) - if json_streamed_response: - yield json_streamed_response - - else: - response = await self._client.request( - method, - url, - headers=headers, - json=json, - data=data, - **kwargs, - ) - if check_model_deprecation_headers_callback: - check_model_deprecation_headers_callback(response.headers) - yield await self._check_response(response) - - except ConnectError as e: - raise MistralConnectionException(str(e)) from e - except RequestError as e: - raise MistralException(f"Unexpected exception ({e.__class__.__name__}): {e}") from e - except JSONDecodeError as e: - raise MistralAPIException.from_response( - response, - message=f"Failed to decode json body: {response.text}", - ) from e - except MistralAPIStatusException as e: - attempt += 1 - if attempt > self._max_retries: - raise MistralAPIStatusException.from_response(response, message=str(e)) from e - backoff = 2.0**attempt # exponential backoff - await asyncio.sleep(backoff) - - # Retry as a generator - async for r in self._request(method, json, path, stream=stream, attempt=attempt): - yield r - - async def chat( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - safe_prompt: bool = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> ChatCompletionResponse: - """A asynchronous chat endpoint that returns a single response. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[Any]): messages an array of messages to chat with, e.g. 
- [{role: 'user', content: 'What is the best French cheese?'}] - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. - safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. - - Returns: - ChatCompletionResponse: a response object containing the generated text. - """ - request = self._make_chat_request( - messages, - model, - tools=tools, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=False, - safe_prompt=safe_mode or safe_prompt, - tool_choice=tool_choice, - response_format=response_format, - ) - - single_response = self._request( - "post", - request, - "v1/chat/completions", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for response in single_response: - return ChatCompletionResponse(**response) - - raise MistralException("No response received") - - async def chat_stream( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - safe_prompt: bool = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> AsyncGenerator[ChatCompletionStreamResponse, None]: - """An Asynchronous chat endpoint that streams responses. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[Any]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - tools (Optional[List[Function]], optional): a list of tools to use. - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. - safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. - - Returns: - AsyncGenerator[ChatCompletionStreamResponse, None]: - An async generator that yields ChatCompletionStreamResponse objects. 
- """ - - request = self._make_chat_request( - messages, - model, - tools=tools, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=True, - safe_prompt=safe_mode or safe_prompt, - tool_choice=tool_choice, - response_format=response_format, - ) - async_response = self._request( - "post", - request, - "v1/chat/completions", - stream=True, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for json_response in async_response: - yield ChatCompletionStreamResponse(**json_response) - - async def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse: - """An asynchronous embeddings endpoint that returns embeddings for a single, or batch of inputs - - Args: - model (str): The embedding model to use, e.g. mistral-embed - input (Union[str, List[str]]): The input to embed, - e.g. ['What is the best French cheese?'] - - Returns: - EmbeddingResponse: A response object containing the embeddings. - """ - request = {"model": model, "input": input} - single_response = self._request( - "post", - request, - "v1/embeddings", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for response in single_response: - return EmbeddingResponse(**response) - - raise MistralException("No response received") - - async def list_models(self) -> ModelList: - """Returns a list of the available models - - Returns: - ModelList: A response object containing the list of models. - """ - single_response = self._request("get", {}, "v1/models") - - async for response in single_response: - return ModelList(**response) - - raise MistralException("No response received") - - async def delete_model(self, model_id: str) -> ModelDeleted: - single_response = self._request("delete", {}, f"v1/models/{model_id}") - - async for response in single_response: - return ModelDeleted(**response) - - raise MistralException("No response received") - - async def completion( - self, - model: str, - prompt: str, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - ) -> ChatCompletionResponse: - """An asynchronous completion endpoint that returns a single response. - - Args: - model (str): model the name of the model to get completions with, e.g. codestral-latest - prompt (str): the prompt to complete - suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] - Returns: - Dict[str, Any]: a response object containing the generated text. 
- """ - request = self._make_completion_request( - prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop - ) - single_response = self._request( - "post", - request, - "v1/fim/completions", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for response in single_response: - return ChatCompletionResponse(**response) - - raise MistralException("No response received") - - async def completion_stream( - self, - model: str, - prompt: str, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - ) -> AsyncGenerator[ChatCompletionStreamResponse, None]: - """An asynchronous completion endpoint that returns a streaming response. - - Args: - model (str): model the name of the model to get completions with, e.g. codestral-latest - prompt (str): the prompt to complete - suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] - - Returns: - Dict[str, Any]: a response object containing the generated text. - """ - request = self._make_completion_request( - prompt, - model, - suffix, - temperature, - max_tokens, - top_p, - random_seed, - stop, - stream=True, - ) - async_response = self._request( - "post", - request, - "v1/fim/completions", - stream=True, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for json_response in async_response: - yield ChatCompletionStreamResponse(**json_response) + raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py new file mode 100644 index 0000000..f9e54c5 --- /dev/null +++ b/src/mistralai/basesdk.py @@ -0,0 +1,253 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai import models, utils +from mistralai._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai.utils import RetryConfig, SerializedRequestBody, get_body_content +from typing import Callable, List, Optional, Tuple + +class BaseSDK: + sdk_configuration: SDKConfiguration + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self.sdk_configuration = sdk_config + + def get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self.get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if request_has_query_params else None, + ) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + security = utils.get_security_from_env(security, models.Security) + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody("application/octet-stream") + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = self.sdk_configuration.debug_logger + + def do(): + http_res = None + try: + req = self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req) + ) + http_res = client.send(req, 
stream=stream) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + async def do(): + http_res = None + try: + req = self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req) + ) + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py new file mode 100644 index 0000000..1323be2 --- /dev/null +++ b/src/mistralai/chat.py @@ -0,0 +1,470 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import eventstreaming, get_security_from_env +from typing import Any, AsyncGenerator, Generator, List, Optional, Union + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + + def complete( + self, *, + model: Nullable[str], + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/v1/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + model: Nullable[str], + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: 
Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/v1/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def stream( + self, *, + model: Nullable[str], + messages: Union[List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.ChatCompletionStreamRequestStop, models.ChatCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: 
Optional[models.ChatCompletionStreamRequestToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionStreamRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + messages: Union[List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.ChatCompletionStreamRequestStop, models.ChatCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], 
List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ChatCompletionStreamRequestToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionStreamRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 9c75373..d3582f7 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -1,423 +1,14 @@ -import posixpath -import time -from json import JSONDecodeError -from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union +from typing import Optional -from httpx import Client, ConnectError, HTTPTransport, RequestError, Response +MIGRATION_MESSAGE = "This client is deprecated. To migrate to the new client, please refer to this guide: https://github.com/mistralai/client-python/blob/main/MIGRATION.md. If you need to use this client anyway, pin your version to 0.4.2." 
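For context on the hunk above: after this change, constructing the legacy `MistralClient`/`MistralAsyncClient` fails fast with `NotImplementedError(MIGRATION_MESSAGE)` instead of silently misbehaving. A minimal before/after sketch, assuming the new top-level `Mistral` class introduced in this release exposes the generated `Chat` SDK as `client.chat` and that the `MISTRAL_API_KEY` environment variable is used as in the new examples (names here are illustrative, not part of this diff):

```python
import os
from mistralai import Mistral

# Legacy usage (<= 0.4.2) no longer works after this change:
#   from mistralai.client import MistralClient
#   client = MistralClient(api_key=...)   # now raises NotImplementedError(MIGRATION_MESSAGE)
#   client.chat(model=..., messages=[...])

# New 1.0.0-style usage (see MIGRATION.md); assumes MISTRAL_API_KEY is set.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "What is the best French cheese?"}],
)

# Field access follows the generated ChatCompletionResponse / ChatCompletionChoice
# models added in this diff; choices may be absent, so guard before indexing.
if res is not None and res.choices:
    print(res.choices[0].message.content)
```

Note that `messages` accepts plain dicts because the generated signatures take `List[models.MessagesTypedDict]` as well as the pydantic models.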
-from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES -from mistralai.exceptions import ( - MistralAPIException, - MistralAPIStatusException, - MistralConnectionException, - MistralException, -) -from mistralai.files import FilesClient -from mistralai.jobs import JobsClient -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ResponseFormat, - ToolChoice, -) -from mistralai.models.embeddings import EmbeddingResponse -from mistralai.models.models import ModelDeleted, ModelList - - -class MistralClient(ClientBase): - """ - Synchronous wrapper around the async client - """ +class MistralClient: def __init__( self, api_key: Optional[str] = None, - endpoint: str = ENDPOINT, + endpoint: str = "", max_retries: int = 5, timeout: int = 120, ): - super().__init__(endpoint, api_key, max_retries, timeout) - - self._client = Client( - follow_redirects=True, - timeout=self._timeout, - transport=HTTPTransport(retries=self._max_retries), - ) - self.files = FilesClient(self) - self.jobs = JobsClient(self) - - def __del__(self) -> None: - self._client.close() - - def _check_response_status_codes(self, response: Response) -> None: - if response.status_code in RETRY_STATUS_CODES: - raise MistralAPIStatusException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif 400 <= response.status_code < 500: - if response.stream: - response.read() - raise MistralAPIException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif response.status_code >= 500: - if response.stream: - response.read() - raise MistralException( - message=f"Status: {response.status_code}. 
Message: {response.text}", - ) - - def _check_streaming_response(self, response: Response) -> None: - self._check_response_status_codes(response) - - def _check_response(self, response: Response) -> Dict[str, Any]: - self._check_response_status_codes(response) - - json_response: Dict[str, Any] = response.json() - - if "object" not in json_response: - raise MistralException(message=f"Unexpected response: {json_response}") - if "error" == json_response["object"]: # has errors - raise MistralAPIException.from_response( - response, - message=json_response["message"], - ) - - return json_response - - def _request( - self, - method: str, - json: Optional[Dict[str, Any]], - path: str, - stream: bool = False, - attempt: int = 1, - data: Optional[Dict[str, Any]] = None, - check_model_deprecation_headers_callback: Optional[Callable] = None, - **kwargs: Any, - ) -> Iterator[Dict[str, Any]]: - accept_header = "text/event-stream" if stream else "application/json" - headers = { - "Accept": accept_header, - "User-Agent": f"mistral-client-python/{self._version}", - "Authorization": f"Bearer {self._api_key}", - } - - if json is not None: - headers["Content-Type"] = "application/json" - - url = posixpath.join(self._endpoint, path) - - self._logger.debug(f"Sending request: {method} {url} {json}") - - response: Response - - try: - if stream: - with self._client.stream( - method, - url, - headers=headers, - json=json, - data=data, - **kwargs, - ) as response: - if check_model_deprecation_headers_callback: - check_model_deprecation_headers_callback(response.headers) - self._check_streaming_response(response) - - for line in response.iter_lines(): - json_streamed_response = self._process_line(line) - if json_streamed_response: - yield json_streamed_response - - else: - response = self._client.request( - method, - url, - headers=headers, - json=json, - data=data, - **kwargs, - ) - if check_model_deprecation_headers_callback: - check_model_deprecation_headers_callback(response.headers) - yield self._check_response(response) - - except ConnectError as e: - raise MistralConnectionException(str(e)) from e - except RequestError as e: - raise MistralException(f"Unexpected exception ({e.__class__.__name__}): {e}") from e - except JSONDecodeError as e: - raise MistralAPIException.from_response( - response, - message=f"Failed to decode json body: {response.text}", - ) from e - except MistralAPIStatusException as e: - attempt += 1 - if attempt > self._max_retries: - raise MistralAPIStatusException.from_response(response, message=str(e)) from e - backoff = 2.0**attempt # exponential backoff - time.sleep(backoff) - - # Retry as a generator - for r in self._request(method, json, path, stream=stream, attempt=attempt): - yield r - - def chat( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - safe_prompt: bool = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> ChatCompletionResponse: - """A chat endpoint that returns a single response. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[Any]): messages an array of messages to chat with, e.g. 
- [{role: 'user', content: 'What is the best French cheese?'}] - tools (Optional[List[Function]], optional): a list of tools to use. - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. - safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. - - Returns: - ChatCompletionResponse: a response object containing the generated text. - """ - request = self._make_chat_request( - messages, - model, - tools=tools, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=False, - safe_prompt=safe_mode or safe_prompt, - tool_choice=tool_choice, - response_format=response_format, - ) - - single_response = self._request( - "post", - request, - "v1/chat/completions", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for response in single_response: - return ChatCompletionResponse(**response) - - raise MistralException("No response received") - - def chat_stream( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - safe_prompt: bool = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> Iterable[ChatCompletionStreamResponse]: - """A chat endpoint that streams responses. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[Any]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - tools (Optional[List[Function]], optional): a list of tools to use. - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. - safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. - - Returns: - Iterable[ChatCompletionStreamResponse]: - A generator that yields ChatCompletionStreamResponse objects. 
- """ - request = self._make_chat_request( - messages, - model, - tools=tools, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=True, - safe_prompt=safe_mode or safe_prompt, - tool_choice=tool_choice, - response_format=response_format, - ) - - response = self._request( - "post", - request, - "v1/chat/completions", - stream=True, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for json_streamed_response in response: - yield ChatCompletionStreamResponse(**json_streamed_response) - - def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse: - """An embeddings endpoint that returns embeddings for a single, or batch of inputs - - Args: - model (str): The embedding model to use, e.g. mistral-embed - input (Union[str, List[str]]): The input to embed, - e.g. ['What is the best French cheese?'] - - Returns: - EmbeddingResponse: A response object containing the embeddings. - """ - request = {"model": model, "input": input} - singleton_response = self._request( - "post", - request, - "v1/embeddings", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for response in singleton_response: - return EmbeddingResponse(**response) - - raise MistralException("No response received") - - def list_models(self) -> ModelList: - """Returns a list of the available models - - Returns: - ModelList: A response object containing the list of models. - """ - singleton_response = self._request("get", {}, "v1/models") - - for response in singleton_response: - return ModelList(**response) - - raise MistralException("No response received") - - def delete_model(self, model_id: str) -> ModelDeleted: - single_response = self._request("delete", {}, f"v1/models/{model_id}") - - for response in single_response: - return ModelDeleted(**response) - - raise MistralException("No response received") - - def completion( - self, - model: str, - prompt: str, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - ) -> ChatCompletionResponse: - """A completion endpoint that returns a single response. - - Args: - model (str): model the name of the model to get completion with, e.g. codestral-latest - prompt (str): the prompt to complete - suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] - - Returns: - Dict[str, Any]: a response object containing the generated text. 
- """ - request = self._make_completion_request( - prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop - ) - - single_response = self._request( - "post", - request, - "v1/fim/completions", - stream=False, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for response in single_response: - return ChatCompletionResponse(**response) - - raise MistralException("No response received") - - def completion_stream( - self, - model: str, - prompt: str, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - ) -> Iterable[ChatCompletionStreamResponse]: - """An asynchronous completion endpoint that streams responses. - - Args: - model (str): model the name of the model to get completions with, e.g. codestral-latest - prompt (str): the prompt to complete - suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] - - Returns: - Iterable[Dict[str, Any]]: a generator that yields response objects containing the generated text. - """ - request = self._make_completion_request( - prompt, - model, - suffix, - temperature, - max_tokens, - top_p, - random_seed, - stop, - stream=True, - ) - - response = self._request( - "post", - request, - "v1/fim/completions", - stream=True, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for json_streamed_response in response: - yield ChatCompletionStreamResponse(**json_streamed_response) + raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py deleted file mode 100644 index 2507777..0000000 --- a/src/mistralai/client_base.py +++ /dev/null @@ -1,211 +0,0 @@ -import logging -import os -from abc import ABC -from typing import Any, Callable, Dict, List, Optional, Union - -import orjson -from httpx import Headers - -from mistralai.constants import HEADER_MODEL_DEPRECATION_TIMESTAMP -from mistralai.exceptions import MistralException -from mistralai.models.chat_completion import ( - ChatMessage, - Function, - ResponseFormat, - ToolChoice, -) - -CLIENT_VERSION = "0.4.2" - - -class ClientBase(ABC): - def __init__( - self, - endpoint: str, - api_key: Optional[str] = None, - max_retries: int = 5, - timeout: int = 120, - ): - self._max_retries = max_retries - self._timeout = timeout - - if api_key is None: - api_key = os.environ.get("MISTRAL_API_KEY") - if api_key is None: - raise MistralException(message="API key not provided. 
Please set MISTRAL_API_KEY environment variable.") - self._api_key = api_key - self._endpoint = endpoint - self._logger = logging.getLogger(__name__) - - # For azure endpoints, we default to the mistral model - if "inference.azure.com" in self._endpoint: - self._default_model = "mistral" - - self._version = CLIENT_VERSION - - def _get_model(self, model: Optional[str] = None) -> str: - if model is not None: - return model - else: - if self._default_model is None: - raise MistralException(message="model must be provided") - return self._default_model - - def _parse_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - parsed_tools: List[Dict[str, Any]] = [] - for tool in tools: - if tool["type"] == "function": - parsed_function = {} - parsed_function["type"] = tool["type"] - if isinstance(tool["function"], Function): - parsed_function["function"] = tool["function"].model_dump(exclude_none=True) - else: - parsed_function["function"] = tool["function"] - - parsed_tools.append(parsed_function) - - return parsed_tools - - def _parse_tool_choice(self, tool_choice: Union[str, ToolChoice]) -> str: - if isinstance(tool_choice, ToolChoice): - return tool_choice.value - return tool_choice - - def _parse_response_format(self, response_format: Union[Dict[str, Any], ResponseFormat]) -> Dict[str, Any]: - if isinstance(response_format, ResponseFormat): - return response_format.model_dump(exclude_none=True) - return response_format - - def _parse_messages(self, messages: List[Any]) -> List[Dict[str, Any]]: - parsed_messages: List[Dict[str, Any]] = [] - for message in messages: - if isinstance(message, ChatMessage): - parsed_messages.append(message.model_dump(exclude_none=True)) - else: - parsed_messages.append(message) - - return parsed_messages - - def _check_model_deprecation_header_callback_factory(self, model: Optional[str] = None) -> Callable: - model = self._get_model(model) - - def _check_model_deprecation_header_callback( - headers: Headers, - ) -> None: - if HEADER_MODEL_DEPRECATION_TIMESTAMP in headers: - self._logger.warning( - f"WARNING: The model {model} is deprecated " - f"and will be removed on {headers[HEADER_MODEL_DEPRECATION_TIMESTAMP]}. " - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning " - "for more information." 
- ) - - return _check_model_deprecation_header_callback - - def _make_completion_request( - self, - prompt: str, - model: Optional[str] = None, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - stream: Optional[bool] = False, - ) -> Dict[str, Any]: - request_data: Dict[str, Any] = { - "prompt": prompt, - "suffix": suffix, - "model": model, - "stream": stream, - } - - if stop is not None: - request_data["stop"] = stop - - request_data["model"] = self._get_model(model) - - request_data.update( - self._build_sampling_params( - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - ) - ) - - self._logger.debug(f"Completion request: {request_data}") - - return request_data - - def _build_sampling_params( - self, - max_tokens: Optional[int], - random_seed: Optional[int], - temperature: Optional[float], - top_p: Optional[float], - ) -> Dict[str, Any]: - params = {} - if temperature is not None: - params["temperature"] = temperature - if max_tokens is not None: - params["max_tokens"] = max_tokens - if top_p is not None: - params["top_p"] = top_p - if random_seed is not None: - params["random_seed"] = random_seed - return params - - def _make_chat_request( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stream: Optional[bool] = None, - safe_prompt: Optional[bool] = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> Dict[str, Any]: - request_data: Dict[str, Any] = { - "messages": self._parse_messages(messages), - } - - request_data["model"] = self._get_model(model) - - request_data.update( - self._build_sampling_params( - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - ) - ) - - if safe_prompt: - request_data["safe_prompt"] = safe_prompt - if tools is not None: - request_data["tools"] = self._parse_tools(tools) - if stream is not None: - request_data["stream"] = stream - - if tool_choice is not None: - request_data["tool_choice"] = self._parse_tool_choice(tool_choice) - if response_format is not None: - request_data["response_format"] = self._parse_response_format(response_format) - - self._logger.debug(f"Chat request: {request_data}") - - return request_data - - def _process_line(self, line: str) -> Optional[Dict[str, Any]]: - if line.startswith("data: "): - line = line[6:].strip() - if line != "[DONE]": - json_streamed_response: Dict[str, Any] = orjson.loads(line) - return json_streamed_response - return None diff --git a/src/mistralai/constants.py b/src/mistralai/constants.py deleted file mode 100644 index c057d4c..0000000 --- a/src/mistralai/constants.py +++ /dev/null @@ -1,5 +0,0 @@ -RETRY_STATUS_CODES = {429, 500, 502, 503, 504} - -ENDPOINT = "https://api.mistral.ai" - -HEADER_MODEL_DEPRECATION_TIMESTAMP = "x-model-deprecation-timestamp" diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py new file mode 100644 index 0000000..193758e --- /dev/null +++ b/src/mistralai/embeddings.py @@ -0,0 +1,182 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Optional, Union + +class Embeddings(BaseSDK): + r"""Embeddings API.""" + + + def create( + self, *, + inputs: Union[models.Inputs, models.InputsTypedDict], + model: str, + encoding_format: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.EmbeddingResponse]: + r"""Embeddings + + Embeddings + + :param inputs: Text to embed. + :param model: ID of the model to use. + :param encoding_format: The format to return the embeddings in. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.EmbeddingRequest( + inputs=inputs, + model=model, + encoding_format=encoding_format, + ) + + req = self.build_request( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.EmbeddingRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.EmbeddingResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def create_async( + self, *, + inputs: Union[models.Inputs, models.InputsTypedDict], + model: str, + encoding_format: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.EmbeddingResponse]: + r"""Embeddings + + Embeddings + + :param inputs: Text to embed. 
+ :param model: ID of the model to use. + :param encoding_format: The format to return the embeddings in. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.EmbeddingRequest( + inputs=inputs, + model=model, + encoding_format=encoding_format, + ) + + req = self.build_request( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.EmbeddingRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.EmbeddingResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/exceptions.py b/src/mistralai/exceptions.py deleted file mode 100644 index 5728a1c..0000000 --- a/src/mistralai/exceptions.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import annotations - -from typing import Any, Dict, Optional - -from httpx import Response - - -class MistralException(Exception): - """Base Exception class, returned when nothing more specific applies""" - - def __init__(self, message: Optional[str] = None) -> None: - super(MistralException, self).__init__(message) - - self.message = message - - def __str__(self) -> str: - msg = self.message or "" - return msg - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(message={str(self)})" - - -class MistralAPIException(MistralException): - """Returned when the API responds with an error message""" - - def __init__( - self, - message: Optional[str] = None, - http_status: Optional[int] = None, - headers: Optional[Dict[str, Any]] = None, - ) -> None: - super().__init__(message) - self.http_status = http_status - self.headers = headers or {} - - @classmethod - def from_response(cls, 
response: Response, message: Optional[str] = None) -> MistralAPIException: - return cls( - message=message or response.text, - http_status=response.status_code, - headers=dict(response.headers), - ) - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(message={str(self)}, http_status={self.http_status})" - - -class MistralAPIStatusException(MistralAPIException): - """Returned when we receive a non-200 response from the API that we should retry""" - - -class MistralConnectionException(MistralException): - """Returned when the SDK can not reach the API server for any reason""" diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 7646a06..2aa3765 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -1,84 +1,600 @@ -from typing import Any - -from mistralai.exceptions import ( - MistralException, -) -from mistralai.models.files import FileDeleted, FileObject, Files - - -class FilesClient: - def __init__(self, client: Any): - self.client = client - - def create( - self, - file: bytes, - purpose: str = "fine-tune", - ) -> FileObject: - single_response = self.client._request( - "post", - None, - "v1/files", - files={"file": file}, - data={"purpose": purpose}, - ) - for response in single_response: - return FileObject(**response) - raise MistralException("No response received") - - def retrieve(self, file_id: str) -> FileObject: - single_response = self.client._request("get", {}, f"v1/files/{file_id}") - for response in single_response: - return FileObject(**response) - raise MistralException("No response received") - - def list(self) -> Files: - single_response = self.client._request("get", {}, "v1/files") - for response in single_response: - return Files(**response) - raise MistralException("No response received") - - def delete(self, file_id: str) -> FileDeleted: - single_response = self.client._request("delete", {}, f"v1/files/{file_id}") - for response in single_response: - return FileDeleted(**response) - raise MistralException("No response received") - - -class FilesAsyncClient: - def __init__(self, client: Any): - self.client = client - - async def create( - self, - file: bytes, - purpose: str = "fine-tune", - ) -> FileObject: - single_response = self.client._request( - "post", - None, - "v1/files", - files={"file": file}, - data={"purpose": purpose}, - ) - async for response in single_response: - return FileObject(**response) - raise MistralException("No response received") - - async def retrieve(self, file_id: str) -> FileObject: - single_response = self.client._request("get", {}, f"v1/files/{file_id}") - async for response in single_response: - return FileObject(**response) - raise MistralException("No response received") - - async def list(self) -> Files: - single_response = self.client._request("get", {}, "v1/files") - async for response in single_response: - return Files(**response) - raise MistralException("No response received") - - async def delete(self, file_id: str) -> FileDeleted: - single_response = self.client._request("delete", {}, f"v1/files/{file_id}") - async for response in single_response: - return FileDeleted(**response) - raise MistralException("No response received") +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Optional, Union + +class Files(BaseSDK): + r"""Files API""" + + + def upload( + self, *, + file: Union[models.File, models.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.UploadFileOut]: + r"""Upload File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + file=utils.get_pydantic_model(file, models.File), + ) + + req = self.build_request( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def upload_async( + self, *, + file: Union[models.File, models.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.UploadFileOut]: + r"""Upload 
File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + file=utils.get_pydantic_model(file, models.File), + ) + + req = self.build_request( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def list( + self, *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ListFilesOut]: + r"""List Files + + Returns a list of files that belong to the user's organization. 
+ + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + req = self.build_request( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def list_async( + self, *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ListFilesOut]: + r"""List Files + + Returns a list of files that belong to the user's organization. 
+ + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + req = self.build_request( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def retrieve( + self, *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.RetrieveFileOut]: + r"""Retrieve File + + Returns information about a specific file. 
+ + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesRetrieveFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="GET", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def retrieve_async( + self, *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.RetrieveFileOut]: + r"""Retrieve File + + Returns information about a specific file. 
+ + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesRetrieveFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="GET", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def delete( + self, *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DeleteFileOut]: + r"""Delete File + + Delete a file. 
+ + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesDeleteFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def delete_async( + self, *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DeleteFileOut]: + r"""Delete File + + Delete a file. 
+ + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesDeleteFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py new file mode 100644 index 0000000..19090d9 --- /dev/null +++ b/src/mistralai/fim.py @@ -0,0 +1,438 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import eventstreaming, get_security_from_env +from typing import Any, AsyncGenerator, Generator, Optional, Union + +class Fim(BaseSDK): + r"""Fill-in-the-middle API.""" + + + def complete( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FIMCompletionResponse]: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = 
utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FIMCompletionResponse]: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def stream( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, 
models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
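+
+        Illustrative usage (an editorial sketch, not Speakeasy-generated code; it assumes the
+        root client class is `Mistral`, the method is reachable as `client.fim.stream_async`,
+        and `MISTRAL_API_KEY` is set; the `event.data.choices[0].delta.content` access path is
+        assumed from the CompletionEvent and CompletionChunk models in this release)::
+
+            import asyncio
+            import os
+
+            from mistralai import Mistral
+
+            async def main() -> None:
+                client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+                events = await client.fim.stream_async(
+                    model="codestral-latest",
+                    prompt="def add(a: int, b: int) -> int:",
+                )
+                if events is not None:
+                    async for event in events:
+                        # each event wraps a CompletionChunk; print tokens as they arrive
+                        print(event.data.choices[0].delta.content or "", end="")
+
+            asyncio.run(main())
+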
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/fine_tuning.py b/src/mistralai/fine_tuning.py new file mode 100644 index 0000000..998100a --- /dev/null +++ b/src/mistralai/fine_tuning.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.jobs import Jobs + +class FineTuning(BaseSDK): + jobs: Jobs + def __init__(self, sdk_config: SDKConfiguration) -> None: + BaseSDK.__init__(self, sdk_config) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = Jobs(self.sdk_configuration) + diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py new file mode 100644 index 0000000..36b642a --- /dev/null +++ b/src/mistralai/httpclient.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +# pyright: reportReturnType = false +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 115b232..255310f 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -1,72 +1,57 @@ -from datetime import datetime -from typing import Any, Optional, Union - -from mistralai.exceptions import ( - MistralException, -) -from mistralai.models.jobs import DetailedJob, IntegrationIn, Job, JobMetadata, JobQueryFilter, Jobs, TrainingParameters +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +from .basesdk import BaseSDK +from datetime import datetime +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import List, Optional, Union -class JobsClient: - def __init__(self, client: Any): - self.client = client +class Jobs(BaseSDK): + + + def list( + self, *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[models.QueryParamStatus] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.JobsOut]: + r"""Get Fine Tuning Jobs - def create( - self, - model: str, - training_files: Union[list[str], None] = None, - validation_files: Union[list[str], None] = None, - hyperparameters: TrainingParameters = TrainingParameters( - training_steps=1800, - learning_rate=1.0e-4, - ), - suffix: Union[str, None] = None, - integrations: Union[set[IntegrationIn], None] = None, - training_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API - validation_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API - dry_run: bool = False, - ) -> Union[Job, JobMetadata]: - # Handle deprecated arguments - if not training_files and training_file: - training_files = [training_file] - if not validation_files and validation_file: - validation_files = [validation_file] - single_response = self.client._request( - method="post", - json={ - "model": model, - "training_files": training_files, - "validation_files": validation_files, - "hyperparameters": hyperparameters.dict(), - "suffix": suffix, - "integrations": integrations, - }, - path="v1/fine_tuning/jobs", - params={"dry_run": dry_run}, - ) - for response in single_response: - return Job(**response) if not dry_run else JobMetadata(**response) - raise MistralException("No response received") - - def retrieve(self, job_id: str) -> DetailedJob: - single_response = self.client._request(method="get", path=f"v1/fine_tuning/jobs/{job_id}", json={}) - for response in single_response: - return DetailedJob(**response) - raise MistralException("No response received") + Get a list of fine-tuning jobs for your organization and user. - def list( - self, - page: int = 0, - page_size: int = 10, - model: Optional[str] = None, - created_after: Optional[datetime] = None, - created_by_me: Optional[bool] = None, - status: Optional[str] = None, - wandb_project: Optional[str] = None, - wandb_name: Optional[str] = None, - suffix: Optional[str] = None, - ) -> Jobs: - query_params = JobQueryFilter( + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. 
+ :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -76,80 +61,95 @@ def list( wandb_project=wandb_project, wandb_name=wandb_name, suffix=suffix, - ).model_dump(exclude_none=True) - single_response = self.client._request(method="get", params=query_params, path="v1/fine_tuning/jobs", json={}) - for response in single_response: - return Jobs(**response) - raise MistralException("No response received") - - def cancel(self, job_id: str) -> DetailedJob: - single_response = self.client._request(method="post", path=f"v1/fine_tuning/jobs/{job_id}/cancel", json={}) - for response in single_response: - return DetailedJob(**response) - raise MistralException("No response received") - - -class JobsAsyncClient: - def __init__(self, client: Any): - self.client = client - - async def create( - self, - model: str, - training_files: Union[list[str], None] = None, - validation_files: Union[list[str], None] = None, - hyperparameters: TrainingParameters = TrainingParameters( - training_steps=1800, - learning_rate=1.0e-4, - ), - suffix: Union[str, None] = None, - integrations: Union[set[IntegrationIn], None] = None, - training_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API - validation_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API - dry_run: bool = False, - ) -> Union[Job, JobMetadata]: - # Handle deprecated arguments - if not training_files and training_file: - training_files = [training_file] - if not validation_files and validation_file: - validation_files = [validation_file] - - single_response = self.client._request( - method="post", - json={ - "model": model, - "training_files": training_files, - "validation_files": validation_files, - "hyperparameters": hyperparameters.dict(), - "suffix": suffix, - "integrations": integrations, - }, - path="v1/fine_tuning/jobs", - params={"dry_run": dry_run}, - ) - async for response in single_response: - return Job(**response) if not dry_run else JobMetadata(**response) - raise MistralException("No response received") - - async def retrieve(self, job_id: str) -> DetailedJob: - single_response = self.client._request(method="get", path=f"v1/fine_tuning/jobs/{job_id}", json={}) - async for response in single_response: - return DetailedJob(**response) - raise MistralException("No response received") - - async def list( - self, - page: int = 0, - page_size: int = 10, - model: Optional[str] = None, - created_after: Optional[datetime] = None, - created_by_me: Optional[bool] = None, - status: Optional[str] = None, - wandb_project: Optional[str] = None, - wandb_name: Optional[str] = None, - suffix: Optional[str] = None, - ) -> Jobs: - query_params = JobQueryFilter( + ) + + req = self.build_request( + 
method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def list_async( + self, *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[models.QueryParamStatus] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.JobsOut]: + r"""Get Fine Tuning Jobs + + Get a list of fine-tuning jobs for your organization and user. + + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. + :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -159,14 +159,686 @@ async def list( wandb_project=wandb_project, wandb_name=wandb_name, suffix=suffix, - ).model_dump(exclude_none=True) - single_response = self.client._request(method="get", path="v1/fine_tuning/jobs", params=query_params, json={}) - async for response in single_response: - return Jobs(**response) - raise MistralException("No response received") - - async def cancel(self, job_id: str) -> DetailedJob: - single_response = self.client._request(method="post", path=f"v1/fine_tuning/jobs/{job_id}/cancel", json={}) - async for response in single_response: - return DetailedJob(**response) - raise MistralException("No response received") + ) + + req = self.build_request( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def create( + self, *, + model: models.FineTuneableModel, + hyperparameters: Union[models.TrainingParametersIn, models.TrainingParametersInTypedDict], + training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[Union[List[models.WandbIntegration], List[models.WandbIntegrationTypedDict]]] = UNSET, + repositories: Optional[Union[List[models.GithubRepositoryIn], List[models.GithubRepositoryInTypedDict]]] = None, + auto_start: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]: + r"""Create Fine 
Tuning Job + + Create a new fine-tuning job, it will be queued for processing. + + :param model: The name of the model to fine-tune. + :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param repositories: + :param auto_start: This field will be required in a future release. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobIn( + model=model, + training_files=utils.get_pydantic_model(training_files, Optional[List[models.TrainingFile]]), + validation_files=validation_files, + hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), + suffix=suffix, + integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.WandbIntegration]]), + repositories=utils.get_pydantic_model(repositories, Optional[List[models.GithubRepositoryIn]]), + auto_start=auto_start, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.JobIn), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, 
http_res.text, http_res) + + + + async def create_async( + self, *, + model: models.FineTuneableModel, + hyperparameters: Union[models.TrainingParametersIn, models.TrainingParametersInTypedDict], + training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[Union[List[models.WandbIntegration], List[models.WandbIntegrationTypedDict]]] = UNSET, + repositories: Optional[Union[List[models.GithubRepositoryIn], List[models.GithubRepositoryInTypedDict]]] = None, + auto_start: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]: + r"""Create Fine Tuning Job + + Create a new fine-tuning job, it will be queued for processing. + + :param model: The name of the model to fine-tune. + :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param repositories: + :param auto_start: This field will be required in a future release. 
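+
+        Illustrative usage (an editorial sketch, not Speakeasy-generated code; it assumes
+        `client.fine_tuning.jobs.create_async` as the access path, `MISTRAL_API_KEY` in the
+        environment, and a placeholder file ID standing in for a previously uploaded training file)::
+
+            import asyncio
+            import os
+
+            from mistralai import Mistral
+
+            async def main() -> None:
+                client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+                created = await client.fine_tuning.jobs.create_async(
+                    model="open-mistral-7b",
+                    hyperparameters={"training_steps": 100, "learning_rate": 1.0e-4},
+                    training_files=[{"file_id": "<uploaded-file-uuid>", "weight": 1}],
+                    auto_start=False,  # keep the job queued for validation until started explicitly
+                )
+                print(created)
+
+            asyncio.run(main())
+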
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobIn( + model=model, + training_files=utils.get_pydantic_model(training_files, Optional[List[models.TrainingFile]]), + validation_files=validation_files, + hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), + suffix=suffix, + integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.WandbIntegration]]), + repositories=utils.get_pydantic_model(repositories, Optional[List[models.GithubRepositoryIn]]), + auto_start=auto_start, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.JobIn), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def get( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. 
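+
+        Illustrative usage (an editorial sketch, not Speakeasy-generated code; it assumes
+        `client.fine_tuning.jobs.get` as the access path and `MISTRAL_API_KEY` in the
+        environment; "<job-uuid>" is a placeholder for a real job ID)::
+
+            import os
+
+            from mistralai import Mistral
+
+            client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+            job = client.fine_tuning.jobs.get(job_id="<job-uuid>")
+            if job is not None:
+                print(job.status)
+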
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def get_async( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def cancel( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. 
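+
+        Illustrative usage (an editorial sketch, not Speakeasy-generated code; same assumptions
+        as the other examples in this module, with "<job-uuid>" standing in for a real job ID)::
+
+            import os
+
+            from mistralai import Mistral
+
+            client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+            cancelled = client.fine_tuning.jobs.cancel(job_id="<job-uuid>")
+            if cancelled is not None:
+                print(cancelled.status)  # cancellation is requested; the job winds down asynchronously
+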
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def cancel_async( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def start( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. 
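+
+        Illustrative usage (an editorial sketch, not Speakeasy-generated code; it assumes a job
+        previously created with `auto_start=False` and already validated, that the access path is
+        `client.fine_tuning.jobs.start`, and that `MISTRAL_API_KEY` is set; "<job-uuid>" is a
+        placeholder for a real job ID)::
+
+            import os
+
+            from mistralai import Mistral
+
+            client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+            started = client.fine_tuning.jobs.start(job_id="<job-uuid>")
+            if started is not None:
+                print(started.status)
+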
+ + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def start_async( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. 
+ + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index e69de29..f316270 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .agentscompletionrequest import AgentsCompletionRequest, AgentsCompletionRequestMessages, AgentsCompletionRequestMessagesTypedDict, AgentsCompletionRequestStop, AgentsCompletionRequestStopTypedDict, AgentsCompletionRequestToolChoice, AgentsCompletionRequestTypedDict +from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestTypedDict +from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict +from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict, FinishReason +from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice +from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict +from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestMessages, ChatCompletionStreamRequestMessagesTypedDict, ChatCompletionStreamRequestStop, ChatCompletionStreamRequestStopTypedDict, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestTypedDict +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from .completionevent import CompletionEvent, CompletionEventTypedDict +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .delete_model_v1_models_model_id_deleteop import DeleteModelV1ModelsModelIDDeleteRequest, DeleteModelV1ModelsModelIDDeleteRequestTypedDict +from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict +from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .detailedjobout import DetailedJobOut, DetailedJobOutStatus, DetailedJobOutTypedDict +from .embeddingrequest import EmbeddingRequest, EmbeddingRequestTypedDict, Inputs, InputsTypedDict +from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict +from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict +from .eventout import EventOut, EventOutTypedDict +from .files_api_routes_delete_fileop import FilesAPIRoutesDeleteFileRequest, FilesAPIRoutesDeleteFileRequestTypedDict +from .files_api_routes_retrieve_fileop import FilesAPIRoutesRetrieveFileRequest, FilesAPIRoutesRetrieveFileRequestTypedDict +from .files_api_routes_upload_fileop import File, FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict +from .fileschema import FileSchema, FileSchemaTypedDict +from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict +from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict +from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict +from .finetuneablemodel import FineTuneableModel +from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict +from .ftmodelout import 
FTModelOut, FTModelOutTypedDict +from .function import Function, FunctionTypedDict +from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict +from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .jobin import JobIn, JobInTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .jobout import JobOut, JobOutTypedDict, Status +from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict +from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import JobsAPIRoutesFineTuningCancelFineTuningJobRequest, JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict +from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import JobsAPIRoutesFineTuningCreateFineTuningJobResponse, JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict +from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import JobsAPIRoutesFineTuningGetFineTuningJobRequest, JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict +from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import JobsAPIRoutesFineTuningGetFineTuningJobsRequest, JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, QueryParamStatus +from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import JobsAPIRoutesFineTuningStartFineTuningJobRequest, JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict +from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict +from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict +from .jobsout import JobsOut, JobsOutTypedDict +from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict +from .listfilesout import ListFilesOut, ListFilesOutTypedDict +from .metricout import MetricOut, MetricOutTypedDict +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from .modelcard import ModelCard, ModelCardTypedDict +from .modellist import ModelList, ModelListTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats +from .retrieve_model_v1_models_model_id_getop import RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict +from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict +from .sampletype import SampleType +from .sdkerror import SDKError +from .security import Security, SecurityTypedDict +from .source import Source +from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .tool import Tool, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .trainingfile import TrainingFile, TrainingFileTypedDict +from .trainingparameters import TrainingParameters, TrainingParametersTypedDict +from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict +from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutTypedDict +from 
.updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict +from .uploadfileout import UploadFileOut, UploadFileOutTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict +from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict +from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict + +__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "JobIn", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", 
"JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "QueryParamStatus", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "UnarchiveFTModelOut", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationTypedDict"] diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py new file mode 100644 index 0000000..3eb8b38 --- /dev/null +++ b/src/mistralai/models/agentscompletionrequest.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +AgentsCompletionRequestToolChoice = Literal["auto", "none", "any"] + +class AgentsCompletionRequestTypedDict(TypedDict): + messages: List[AgentsCompletionRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[AgentsCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionRequestToolChoice] + + +class AgentsCompletionRequest(BaseModel): + messages: List[AgentsCompletionRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[AgentsCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[AgentsCompletionRequestToolChoice] = "auto" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +AgentsCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +AgentsCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py new file mode 100644 index 0000000..e6a1ea6 --- /dev/null +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -0,0 +1,92 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class AgentsCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. 
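A minimal usage sketch for the AgentsCompletionRequest model defined above, assuming the mistralai package from this release is installed; the agent id is a placeholder. Messages can be passed as role-tagged dicts because the messages union is discriminated on the "role" key.

```python
from mistralai.models import AgentsCompletionRequest

req = AgentsCompletionRequest(
    agent_id="agent-xxxxxxxx",  # placeholder agent id
    messages=[{"role": "user", "content": "Summarize the 1.0.0 release in one line."}],
)

# serialize_model() drops optional fields that were left at UNSET, so only the
# fields that were actually provided end up in the request payload.
print(req.model_dump(by_alias=True))
```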
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class AgentsCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[AgentsCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py new file mode 100644 index 0000000..ba76737 --- /dev/null +++ b/src/mistralai/models/archiveftmodelout.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ArchiveFTModelOutTypedDict(TypedDict): + id: str + archived: NotRequired[bool] + + +class ArchiveFTModelOut(BaseModel): + id: str + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore + archived: Optional[bool] = True + diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py new file mode 100644 index 0000000..b7080d5 --- /dev/null +++ b/src/mistralai/models/assistantmessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +AssistantMessageRole = Literal["assistant"] + +class AssistantMessageTypedDict(TypedDict): + content: NotRequired[Nullable[str]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: NotRequired[AssistantMessageRole] + + +class AssistantMessage(BaseModel): + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
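The archive response above pins its `object` field through a Final alias; a short sketch of what that looks like when dumped (the model id is a placeholder):

```python
from mistralai.models import ArchiveFTModelOut

out = ArchiveFTModelOut(id="ft:open-mistral-7b:example:0000")  # placeholder id

# OBJECT is a Final field aliased to "object", so it always serializes with its
# default value; archived defaults to True.
print(out.model_dump(by_alias=True))
# {'id': 'ft:open-mistral-7b:example:0000', 'object': 'model', 'archived': True}
```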
The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: Optional[AssistantMessageRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["content", "tool_calls", "prefix", "role"] + nullable_fields = ["content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/chat_completion.py b/src/mistralai/models/chat_completion.py deleted file mode 100644 index c5eda5a..0000000 --- a/src/mistralai/models/chat_completion.py +++ /dev/null @@ -1,93 +0,0 @@ -from enum import Enum -from typing import List, Optional - -from pydantic import BaseModel - -from mistralai.models.common import UsageInfo - - -class Function(BaseModel): - name: str - description: str - parameters: dict - - -class ToolType(str, Enum): - function = "function" - - -class FunctionCall(BaseModel): - name: str - arguments: str - - -class ToolCall(BaseModel): - id: str = "null" - type: ToolType = ToolType.function - function: FunctionCall - - -class ResponseFormats(str, Enum): - text: str = "text" - json_object: str = "json_object" - - -class ToolChoice(str, Enum): - auto: str = "auto" - any: str = "any" - none: str = "none" - - -class ResponseFormat(BaseModel): - type: ResponseFormats = ResponseFormats.text - - -class ChatMessage(BaseModel): - role: str - content: str - name: Optional[str] = None - tool_calls: Optional[List[ToolCall]] = None - tool_call_id: Optional[str] = None - - -class DeltaMessage(BaseModel): - role: Optional[str] = None - content: Optional[str] = None - tool_calls: Optional[List[ToolCall]] = None - - -class FinishReason(str, Enum): - stop = "stop" - length = "length" - error = "error" - tool_calls = "tool_calls" - - -class ChatCompletionResponseStreamChoice(BaseModel): - index: int - delta: DeltaMessage - finish_reason: Optional[FinishReason] - - -class ChatCompletionStreamResponse(BaseModel): - id: str - model: str - choices: List[ChatCompletionResponseStreamChoice] - created: Optional[int] = None - object: Optional[str] = None - usage: Optional[UsageInfo] = None - - -class ChatCompletionResponseChoice(BaseModel): - index: int - message: ChatMessage - finish_reason: Optional[FinishReason] - - -class ChatCompletionResponse(BaseModel): - id: str - object: str - created: int - model: str - choices: List[ChatCompletionResponseChoice] - usage: UsageInfo diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py new file mode 100644 index 0000000..04d2350 --- /dev/null +++ b/src/mistralai/models/chatcompletionchoice.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
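The role-specific message classes replace the generic ChatMessage removed above; a small sketch of the prefix behaviour described in the AssistantMessage docstring:

```python
from mistralai.models import AssistantMessage

msg = AssistantMessage(content="Once upon a time,", prefix=True)

# content and tool_calls are nullable; tool_calls was left at UNSET, so it is
# omitted from the dump while the explicitly set fields are kept.
print(msg.model_dump())
# {'content': 'Once upon a time,', 'prefix': True, 'role': 'assistant'}
```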
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + finish_reason: FinishReason + message: NotRequired[AssistantMessageTypedDict] + + +class ChatCompletionChoice(BaseModel): + index: int + finish_reason: FinishReason + message: Optional[AssistantMessage] = None + diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py new file mode 100644 index 0000000..30817c5 --- /dev/null +++ b/src/mistralai/models/chatcompletionrequest.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[MessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[StopTypedDict] + r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ToolChoice] + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[Messages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[Stop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ToolChoice] = "auto" + safe_prompt: Optional[bool] = False + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +StopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py new file mode 100644 index 0000000..dacb0ac --- /dev/null +++ b/src/mistralai/models/chatcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class ChatCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py new file mode 100644 index 0000000..9523dd5 --- /dev/null +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
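A minimal sketch of building the ChatCompletionRequest defined above; the model name is only an example, and the messages are plain dicts that the role-discriminated Messages union resolves to the classes above:

```python
from mistralai.models import ChatCompletionRequest

req = ChatCompletionRequest(
    model="mistral-small-latest",  # example model id
    messages=[
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "What changed in this release?"},
    ],
    stop=["\n\n"],   # `stop` accepts a single string or a list of strings
    max_tokens=64,
)
print(req.model_dump(by_alias=True))
```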
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ChatCompletionStreamRequestToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoice] + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[ChatCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = "auto" + safe_prompt: Optional[bool] = False + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +ChatCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +ChatCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/src/mistralai/models/checkpointout.py b/src/mistralai/models/checkpointout.py new file mode 100644 index 0000000..108356c --- /dev/null +++ b/src/mistralai/models/checkpointout.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .metricout import MetricOut, MetricOutTypedDict +from mistralai.types import BaseModel +from typing import TypedDict + + +class CheckpointOutTypedDict(TypedDict): + metrics: MetricOutTypedDict + r"""Metrics at the step number during the fine-tuning job. 
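The streaming variant above mirrors ChatCompletionRequest but flips the `stream` default; a quick sketch:

```python
from mistralai.models import ChatCompletionStreamRequest

req = ChatCompletionStreamRequest(
    model="mistral-small-latest",  # example model id
    messages=[{"role": "user", "content": "Stream a short limerick."}],
)
assert req.stream is True  # default is True here, False on ChatCompletionRequest
```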
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int + r"""The step number that the checkpoint was created at.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" + + +class CheckpointOut(BaseModel): + metrics: MetricOut + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int + r"""The step number that the checkpoint was created at.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" + diff --git a/src/mistralai/models/common.py b/src/mistralai/models/common.py deleted file mode 100644 index 11b71e5..0000000 --- a/src/mistralai/models/common.py +++ /dev/null @@ -1,9 +0,0 @@ -from typing import Optional - -from pydantic import BaseModel - - -class UsageInfo(BaseModel): - prompt_tokens: int - total_tokens: int - completion_tokens: Optional[int] diff --git a/src/mistralai/models/completionchunk.py b/src/mistralai/models/completionchunk.py new file mode 100644 index 0000000..f3a12c1 --- /dev/null +++ b/src/mistralai/models/completionchunk.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None + diff --git a/src/mistralai/models/completionevent.py b/src/mistralai/models/completionevent.py new file mode 100644 index 0000000..9b75f73 --- /dev/null +++ b/src/mistralai/models/completionevent.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai.types import BaseModel +from typing import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk + diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py new file mode 100644 index 0000000..bd3cf9b --- /dev/null +++ b/src/mistralai/models/completionresponsestreamchoice.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, TypedDict + + +CompletionResponseStreamChoiceFinishReason = Literal["stop", "length", "error", "tool_calls"] + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + delta: DeltaMessage + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["finish_reason"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py new file mode 100644 index 0000000..0695492 --- /dev/null +++ b/src/mistralai/models/contentchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ContentChunkTypedDict(TypedDict): + text: str + + +class ContentChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py new file mode 100644 index 0000000..8935acb --- /dev/null +++ b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to delete.""" + + +class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to delete.""" + diff --git a/src/mistralai/models/deletefileout.py b/src/mistralai/models/deletefileout.py new file mode 100644 index 0000000..90c60ff --- /dev/null +++ b/src/mistralai/models/deletefileout.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import TypedDict + + +class DeleteFileOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted file.""" + object: str + r"""The object type that was deleted""" + deleted: bool + r"""The deletion status.""" + + +class DeleteFileOut(BaseModel): + id: str + r"""The ID of the deleted file.""" + object: str + r"""The object type that was deleted""" + deleted: bool + r"""The deletion status.""" + diff --git a/src/mistralai/models/deletemodelout.py b/src/mistralai/models/deletemodelout.py new file mode 100644 index 0000000..bab96e0 --- /dev/null +++ b/src/mistralai/models/deletemodelout.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class DeleteModelOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted model.""" + object: NotRequired[str] + r"""The object type that was deleted""" + deleted: NotRequired[bool] + r"""The deletion status""" + + +class DeleteModelOut(BaseModel): + id: str + r"""The ID of the deleted model.""" + object: Optional[str] = "model" + r"""The object type that was deleted""" + deleted: Optional[bool] = True + r"""The deletion status""" + diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py new file mode 100644 index 0000000..013f708 --- /dev/null +++ b/src/mistralai/models/deltamessage.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[str] + content: NotRequired[str] + tool_calls: NotRequired[Nullable[ToolCallTypedDict]] + + +class DeltaMessage(BaseModel): + role: Optional[str] = None + content: Optional[str] = None + tool_calls: OptionalNullable[ToolCall] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role", "content", "tool_calls"] + nullable_fields = ["tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/detailedjobout.py new file mode 100644 index 0000000..c9f3122 --- /dev/null +++ b/src/mistralai/models/detailedjobout.py @@ -0,0 +1,91 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .eventout import EventOut, EventOutTypedDict +from .finetuneablemodel import FineTuneableModel +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .trainingparameters import TrainingParameters, TrainingParametersTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, List, Literal, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +DetailedJobOutStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] + +class DetailedJobOutTypedDict(TypedDict): + id: str + auto_start: bool + hyperparameters: TrainingParametersTypedDict + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + status: DetailedJobOutStatus + job_type: str + created_at: int + modified_at: int + training_files: List[str] + validation_files: NotRequired[Nullable[List[str]]] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[Nullable[List[WandbIntegrationOutTypedDict]]] + trained_tokens: NotRequired[Nullable[int]] + repositories: NotRequired[List[GithubRepositoryOutTypedDict]] + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + events: NotRequired[List[EventOutTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointOutTypedDict]] + + +class DetailedJobOut(BaseModel): + id: str + auto_start: bool + hyperparameters: TrainingParameters + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + status: DetailedJobOutStatus + job_type: str + created_at: int + modified_at: int + training_files: List[str] + validation_files: OptionalNullable[List[str]] = UNSET + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job" # type: ignore + fine_tuned_model: OptionalNullable[str] = UNSET + suffix: OptionalNullable[str] = UNSET + integrations: OptionalNullable[List[WandbIntegrationOut]] = UNSET + trained_tokens: OptionalNullable[int] = UNSET + repositories: Optional[List[GithubRepositoryOut]] = None + metadata: OptionalNullable[JobMetadataOut] = UNSET + events: Optional[List[EventOut]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + checkpoints: Optional[List[CheckpointOut]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["validation_files", "object", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "repositories", "metadata", "events", "checkpoints"] + nullable_fields = ["validation_files", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py new file mode 100644 index 0000000..6875e14 --- /dev/null +++ b/src/mistralai/models/embeddingrequest.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import List, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +class EmbeddingRequestTypedDict(TypedDict): + inputs: InputsTypedDict + r"""Text to embed.""" + model: str + r"""ID of the model to use.""" + encoding_format: NotRequired[Nullable[str]] + r"""The format to return the embeddings in.""" + + +class EmbeddingRequest(BaseModel): + inputs: Annotated[Inputs, pydantic.Field(alias="input")] + r"""Text to embed.""" + model: str + r"""ID of the model to use.""" + encoding_format: OptionalNullable[str] = UNSET + r"""The format to return the embeddings in.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["encoding_format"] + nullable_fields = ["encoding_format"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +InputsTypedDict = Union[str, List[str]] +r"""Text to embed.""" + + +Inputs = Union[str, List[str]] +r"""Text to embed.""" + diff --git a/src/mistralai/models/embeddingresponse.py b/src/mistralai/models/embeddingresponse.py new file mode 100644 index 0000000..040c42d --- /dev/null +++ b/src/mistralai/models/embeddingresponse.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
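A sketch of the embedding request above. The Python field is `inputs` while the wire name is `input` via the pydantic alias; constructing by field name assumes the generated BaseModel permits populate-by-name, as is the convention in this SDK.

```python
from mistralai.models import EmbeddingRequest

req = EmbeddingRequest(inputs=["first passage", "second passage"], model="mistral-embed")

# `inputs` serializes under the API's `input` alias; encoding_format stays UNSET
# and is therefore dropped from the payload.
print(req.model_dump(by_alias=True))
# {'input': ['first passage', 'second passage'], 'model': 'mistral-embed'}
```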
DO NOT EDIT.""" + +from __future__ import annotations +from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List, TypedDict + + +class EmbeddingResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + data: List[EmbeddingResponseDataTypedDict] + + +class EmbeddingResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + data: List[EmbeddingResponseData] + diff --git a/src/mistralai/models/embeddingresponsedata.py b/src/mistralai/models/embeddingresponsedata.py new file mode 100644 index 0000000..07a061b --- /dev/null +++ b/src/mistralai/models/embeddingresponsedata.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class EmbeddingResponseDataTypedDict(TypedDict): + object: NotRequired[str] + embedding: NotRequired[List[float]] + index: NotRequired[int] + + +class EmbeddingResponseData(BaseModel): + object: Optional[str] = None + embedding: Optional[List[float]] = None + index: Optional[int] = None + diff --git a/src/mistralai/models/embeddings.py b/src/mistralai/models/embeddings.py deleted file mode 100644 index a3200f7..0000000 --- a/src/mistralai/models/embeddings.py +++ /dev/null @@ -1,19 +0,0 @@ -from typing import List - -from pydantic import BaseModel - -from mistralai.models.common import UsageInfo - - -class EmbeddingObject(BaseModel): - object: str - embedding: List[float] - index: int - - -class EmbeddingResponse(BaseModel): - id: str - object: str - data: List[EmbeddingObject] - model: str - usage: UsageInfo diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py new file mode 100644 index 0000000..d522abe --- /dev/null +++ b/src/mistralai/models/eventout.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, TypedDict +from typing_extensions import NotRequired + + +class EventOutTypedDict(TypedDict): + name: str + r"""The name of the event.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + data: NotRequired[Nullable[Dict[str, Any]]] + + +class EventOut(BaseModel): + name: str + r"""The name of the event.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + data: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["data"] + nullable_fields = ["data"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/files.py b/src/mistralai/models/files.py deleted file mode 100644 index f0aeff3..0000000 --- a/src/mistralai/models/files.py +++ /dev/null @@ -1,23 +0,0 @@ -from typing import Literal, Optional - -from pydantic import BaseModel - - -class FileObject(BaseModel): - id: str - object: str - bytes: int - created_at: int - filename: str - purpose: Optional[Literal["fine-tune"]] = "fine-tune" - - -class FileDeleted(BaseModel): - id: str - object: str - deleted: bool - - -class Files(BaseModel): - data: list[FileObject] - object: Literal["list"] diff --git a/src/mistralai/models/files_api_routes_delete_fileop.py b/src/mistralai/models/files_api_routes_delete_fileop.py new file mode 100644 index 0000000..8571483 --- /dev/null +++ b/src/mistralai/models/files_api_routes_delete_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDeleteFileRequest(BaseModel): + file_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + diff --git a/src/mistralai/models/files_api_routes_retrieve_fileop.py b/src/mistralai/models/files_api_routes_retrieve_fileop.py new file mode 100644 index 0000000..76063be --- /dev/null +++ b/src/mistralai/models/files_api_routes_retrieve_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesRetrieveFileRequest(BaseModel): + file_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py new file mode 100644 index 0000000..5d72a89 --- /dev/null +++ b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +import io +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, MultipartFormMetadata +import pydantic +from typing import Final, IO, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +class FileTypedDict(TypedDict): + file_name: str + content: Union[bytes, IO[bytes], io.BufferedReader] + content_type: NotRequired[str] + + +class File(BaseModel): + file_name: Annotated[str, pydantic.Field(alias="file"), FieldMetadata(multipart=True)] + content: Annotated[Union[bytes, IO[bytes], io.BufferedReader], pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(content=True))] + content_type: Annotated[Optional[str], pydantic.Field(alias="Content-Type"), FieldMetadata(multipart=True)] = None + + +class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): + file: Annotated[File, pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + PURPOSE: Annotated[Final[Optional[str]], pydantic.Field(alias="purpose"), FieldMetadata(multipart=True)] = "fine-tune" # type: ignore + diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py new file mode 100644 index 0000000..c055242 --- /dev/null +++ b/src/mistralai/models/fileschema.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .sampletype import SampleType +from .source import Source +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, TypedDict +from typing_extensions import Annotated, NotRequired + + +class FileSchemaTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + + +class FileSchema(BaseModel): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + PURPOSE: Annotated[Final[str], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines"] + nullable_fields = ["num_lines"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py new file mode 100644 index 0000000..ba941bb --- /dev/null +++ b/src/mistralai/models/fimcompletionrequest.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class FIMCompletionRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class FIMCompletionRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +FIMCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + diff --git a/src/mistralai/models/fimcompletionresponse.py b/src/mistralai/models/fimcompletionresponse.py new file mode 100644 index 0000000..f359adb --- /dev/null +++ b/src/mistralai/models/fimcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class FIMCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py new file mode 100644 index 0000000..767d041 --- /dev/null +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -0,0 +1,92 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + diff --git a/src/mistralai/models/finetuneablemodel.py b/src/mistralai/models/finetuneablemodel.py new file mode 100644 index 0000000..22c8e4c --- /dev/null +++ b/src/mistralai/models/finetuneablemodel.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FineTuneableModel = Literal["open-mistral-7b", "mistral-small-latest", "codestral-latest", "mistral-large-latest", "open-mistral-nemo"] +r"""The name of the model to fine-tune.""" diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py new file mode 100644 index 0000000..ab76af3 --- /dev/null +++ b/src/mistralai/models/ftmodelcapabilitiesout.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class FTModelCapabilitiesOutTypedDict(TypedDict): + completion_chat: NotRequired[bool] + completion_fim: NotRequired[bool] + function_calling: NotRequired[bool] + fine_tuning: NotRequired[bool] + + +class FTModelCapabilitiesOut(BaseModel): + completion_chat: Optional[bool] = True + completion_fim: Optional[bool] = False + function_calling: Optional[bool] = False + fine_tuning: Optional[bool] = False + diff --git a/src/mistralai/models/ftmodelout.py b/src/mistralai/models/ftmodelout.py new file mode 100644 index 0000000..44b5348 --- /dev/null +++ b/src/mistralai/models/ftmodelout.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, List, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class FTModelOutTypedDict(TypedDict): + id: str + created: int + owned_by: str + root: str + archived: bool + capabilities: FTModelCapabilitiesOutTypedDict + job: str + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + + +class FTModelOut(BaseModel): + id: str + created: int + owned_by: str + root: str + archived: bool + capabilities: FTModelCapabilitiesOut + job: str + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + max_context_length: Optional[int] = 32768 + aliases: Optional[List[str]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "name", "description", "max_context_length", "aliases"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py new file mode 100644 index 0000000..78eb259 --- /dev/null +++ b/src/mistralai/models/function.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Any, Dict, Optional, TypedDict +from typing_extensions import NotRequired + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + + +class Function(BaseModel): + name: str + parameters: Dict[str, Any] + description: Optional[str] = "" + diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py new file mode 100644 index 0000000..c8a6591 --- /dev/null +++ b/src/mistralai/models/functioncall.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Any, Dict, TypedDict, Union + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + arguments: Arguments + + +ArgumentsTypedDict = Union[Dict[str, Any], str] + + +Arguments = Union[Dict[str, Any], str] + diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py new file mode 100644 index 0000000..8c4cdd9 --- /dev/null +++ b/src/mistralai/models/githubrepositoryin.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class GithubRepositoryInTypedDict(TypedDict): + name: str + owner: str + token: str + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryIn(BaseModel): + name: str + owner: str + token: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "github" # type: ignore + ref: OptionalNullable[str] = UNSET + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "ref", "weight"] + nullable_fields = ["ref"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py new file mode 100644 index 0000000..6bc539e --- /dev/null +++ b/src/mistralai/models/githubrepositoryout.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class GithubRepositoryOutTypedDict(TypedDict): + name: str + owner: str + commit_id: str + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryOut(BaseModel): + name: str + owner: str + commit_id: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "github" # type: ignore + ref: OptionalNullable[str] = UNSET + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "ref", "weight"] + nullable_fields = ["ref"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py new file mode 100644 index 0000000..4e4a209 --- /dev/null +++ b/src/mistralai/models/httpvalidationerror.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .validationerror import ValidationError +from mistralai import utils +from mistralai.types import BaseModel +from typing import List, Optional + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[ValidationError]] = None + + + +class HTTPValidationError(Exception): + r"""Validation Error""" + data: HTTPValidationErrorData + + def __init__(self, data: HTTPValidationErrorData): + self.data = data + + def __str__(self) -> str: + return utils.marshal_json(self.data, HTTPValidationErrorData) + diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py new file mode 100644 index 0000000..95cd8bf --- /dev/null +++ b/src/mistralai/models/jobin.py @@ -0,0 +1,73 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .finetuneablemodel import FineTuneableModel +from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict +from .trainingfile import TrainingFile, TrainingFileTypedDict +from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict +from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class JobInTypedDict(TypedDict): + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + hyperparameters: TrainingParametersInTypedDict + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + training_files: NotRequired[List[TrainingFileTypedDict]] + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + suffix: NotRequired[Nullable[str]] + r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: NotRequired[Nullable[List[WandbIntegrationTypedDict]]] + r"""A list of integrations to enable for your fine-tuning job.""" + repositories: NotRequired[List[GithubRepositoryInTypedDict]] + auto_start: NotRequired[bool] + r"""This field will be required in a future release.""" + + +class JobIn(BaseModel): + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + hyperparameters: TrainingParametersIn + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + training_files: Optional[List[TrainingFile]] = None + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + suffix: OptionalNullable[str] = UNSET + r"""A string that will be added to your fine-tuning model name. 
For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: OptionalNullable[List[WandbIntegration]] = UNSET + r"""A list of integrations to enable for your fine-tuning job.""" + repositories: Optional[List[GithubRepositoryIn]] = None + auto_start: Optional[bool] = None + r"""This field will be required in a future release.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["training_files", "validation_files", "suffix", "integrations", "repositories", "auto_start"] + nullable_fields = ["validation_files", "suffix", "integrations"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py new file mode 100644 index 0000000..9d3bfba --- /dev/null +++ b/src/mistralai/models/jobmetadataout.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import TypedDict +from typing_extensions import NotRequired + + +class JobMetadataOutTypedDict(TypedDict): + expected_duration_seconds: NotRequired[Nullable[int]] + cost: NotRequired[Nullable[float]] + cost_currency: NotRequired[Nullable[str]] + train_tokens_per_step: NotRequired[Nullable[int]] + train_tokens: NotRequired[Nullable[int]] + data_tokens: NotRequired[Nullable[int]] + estimated_start_time: NotRequired[Nullable[int]] + + +class JobMetadataOut(BaseModel): + expected_duration_seconds: OptionalNullable[int] = UNSET + cost: OptionalNullable[float] = UNSET + cost_currency: OptionalNullable[str] = UNSET + train_tokens_per_step: OptionalNullable[int] = UNSET + train_tokens: OptionalNullable[int] = UNSET + data_tokens: OptionalNullable[int] = UNSET + estimated_start_time: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time"] + nullable_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/jobout.py b/src/mistralai/models/jobout.py new file mode 100644 index 0000000..353b5cf --- /dev/null +++ 
b/src/mistralai/models/jobout.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .finetuneablemodel import FineTuneableModel +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .trainingparameters import TrainingParameters, TrainingParametersTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, List, Literal, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +Status = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +r"""The current status of the fine-tuning job.""" + +class JobOutTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + hyperparameters: TrainingParametersTypedDict + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + status: Status + r"""The current status of the fine-tuning job.""" + job_type: str + r"""The type of job (`FT` for fine-tuning).""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[Nullable[List[WandbIntegrationOutTypedDict]]] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + repositories: NotRequired[List[GithubRepositoryOutTypedDict]] + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + + +class JobOut(BaseModel): + id: str + r"""The ID of the job.""" + auto_start: bool + hyperparameters: TrainingParameters + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + status: Status + r"""The current status of the fine-tuning job.""" + job_type: str + r"""The type of job (`FT` for fine-tuning).""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job" # type: ignore + r"""The object type of the fine-tuning job.""" + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: OptionalNullable[List[WandbIntegrationOut]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + repositories: Optional[List[GithubRepositoryOut]] = None + metadata: OptionalNullable[JobMetadataOut] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["validation_files", "object", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "repositories", "metadata"] + nullable_fields = ["validation_files", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/jobs.py b/src/mistralai/models/jobs.py deleted file mode 100644 index 64d3351..0000000 --- a/src/mistralai/models/jobs.py +++ /dev/null @@ -1,100 +0,0 @@ -from datetime import datetime -from typing import Annotated, List, Literal, Optional, Union - -from pydantic import BaseModel, Field - - -class TrainingParameters(BaseModel): - training_steps: int = Field(1800, le=10000, ge=1) - learning_rate: float = Field(1.0e-4, le=1, ge=1.0e-8) - - -class 
WandbIntegration(BaseModel): - type: Literal["wandb"] = "wandb" - project: str - name: Union[str, None] = None - run_name: Union[str, None] = None - - -class WandbIntegrationIn(WandbIntegration): - api_key: str - - -Integration = Annotated[Union[WandbIntegration], Field(discriminator="type")] -IntegrationIn = Annotated[Union[WandbIntegrationIn], Field(discriminator="type")] - - -class JobMetadata(BaseModel): - object: Literal["job.metadata"] = "job.metadata" - training_steps: int - train_tokens_per_step: int - data_tokens: int - train_tokens: int - epochs: float - expected_duration_seconds: Optional[int] - cost: Optional[float] = None - cost_currency: Optional[str] = None - - -class Job(BaseModel): - id: str - hyperparameters: TrainingParameters - fine_tuned_model: Union[str, None] - model: str - status: Literal[ - "QUEUED", - "STARTED", - "RUNNING", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", - ] - job_type: str - created_at: int - modified_at: int - training_files: list[str] - validation_files: Union[list[str], None] = [] - object: Literal["job"] - integrations: List[Integration] = [] - - -class Event(BaseModel): - name: str - data: Union[dict, None] = None - created_at: int - - -class Metric(BaseModel): - train_loss: Union[float, None] = None - valid_loss: Union[float, None] = None - valid_mean_token_accuracy: Union[float, None] = None - - -class Checkpoint(BaseModel): - metrics: Metric - step_number: int - created_at: int - - -class JobQueryFilter(BaseModel): - page: int = 0 - page_size: int = 100 - model: Optional[str] = None - created_after: Optional[datetime] = None - created_by_me: Optional[bool] = None - status: Optional[str] = None - wandb_project: Optional[str] = None - wandb_name: Optional[str] = None - suffix: Optional[str] = None - - -class DetailedJob(Job): - events: list[Event] = [] - checkpoints: list[Checkpoint] = [] - estimated_start_time: Optional[int] = None - - -class Jobs(BaseModel): - data: list[Job] = [] - object: Literal["list"] diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py new file mode 100644 index 0000000..e32d52b --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to archive.""" + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to archive.""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py new file mode 100644 index 0000000..0ba05ee --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to cancel.""" + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): + job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the job to cancel.""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py new file mode 100644 index 0000000..c4ba3c1 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .jobout import JobOut, JobOutTypedDict +from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict +from typing import Union + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = Union[LegacyJobMetadataOutTypedDict, JobOutTypedDict] +r"""OK""" + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponse = Union[LegacyJobMetadataOut, JobOut] +r"""OK""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py new file mode 100644 index 0000000..f8924c8 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to analyse.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): + job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the job to analyse.""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py new file mode 100644 index 0000000..bb5bf3b --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -0,0 +1,81 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Literal, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +QueryParamStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +r"""The current job state to filter on. 
When set, the other results are not displayed.""" + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + r"""The page number of the results to be returned.""" + page_size: NotRequired[int] + r"""The number of items to return per page.""" + model: NotRequired[Nullable[str]] + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + created_after: NotRequired[Nullable[datetime]] + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_by_me: NotRequired[bool] + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + status: NotRequired[Nullable[QueryParamStatus]] + r"""The current job state to filter on. When set, the other results are not displayed.""" + wandb_project: NotRequired[Nullable[str]] + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + wandb_name: NotRequired[Nullable[str]] + r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" + suffix: NotRequired[Nullable[str]] + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): + page: Annotated[Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = 0 + r"""The page number of the results to be returned.""" + page_size: Annotated[Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = 100 + r"""The number of items to return per page.""" + model: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + created_after: Annotated[OptionalNullable[datetime], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_by_me: Annotated[Optional[bool], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = False + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + status: Annotated[OptionalNullable[QueryParamStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The current job state to filter on. When set, the other results are not displayed.""" + wandb_project: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + wandb_name: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" + suffix: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The model suffix to filter on. 
When set, the other results are not displayed.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["page", "page_size", "model", "created_after", "created_by_me", "status", "wandb_project", "wandb_name", "suffix"] + nullable_fields = ["model", "created_after", "status", "wandb_project", "wandb_name", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py new file mode 100644 index 0000000..312063f --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): + job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py new file mode 100644 index 0000000..ef44fed --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to unarchive.""" + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to unarchive.""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py new file mode 100644 index 0000000..8a229f0 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to update.""" + update_ft_model_in: UpdateFTModelInTypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to update.""" + update_ft_model_in: Annotated[UpdateFTModelIn, FieldMetadata(request=RequestMetadata(media_type="application/json"))] + diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py new file mode 100644 index 0000000..15776ad --- /dev/null +++ b/src/mistralai/models/jobsout.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .jobout import JobOut, JobOutTypedDict +from mistralai.types import BaseModel +import pydantic +from typing import Final, List, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class JobsOutTypedDict(TypedDict): + total: int + data: NotRequired[List[JobOutTypedDict]] + + +class JobsOut(BaseModel): + total: int + data: Optional[List[JobOut]] = None + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "list" # type: ignore + diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py new file mode 100644 index 0000000..f4c2d7a --- /dev/null +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -0,0 +1,80 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class LegacyJobMetadataOutTypedDict(TypedDict): + details: str + expected_duration_seconds: NotRequired[Nullable[int]] + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: NotRequired[Nullable[float]] + r"""The cost of the fine-tuning job.""" + cost_currency: NotRequired[Nullable[str]] + r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: NotRequired[Nullable[int]] + r"""The number of tokens consumed by one training step.""" + train_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens in the training dataset.""" + estimated_start_time: NotRequired[Nullable[int]] + deprecated: NotRequired[bool] + epochs: NotRequired[Nullable[float]] + r"""The number of complete passes through the entire training dataset.""" + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + + +class LegacyJobMetadataOut(BaseModel): + details: str + expected_duration_seconds: OptionalNullable[int] = UNSET + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: OptionalNullable[float] = UNSET + r"""The cost of the fine-tuning job.""" + cost_currency: OptionalNullable[str] = UNSET + r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: OptionalNullable[int] = UNSET + r"""The number of tokens consumed by one training step.""" + train_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens in the training dataset.""" + estimated_start_time: OptionalNullable[int] = UNSET + deprecated: Optional[bool] = True + epochs: OptionalNullable[float] = UNSET + r"""The number of complete passes through the entire training dataset.""" + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job.metadata" # type: ignore + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time", "deprecated", "epochs", "training_steps", "object"] + nullable_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time", "epochs", "training_steps"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py new file mode 100644 index 0000000..b6f4dd1 --- /dev/null +++ b/src/mistralai/models/listfilesout.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .fileschema import FileSchema, FileSchemaTypedDict +from mistralai.types import BaseModel +from typing import List, TypedDict + + +class ListFilesOutTypedDict(TypedDict): + data: List[FileSchemaTypedDict] + object: str + + +class ListFilesOut(BaseModel): + data: List[FileSchema] + object: str + diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py new file mode 100644 index 0000000..b85cd7d --- /dev/null +++ b/src/mistralai/models/metricout.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import TypedDict +from typing_extensions import NotRequired + + +class MetricOutTypedDict(TypedDict): + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: NotRequired[Nullable[float]] + valid_loss: NotRequired[Nullable[float]] + valid_mean_token_accuracy: NotRequired[Nullable[float]] + + +class MetricOut(BaseModel): + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: OptionalNullable[float] = UNSET + valid_loss: OptionalNullable[float] = UNSET + valid_mean_token_accuracy: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] + nullable_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py new file mode 100644 index 0000000..c22ce59 --- /dev/null +++ b/src/mistralai/models/modelcapabilities.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class ModelCapabilitiesTypedDict(TypedDict): + completion_chat: NotRequired[bool] + completion_fim: NotRequired[bool] + function_calling: NotRequired[bool] + fine_tuning: NotRequired[bool] + + +class ModelCapabilities(BaseModel): + completion_chat: Optional[bool] = True + completion_fim: Optional[bool] = False + function_calling: Optional[bool] = True + fine_tuning: Optional[bool] = False + diff --git a/src/mistralai/models/modelcard.py b/src/mistralai/models/modelcard.py new file mode 100644 index 0000000..80e082e --- /dev/null +++ b/src/mistralai/models/modelcard.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ModelCardTypedDict(TypedDict): + id: str + capabilities: ModelCapabilitiesTypedDict + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + root: NotRequired[Nullable[str]] + archived: NotRequired[bool] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + + +class ModelCard(BaseModel): + id: str + capabilities: ModelCapabilities + object: Optional[str] = "model" + created: Optional[int] = None + owned_by: Optional[str] = "mistralai" + root: OptionalNullable[str] = UNSET + archived: Optional[bool] = False + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + max_context_length: Optional[int] = 32768 + aliases: Optional[List[str]] = None + deprecation: OptionalNullable[datetime] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "created", "owned_by", "root", "archived", "name", "description", "max_context_length", "aliases", "deprecation"] + nullable_fields = ["root", "name", "description", "deprecation"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py new file mode 100644 index 0000000..0c76b32 --- /dev/null +++ b/src/mistralai/models/modellist.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .modelcard import ModelCard, ModelCardTypedDict +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ModelListTypedDict(TypedDict): + object: NotRequired[str] + data: NotRequired[List[ModelCardTypedDict]] + + +class ModelList(BaseModel): + object: Optional[str] = "list" + data: Optional[List[ModelCard]] = None + diff --git a/src/mistralai/models/models.py b/src/mistralai/models/models.py deleted file mode 100644 index f88033d..0000000 --- a/src/mistralai/models/models.py +++ /dev/null @@ -1,39 +0,0 @@ -from typing import List, Optional - -from pydantic import BaseModel - - -class ModelPermission(BaseModel): - id: str - object: str - created: int - allow_create_engine: Optional[bool] = False - allow_sampling: bool = True - allow_logprobs: bool = True - allow_search_indices: Optional[bool] = False - allow_view: bool = True - allow_fine_tuning: bool = False - organization: str = "*" - group: Optional[str] = None - is_blocking: Optional[bool] = False - - -class ModelCard(BaseModel): - id: str - object: str - created: int - owned_by: str - root: Optional[str] = None - parent: Optional[str] = None - permission: List[ModelPermission] = [] - - -class ModelList(BaseModel): - object: str - data: List[ModelCard] - - -class ModelDeleted(BaseModel): - id: str - object: str - deleted: bool diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py new file mode 100644 index 0000000..0ead91a --- /dev/null +++ b/src/mistralai/models/responseformat.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ResponseFormats = Literal["text", "json_object"] +r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + +class ResponseFormatTypedDict(TypedDict): + type: NotRequired[ResponseFormats] + + +class ResponseFormat(BaseModel): + type: Optional[ResponseFormats] = "text" + diff --git a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py new file mode 100644 index 0000000..8a37b1c --- /dev/null +++ b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
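The `ResponseFormats` docstring above is the only JSON-mode documentation in these model files, so a usage sketch may help. It assumes the chat client added elsewhere in this patch (see docs/sdks/chat/README.md) exposes a `complete()` method that accepts a `response_format` argument built from this model; treat the call below as a hypothetical illustration rather than confirmed API.

```python
from mistralai import Mistral, models

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

# JSON mode only guarantees syntactically valid JSON; per the docstring above,
# the prompt itself must still ask the model to answer in JSON.
res = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Reply with a JSON object with keys 'city' and 'country' for Paris."}],
    response_format=models.ResponseFormat(type="json_object"),
)
```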
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to retrieve.""" + + +class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to retrieve.""" + diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py new file mode 100644 index 0000000..98af323 --- /dev/null +++ b/src/mistralai/models/retrievefileout.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .sampletype import SampleType +from .source import Source +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, TypedDict +from typing_extensions import Annotated, NotRequired + + +class RetrieveFileOutTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + + +class RetrieveFileOut(BaseModel): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + PURPOSE: Annotated[Final[str], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines"] + nullable_fields = ["num_lines"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/sampletype.py b/src/mistralai/models/sampletype.py new file mode 100644 index 0000000..83424f3 --- /dev/null +++ b/src/mistralai/models/sampletype.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +SampleType = Literal["pretrain", "instruct"] diff --git a/src/mistralai/models/sdkerror.py b/src/mistralai/models/sdkerror.py new file mode 100644 index 0000000..03216cb --- /dev/null +++ b/src/mistralai/models/sdkerror.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from dataclasses import dataclass +from typing import Optional +import httpx + + +@dataclass +class SDKError(Exception): + """Represents an error returned by the API.""" + + message: str + status_code: int = -1 + body: str = "" + raw_response: Optional[httpx.Response] = None + + def __str__(self): + body = "" + if len(self.body) > 0: + body = f"\n{self.body}" + + return f"{self.message}: Status {self.status_code}{body}" diff --git a/src/mistralai/models/security.py b/src/mistralai/models/security.py new file mode 100644 index 0000000..3d69602 --- /dev/null +++ b/src/mistralai/models/security.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, SecurityMetadata +from typing import Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class SecurityTypedDict(TypedDict): + api_key: NotRequired[str] + + +class Security(BaseModel): + api_key: Annotated[Optional[str], FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] = None + diff --git a/src/mistralai/models/source.py b/src/mistralai/models/source.py new file mode 100644 index 0000000..66d09ae --- /dev/null +++ b/src/mistralai/models/source.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +Source = Literal["upload", "repository"] diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py new file mode 100644 index 0000000..171acf5 --- /dev/null +++ b/src/mistralai/models/systemmessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +Role = Literal["system"] + +class SystemMessageTypedDict(TypedDict): + content: ContentTypedDict + role: NotRequired[Role] + + +class SystemMessage(BaseModel): + content: Content + role: Optional[Role] = "system" + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] + diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py new file mode 100644 index 0000000..fd95ab8 --- /dev/null +++ b/src/mistralai/models/textchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
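`SDKError` above is the catch-all exception the generated endpoint methods later in this patch raise for 4XX/5XX responses, while 422 bodies get the dedicated `HTTPValidationError`. Its `__str__` is simple enough to show directly; the import path assumes the models package re-exports it, which the client code below already relies on via `models.SDKError`.

```python
from mistralai.models import SDKError

err = SDKError("API error occurred", status_code=429, body='{"detail": "rate limited"}')
print(err)
# API error occurred: Status 429
# {"detail": "rate limited"}
```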
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class TextChunkTypedDict(TypedDict): + text: str + + +class TextChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py new file mode 100644 index 0000000..c790e63 --- /dev/null +++ b/src/mistralai/models/tool.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + + +class Tool(BaseModel): + function: Function + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py new file mode 100644 index 0000000..2afd453 --- /dev/null +++ b/src/mistralai/models/toolcall.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + + +class ToolCall(BaseModel): + function: FunctionCall + id: Optional[str] = "null" + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py new file mode 100644 index 0000000..8445861 --- /dev/null +++ b/src/mistralai/models/toolmessage.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ToolMessageRole = Literal["tool"] + +class ToolMessageTypedDict(TypedDict): + content: str + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + role: NotRequired[ToolMessageRole] + + +class ToolMessage(BaseModel): + content: str + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET + role: Optional[ToolMessageRole] = "tool" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["tool_call_id", "name", "role"] + nullable_fields = ["tool_call_id", "name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/trainingfile.py b/src/mistralai/models/trainingfile.py new file mode 100644 index 0000000..097ea17 --- /dev/null +++ b/src/mistralai/models/trainingfile.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class TrainingFileTypedDict(TypedDict): + file_id: str + weight: NotRequired[float] + + +class TrainingFile(BaseModel): + file_id: str + weight: Optional[float] = 1 + diff --git a/src/mistralai/models/trainingparameters.py b/src/mistralai/models/trainingparameters.py new file mode 100644 index 0000000..2110b55 --- /dev/null +++ b/src/mistralai/models/trainingparameters.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class TrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + epochs: NotRequired[Nullable[float]] + fim_ratio: NotRequired[Nullable[float]] + + +class TrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + learning_rate: Optional[float] = 0.0001 + epochs: OptionalNullable[float] = UNSET + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["training_steps", "learning_rate", "epochs", "fim_ratio"] + nullable_fields = ["training_steps", "epochs", "fim_ratio"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/trainingparametersin.py b/src/mistralai/models/trainingparametersin.py new file mode 100644 index 0000000..60d71b2 --- /dev/null +++ b/src/mistralai/models/trainingparametersin.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class TrainingParametersInTypedDict(TypedDict): + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: NotRequired[float] + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + epochs: NotRequired[Nullable[float]] + fim_ratio: NotRequired[Nullable[float]] + + +class TrainingParametersIn(BaseModel): + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: Optional[float] = 0.0001 + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + epochs: OptionalNullable[float] = UNSET + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["training_steps", "learning_rate", "epochs", "fim_ratio"] + nullable_fields = ["training_steps", "epochs", "fim_ratio"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py new file mode 100644 index 0000000..07334f5 --- /dev/null +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class UnarchiveFTModelOutTypedDict(TypedDict): + id: str + archived: NotRequired[bool] + + +class UnarchiveFTModelOut(BaseModel): + id: str + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore + archived: Optional[bool] = False + diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py new file mode 100644 index 0000000..8c3d847 --- /dev/null +++ b/src/mistralai/models/updateftmodelin.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
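`TrainingParametersIn` above carries the user-facing fine-tuning hyperparameters. A small construction sketch follows; handing the object to the fine-tuning jobs endpoint is an assumption based on the jobs models elsewhere in this patch and is not shown here.

```python
from mistralai import models

params = models.TrainingParametersIn(
    training_steps=100,     # number of weight updates to run
    learning_rate=0.0001,   # matches the default above
)
# epochs and fim_ratio are left UNSET, so the wrap serializer omits them
# entirely when the request body is serialized.
```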
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import TypedDict +from typing_extensions import NotRequired + + +class UpdateFTModelInTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateFTModelIn(BaseModel): + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py new file mode 100644 index 0000000..cd5b86c --- /dev/null +++ b/src/mistralai/models/uploadfileout.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .sampletype import SampleType +from .source import Source +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, TypedDict +from typing_extensions import Annotated, NotRequired + + +class UploadFileOutTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + + +class UploadFileOut(BaseModel): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + PURPOSE: Annotated[Final[str], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines"] + nullable_fields = ["num_lines"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py new file mode 100644 index 0000000..153ab6b --- /dev/null +++ b/src/mistralai/models/usageinfo.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class UsageInfo(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py new file mode 100644 index 0000000..bea7328 --- /dev/null +++ b/src/mistralai/models/usermessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +UserMessageRole = Literal["user"] + +class UserMessageTypedDict(TypedDict): + content: UserMessageContentTypedDict + role: NotRequired[UserMessageRole] + + +class UserMessage(BaseModel): + content: UserMessageContent + role: Optional[UserMessageRole] = "user" + + +UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] + + +UserMessageContent = Union[str, List[TextChunk]] + diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py new file mode 100644 index 0000000..2d4a97b --- /dev/null +++ b/src/mistralai/models/validationerror.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import List, TypedDict, Union + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + msg: str + type: str + + +LocTypedDict = Union[str, int] + + +Loc = Union[str, int] + diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py new file mode 100644 index 0000000..fccab00 --- /dev/null +++ b/src/mistralai/models/wandbintegration.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class WandbIntegrationTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + api_key: str + r"""The WandB API key to use for authentication.""" + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + + +class WandbIntegration(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + api_key: str + r"""The WandB API key to use for authentication.""" + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "wandb" # type: ignore + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "name", "run_name"] + nullable_fields = ["name", "run_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py new file mode 100644 index 0000000..f971ba5 --- /dev/null +++ b/src/mistralai/models/wandbintegrationout.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class WandbIntegrationOutTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + + +class WandbIntegrationOut(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "wandb" # type: ignore + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. 
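Note the asymmetry between the two W&B models: `WandbIntegration` (request side) carries the `api_key`, while `WandbIntegrationOut` below omits it. A minimal construction sketch; attaching it to a fine-tuning job's integrations list is an assumption about the jobs API, which lives in a part of the patch not shown here.

```python
from mistralai import models

wandb = models.WandbIntegration(
    project="my-finetune-runs",   # W&B project the run is created under
    api_key="WANDB_API_KEY",      # placeholder
    name="nightly-finetune",      # optional display name; defaults to the job ID
)
```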
If not set, will use the job ID as the name.""" + run_name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "name", "run_name"] + nullable_fields = ["name", "run_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py new file mode 100644 index 0000000..aba5782 --- /dev/null +++ b/src/mistralai/models_.py @@ -0,0 +1,928 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Optional + +class Models(BaseSDK): + r"""Model Management API""" + + + def list( + self, *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ModelList]: + r"""List Models + + List all models available to the user. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + req = self.build_request( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected 
response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def list_async( + self, *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ModelList]: + r"""List Models + + List all models available to the user. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + req = self.build_request( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def retrieve( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ModelCard]: + r"""Retrieve Model + + Retrieve a model information. + + :param model_id: The ID of the model to retrieve. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self.build_request( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ModelCard]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def retrieve_async( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ModelCard]: + r"""Retrieve Model + + Retrieve a model information. + + :param model_id: The ID of the model to retrieve. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self.build_request( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ModelCard]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def delete( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DeleteModelOut]: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def delete_async( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DeleteModelOut]: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def update( + self, *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FTModelOut]: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. 
+ :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_ft_model_in=models.UpdateFTModelIn( + name=name, + description=description, + ), + ) + + req = self.build_request( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def update_async( + self, *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FTModelOut]: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. 
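`update()` and `update_async()` wrap the `UpdateFTModelIn` body shown earlier, surfacing `name` and `description` as keyword arguments. A short usage sketch with a placeholder model ID:

```python
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # placeholder key

# Rename a fine-tuned model; description stays UNSET, so it is left untouched.
updated = client.models.update(
    model_id="ft:open-mistral-7b:placeholder:xxxx",  # hypothetical model ID
    name="my-renamed-model",
)
print(updated)
```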
+ :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_ft_model_in=models.UpdateFTModelIn( + name=name, + description=description, + ), + ) + + req = self.build_request( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def archive( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ArchiveFTModelOut]: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ArchiveFTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def archive_async( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ArchiveFTModelOut]: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ArchiveFTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def unarchive( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.UnarchiveFTModelOut]: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.UnarchiveFTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def unarchive_async( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.UnarchiveFTModelOut]: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.UnarchiveFTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/py.typed b/src/mistralai/py.typed index e69de29..3e38f1a 100644 --- a/src/mistralai/py.typed +++ b/src/mistralai/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py new file mode 100644 index 0000000..b0d2bb1 --- /dev/null +++ b/src/mistralai/sdk.py @@ -0,0 +1,119 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, HttpClient +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, NoOpLogger +from .utils.retries import RetryConfig +import httpx +from mistralai import models, utils +from mistralai._hooks import SDKHooks +from mistralai.agents import Agents +from mistralai.chat import Chat +from mistralai.embeddings import Embeddings +from mistralai.files import Files +from mistralai.fim import Fim +from mistralai.fine_tuning import FineTuning +from mistralai.models_ import Models +from mistralai.types import OptionalNullable, UNSET +from typing import Any, Callable, Dict, Optional, Union + +class Mistral(BaseSDK): + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + models: Models + r"""Model Management API""" + files: Files + r"""Files API""" + fine_tuning: FineTuning + chat: Chat + r"""Chat Completion API.""" + fim: Fim + r"""Fill-in-the-middle API.""" + agents: Agents + r"""Agents API.""" + embeddings: Embeddings + r"""Embeddings API.""" + def __init__( + self, + api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, + server: Optional[str] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, + debug_logger: Optional[Logger] = None + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. + + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + """ + if client is None: + client = httpx.Client() + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + if async_client is None: + async_client = httpx.AsyncClient() + + if debug_logger is None: + debug_logger = NoOpLogger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." 
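As a usage sketch (illustrative, not part of the generated file): once this constructor exists, a client can be built from the environment key and an optional retry policy. The values below are placeholders.

    import os
    from mistralai import Mistral
    from mistralai.utils import BackoffStrategy, RetryConfig

    mistral = Mistral(
        api_key=os.getenv("MISTRAL_API_KEY"),
        # backoff: 500 ms initial interval, 60 s cap, exponent 1.5, give up after 1 h
        retry_config=RetryConfig("backoff", BackoffStrategy(500, 60000, 1.5, 3600000), True),
    )
    # Sub-SDKs are attached by _init_sdks(): mistral.chat, mistral.models, mistral.fine_tuning, ...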
+ + security: Any = None + if callable(api_key): + security = lambda: models.Security(api_key = api_key()) # pylint: disable=unnecessary-lambda-assignment + else: + security = models.Security(api_key = api_key) + + if server_url is not None: + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + + BaseSDK.__init__(self, SDKConfiguration( + client=client, + async_client=async_client, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger + )) + + hooks = SDKHooks() + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init(current_server_url, self.sdk_configuration.client) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + + self._init_sdks() + + + def _init_sdks(self): + self.models = Models(self.sdk_configuration) + self.files = Files(self.sdk_configuration) + self.fine_tuning = FineTuning(self.sdk_configuration) + self.chat = Chat(self.sdk_configuration) + self.fim = Fim(self.sdk_configuration) + self.agents = Agents(self.sdk_configuration) + self.embeddings = Embeddings(self.sdk_configuration) + diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py new file mode 100644 index 0000000..8da7f2a --- /dev/null +++ b/src/mistralai/sdkconfiguration.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + + +from ._hooks import SDKHooks +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai import models +from mistralai.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_PROD = "prod" +r"""Production server""" +SERVERS = { + SERVER_PROD: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: HttpClient + async_client: AsyncHttpClient + debug_logger: Logger + security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = "0.0.2" + sdk_version: str = "1.0.0-rc.2" + gen_version: str = "2.388.1" + user_agent: str = "speakeasy-sdk/python 1.0.0-rc.2 2.388.1 0.0.2 mistralai" + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def __post_init__(self): + self._hooks = SDKHooks() + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_PROD + + if self.server not in SERVERS: + raise ValueError(f"Invalid server \"{self.server}\"") + + return SERVERS[self.server], {} + + + def get_hooks(self) -> SDKHooks: + return self._hooks diff --git a/src/mistralai/types/__init__.py b/src/mistralai/types/__init__.py new file mode 100644 index 0000000..fc76fe0 --- /dev/null +++ b/src/mistralai/types/__init__.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basemodel import ( + BaseModel, + Nullable, + OptionalNullable, + UnrecognizedInt, + UnrecognizedStr, + UNSET, + UNSET_SENTINEL, +) + +__all__ = [ + "BaseModel", + "Nullable", + "OptionalNullable", + "UnrecognizedInt", + "UnrecognizedStr", + "UNSET", + "UNSET_SENTINEL", +] diff --git a/src/mistralai/types/basemodel.py b/src/mistralai/types/basemodel.py new file mode 100644 index 0000000..a6187ef --- /dev/null +++ b/src/mistralai/types/basemodel.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from pydantic import ConfigDict, model_serializer +from pydantic import BaseModel as PydanticBaseModel +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing_extensions import TypeAliasType, TypeAlias + + +class BaseModel(PydanticBaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() + ) + + +class Unset(BaseModel): + @model_serializer(mode="plain") + def serialize_model(self): + return UNSET_SENTINEL + + def __bool__(self) -> Literal[False]: + return False + + +UNSET = Unset() +UNSET_SENTINEL = "~?~unset~?~sentinel~?~" + + +T = TypeVar("T") +if TYPE_CHECKING: + Nullable: TypeAlias = Union[T, None] + OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] +else: + Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) + OptionalNullable = TypeAliasType( + "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) + ) + +UnrecognizedInt = NewType("UnrecognizedInt", int) +UnrecognizedStr = NewType("UnrecognizedStr", str) diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py new file mode 100644 index 0000000..75ca024 --- /dev/null +++ b/src/mistralai/utils/__init__.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .annotations import get_discriminator +from .enums import OpenEnumMeta +from .headers import get_headers, get_response_headers +from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, +) +from .queryparams import get_query_params +from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig +from .requestbodies import serialize_request_body, SerializedRequestBody +from .security import get_security, get_security_from_env + +from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, +) +from .url import generate_url, template_url, remove_suffix +from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .logger import Logger, get_body_content, NoOpLogger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_discriminator", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "get_security_from_env", + "HeaderMetadata", + "Logger", + "marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "NoOpLogger", + "OpenEnumMeta", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + "serialize_request_body", + "SerializedRequestBody", + "stream_to_text", + "template_url", + "unmarshal", + "unmarshal_json", + "validate_decimal", + "validate_float", + "validate_int", + "validate_open_enum", +] diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py new file mode 100644 index 0000000..0d17472 --- /dev/null +++ b/src/mistralai/utils/annotations.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any + +def get_discriminator(model: Any, fieldname: str, key: str) -> str: + if isinstance(model, dict): + try: + return f'{model.get(key)}' + except AttributeError as e: + raise ValueError(f'Could not find discriminator key {key} in {model}') from e + + if hasattr(model, fieldname): + return f'{getattr(model, fieldname)}' + + fieldname = fieldname.upper() + if hasattr(model, fieldname): + return f'{getattr(model, fieldname)}' + + raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/src/mistralai/utils/enums.py b/src/mistralai/utils/enums.py new file mode 100644 index 0000000..c650b10 --- /dev/null +++ b/src/mistralai/utils/enums.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import enum + + +class OpenEnumMeta(enum.EnumMeta): + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. 
+ # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/src/mistralai/utils/eventstreaming.py b/src/mistralai/utils/eventstreaming.py new file mode 100644 index 0000000..553b386 --- /dev/null +++ b/src/mistralai/utils/eventstreaming.py @@ -0,0 +1,178 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import json +from typing import Callable, TypeVar, Optional, Generator, AsyncGenerator, Tuple +import httpx + +T = TypeVar("T") + + +class ServerEvent: + id: Optional[str] = None + event: Optional[str] = None + data: Optional[str] = None + retry: Optional[int] = None + + +MESSAGE_BOUNDARIES = [ + b"\r\n\r\n", + b"\n\n", + b"\r\r", +] + + +async def stream_events_async( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> AsyncGenerator[T, None]: + buffer = bytearray() + position = 0 + discard = False + async for chunk in response.aiter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. + if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + discard = False + for chunk in response.iter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. 
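A minimal consumption sketch for the two stream_events generators in this file (illustrative only; the decoder and the "[DONE]" sentinel are assumptions, not taken from this patch). Each parsed event is re-serialized as a JSON object with id/event/data/retry fields before being handed to the decoder.

    import json
    import httpx

    def decode(raw: str) -> dict:
        # raw is json.dumps(event.__dict__) produced by _parse_event below
        return json.loads(raw)

    with httpx.stream("GET", "https://example.invalid/sse") as resp:
        for event in stream_events(resp, decode, sentinel="[DONE]"):
            print(event["data"])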
+ if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def _parse_event( + raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None +) -> Tuple[Optional[T], bool]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim <= 0: + continue + + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == "id": + event.id = value + publish = True + elif field == "retry": + event.retry = int(value) if value.isdigit() else None + publish = True + + if sentinel and data == f"{sentinel}\n": + return None, True + + if data: + data = data[:-1] + event.data = data + + data_is_primitive = ( + data.isnumeric() or data == "true" or data == "false" or data == "null" + ) + data_is_json = ( + data.startswith("{") or data.startswith("[") or data.startswith('"') + ) + + if data_is_primitive or data_is_json: + try: + event.data = json.loads(data) + except Exception: + pass + + out = None + if publish: + out = decoder(json.dumps(event.__dict__)) + + return out, False + + +def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py new file mode 100644 index 0000000..07f9b23 --- /dev/null +++ b/src/mistralai/utils/forms.py @@ -0,0 +1,207 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ( + FormMetadata, + MultipartFormMetadata, + find_field_metadata, +) +from .values import _val_to_string + + +def _populate_form( + field_name: str, + explode: bool, + obj: Any, + delimiter: str, + form: Dict[str, List[str]], +): + if obj is None: + return form + + if isinstance(obj, BaseModel): + items = [] + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_field_name = obj_field.alias if obj_field.alias is not None else name + if obj_field_name == "": + continue + + val = getattr(obj, name) + if val is None: + continue + + if explode: + form[obj_field_name] = [_val_to_string(val)] + else: + items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, Dict): + items = [] + for key, value in obj.items(): + if value is None: + continue + + if explode: + form[key] = [_val_to_string(value)] + else: + items.append(f"{key}{delimiter}{_val_to_string(value)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + if explode: + if not field_name in form: + form[field_name] = [] + form[field_name].append(_val_to_string(value)) + else: + items.append(_val_to_string(value)) + + if len(items) > 0: + form[field_name] = [delimiter.join([str(item) for item in items])] + else: + form[field_name] = [_val_to_string(obj)] + + return form + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: + form: Dict[str, Any] = {} + files: Dict[str, Any] = {} + + if not isinstance(request, BaseModel): + raise TypeError("invalid request body type") + + request_fields: Dict[str, FieldInfo] = request.__class__.model_fields + request_field_types = get_type_hints(request.__class__) + + for name in request_fields: + field = request_fields[name] + + val = getattr(request, name) + if val is None: + continue + + field_metadata = find_field_metadata(field, MultipartFormMetadata) + if not field_metadata: + continue + + f_name = field.alias if field.alias is not None else name + + if field_metadata.file: + file_fields: Dict[str, FieldInfo] = val.__class__.model_fields + + file_name = "" + field_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(val, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(val, file_field_name, None) + else: + field_name = ( + file_field.alias + if file_field.alias is not None + else file_field_name + ) + file_name = getattr(val, file_field_name) + + if field_name == "" or file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + if content_type is not None: + files[field_name] = (file_name, content, content_type) + else: + files[field_name] = (file_name, content) + elif field_metadata.json: + files[f_name] = ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ) + else: + if isinstance(val, 
List): + values = [] + + for value in val: + if value is None: + continue + values.append(_val_to_string(value)) + + form[f_name + "[]"] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if val is None: + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git a/src/mistralai/utils/headers.py b/src/mistralai/utils/headers.py new file mode 100644 index 0000000..e14a0f4 --- /dev/null +++ b/src/mistralai/utils/headers.py @@ -0,0 +1,136 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + List, + Optional, +) +from httpx import Headers +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + HeaderMetadata, + find_field_metadata, +) + +from .values import _populate_from_globals, _val_to_string + + +def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: + headers: Dict[str, str] = {} + + globals_already_populated = [] + if headers_params is not None: + globals_already_populated = _populate_headers(headers_params, gbls, headers, []) + if gbls is not None: + _populate_headers(gbls, None, headers, globals_already_populated) + + return headers + + +def _populate_headers( + headers_params: Any, + gbls: Any, + header_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(headers_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + f_name = field.alias if field.alias is not None else name + + metadata = find_field_metadata(field, HeaderMetadata) + if metadata is None: + continue + + value, global_found = _populate_from_globals( + name, getattr(headers_params, name), HeaderMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + value = _serialize_header(metadata.explode, value) + + if value != "": + header_values[f_name] = value + + return globals_already_populated + + +def _serialize_header(explode: bool, obj: Any) -> str: + if obj is None: + return "" + + if isinstance(obj, BaseModel): + items = [] + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) + + if not obj_param_metadata: + continue + + f_name = obj_field.alias if obj_field.alias is not None else name + + val = getattr(obj, name) + if val is None: + continue + + 
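For reference, a small sketch of how _populate_form (defined in forms.py above) treats exploded versus delimited list values; illustrative only, mirroring the branches shown there.

    form = {}
    _populate_form("page", True, [1, 2], ",", form)   # explode=True  -> {"page": ["1", "2"]}

    form = {}
    _populate_form("page", False, [1, 2], ",", form)  # explode=False -> {"page": ["1,2"]}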
if explode: + items.append(f"{f_name}={_val_to_string(val)}") + else: + items.append(f_name) + items.append(_val_to_string(val)) + + if len(items) > 0: + return ",".join(items) + elif isinstance(obj, Dict): + items = [] + + for key, value in obj.items(): + if value is None: + continue + + if explode: + items.append(f"{key}={_val_to_string(value)}") + else: + items.append(key) + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join([str(item) for item in items]) + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join(items) + else: + return f"{_val_to_string(obj)}" + + return "" + + +def get_response_headers(headers: Headers) -> Dict[str, List[str]]: + res: Dict[str, List[str]] = {} + for k, v in headers.items(): + if not k in res: + res[k] = [] + + res[k].append(v) + return res diff --git a/src/mistralai/utils/logger.py b/src/mistralai/utils/logger.py new file mode 100644 index 0000000..7e4bbea --- /dev/null +++ b/src/mistralai/utils/logger.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Any, Protocol + +class Logger(Protocol): + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + +class NoOpLogger: + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + +def get_body_content(req: httpx.Request) -> str: + return "" if not hasattr(req, "_content") else str(req.content) + diff --git a/src/mistralai/utils/metadata.py b/src/mistralai/utils/metadata.py new file mode 100644 index 0000000..173b3e5 --- /dev/null +++ b/src/mistralai/utils/metadata.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Optional, Type, TypeVar, Union +from dataclasses import dataclass +from pydantic.fields import FieldInfo + + +T = TypeVar("T") + + +@dataclass +class SecurityMetadata: + option: bool = False + scheme: bool = False + scheme_type: Optional[str] = None + sub_type: Optional[str] = None + field_name: Optional[str] = None + + def get_field_name(self, default: str) -> str: + return self.field_name or default + + +@dataclass +class ParamMetadata: + serialization: Optional[str] = None + style: str = "simple" + explode: bool = False + + +@dataclass +class PathParamMetadata(ParamMetadata): + pass + + +@dataclass +class QueryParamMetadata(ParamMetadata): + style: str = "form" + explode: bool = True + + +@dataclass +class HeaderMetadata(ParamMetadata): + pass + + +@dataclass +class RequestMetadata: + media_type: str = "application/octet-stream" + + +@dataclass +class MultipartFormMetadata: + file: bool = False + content: bool = False + json: bool = False + + +@dataclass +class FormMetadata: + json: bool = False + style: str = "form" + explode: bool = True + + +class FieldMetadata: + security: Optional[SecurityMetadata] = None + path: Optional[PathParamMetadata] = None + query: Optional[QueryParamMetadata] = None + header: Optional[HeaderMetadata] = None + request: Optional[RequestMetadata] = None + form: Optional[FormMetadata] = None + multipart: Optional[MultipartFormMetadata] = None + + def __init__( + self, + security: Optional[SecurityMetadata] = None, + path: Optional[Union[PathParamMetadata, bool]] = None, + query: Optional[Union[QueryParamMetadata, bool]] = None, + header: Optional[Union[HeaderMetadata, bool]] = None, + request: Optional[Union[RequestMetadata, bool]] = None, + form: Optional[Union[FormMetadata, bool]] = None, + multipart: Optional[Union[MultipartFormMetadata, bool]] = None, + ): + self.security = security + self.path = PathParamMetadata() if isinstance(path, bool) else path + self.query = QueryParamMetadata() if isinstance(query, bool) else query + self.header = HeaderMetadata() if isinstance(header, bool) else header + self.request = RequestMetadata() if isinstance(request, bool) else request + self.form = FormMetadata() if isinstance(form, bool) else form + self.multipart = ( + MultipartFormMetadata() if isinstance(multipart, bool) else multipart + ) + + +def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = find_metadata(field_info, FieldMetadata) + if not metadata: + return None + + fields = metadata.__dict__ + + for field in fields: + if isinstance(fields[field], metadata_type): + return fields[field] + + return None + + +def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = field_info.metadata + if not metadata: + return None + + for md in metadata: + if isinstance(md, metadata_type): + return md + + return None diff --git a/src/mistralai/utils/queryparams.py b/src/mistralai/utils/queryparams.py new file mode 100644 index 0000000..1c8c583 --- /dev/null +++ b/src/mistralai/utils/queryparams.py @@ -0,0 +1,203 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, +) + +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + QueryParamMetadata, + find_field_metadata, +) +from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .forms import _populate_form + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, []) + if gbls is not None: + _populate_query_params(gbls, None, params, globals_already_populated) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(query_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields + param_field_types = get_type_hints(query_params.__class__) + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + + metadata = find_field_metadata(field, QueryParamMetadata) + if not metadata: + continue + + value = getattr(query_params, name) if query_params is not None else None + + value, global_found = _populate_from_globals( + name, value, QueryParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + f_name = field.alias if field.alias is not None else name + serialization = metadata.serialization + if serialization is not None: + serialized_parms = _get_serialized_params( + metadata, f_name, value, param_field_types[name] + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.style + if style == "deepObject": + _populate_deep_object_query_params(f_name, value, query_param_values) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise NotImplementedError( + f"query param style {style} not yet supported" + ) + + return globals_already_populated + + +def _populate_deep_object_query_params( + field_name: str, + obj: Any, + params: Dict[str, List[str]], +): + if obj is None: + return + + if isinstance(obj, BaseModel): + _populate_deep_object_query_params_basemodel(field_name, obj, params) + elif isinstance(obj, Dict): + _populate_deep_object_query_params_dict(field_name, obj, params) + + +def _populate_deep_object_query_params_basemodel( + prior_params_key: str, + obj: Any, + params: Dict[str, List[str]], +): + if obj is None: + return + + if not isinstance(obj, BaseModel): + return + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + + f_name = obj_field.alias if obj_field.alias is not None else name + + params_key = f"{prior_params_key}[{f_name}]" + + obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) + if obj_param_metadata is None: + continue + + obj_val = getattr(obj, name) + if obj_val is None: + continue + + if isinstance(obj_val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif 
isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if value is None: + return + + for key, val in value.items(): + if val is None: + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if value is None: + return + + for val in value: + if val is None: + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def _populate_delimited_query_params( + metadata: QueryParamMetadata, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/src/mistralai/utils/requestbodies.py b/src/mistralai/utils/requestbodies.py new file mode 100644 index 0000000..4f586ae --- /dev/null +++ b/src/mistralai/utils/requestbodies.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: str + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type 
{type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git a/src/mistralai/utils/retries.py b/src/mistralai/utils/retries.py new file mode 100644 index 0000000..a06f927 --- /dev/null +++ b/src/mistralai/utils/retries.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import random +import time +from typing import List + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + + def __init__(self, response: httpx.Response): + self.response = response + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except 
Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + +def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + sleep = min(sleep, max_interval / 1000) + time.sleep(sleep) + retries += 1 + + +async def retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + sleep = min(sleep, max_interval / 1000) + time.sleep(sleep) + retries += 1 diff --git a/src/mistralai/utils/security.py b/src/mistralai/utils/security.py new file mode 100644 index 0000000..4c511d9 --- /dev/null +++ b/src/mistralai/utils/security.py @@ -0,0 +1,185 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import base64 + +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) +import os + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth which could be a flattened model + if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseModel]: + if security is not None: + return security + + if not issubclass(security_class, BaseModel): + raise TypeError("security_class must be a pydantic model class") + + security_dict: Any = {} + + if os.getenv("MISTRAL_API_KEY"): + security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") + + return security_class(**security_dict) if security_dict else None + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http" and sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + 
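A short sketch of the environment fallback implemented by get_security_from_env above (illustrative; assumes no explicit security object was supplied, and uses a placeholder key).

    import os
    from mistralai import models
    from mistralai.utils import get_security_from_env

    os.environ["MISTRAL_API_KEY"] = "dummy-key"            # placeholder value
    sec = get_security_from_env(None, models.Security)     # -> Security(api_key="dummy-key")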
header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py new file mode 100644 index 0000000..a98998a --- /dev/null +++ b/src/mistralai/utils/serializers.py @@ -0,0 +1,181 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +import json +from typing import Any, Dict, List, Union, get_args +import httpx +from typing_extensions import get_origin +from pydantic import ConfigDict, create_model +from pydantic_core import from_json +from typing_inspect import is_optional_type + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable + + +def serialize_decimal(as_str: bool): + def serialize(d): + if is_optional_type(type(d)) and d is None: + return None + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, Decimal): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + if is_optional_type(type(f)) and f is None: + return None + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, float): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(b): + if is_optional_type(type(b)) and b is None: + return None + + if not isinstance(b, int): + raise ValueError("Expected int") + + return str(b) if as_str else b + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, int): + return b + + if not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_open_enum(is_int: bool): + def validate(e): + if e is None: + return None + + if is_int: + if not isinstance(e, int): + raise ValueError("Expected int") + else: + if not isinstance(e, str): + raise ValueError("Expected string") + + return e + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return 
any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False diff --git a/src/mistralai/utils/url.py b/src/mistralai/utils/url.py new file mode 100644 index 0000000..b201bfa --- /dev/null +++ b/src/mistralai/utils/url.py @@ -0,0 +1,150 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from decimal import Decimal +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, + Union, + get_args, + get_origin, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + PathParamMetadata, + find_field_metadata, +) +from .values import _get_serialized_params, _populate_from_globals, _val_to_string + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if gbls is not None: + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(path_params, BaseModel): + return globals_already_populated + + path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields + path_param_field_types = get_type_hints(path_params.__class__) + for name in path_param_fields: + if name in skip_fields: + continue + + field = path_param_fields[name] + + param_metadata = find_field_metadata(field, PathParamMetadata) + if param_metadata is None: + continue + + param = getattr(path_params, name) if path_params is not None else None + param, global_found = _populate_from_globals( + name, param, PathParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + if param is None: + continue + + f_name = field.alias if field.alias is not None else name + serialization = param_metadata.serialization + if serialization is not None: + serialized_params = _get_serialized_params( + param_metadata, f_name, param, path_param_field_types[name] + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + pp_vals: List[str] = [] + if param_metadata.style == "simple": + if isinstance(param, List): + for pp_val in param: + if pp_val is None: + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[f_name] = ",".join(pp_vals) + elif isinstance(param, Dict): + for pp_key in param: + if param[pp_key] is None: + continue + if param_metadata.explode: + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[f_name] = ",".join(pp_vals) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + param_fields: Dict[str, FieldInfo] = param.__class__.model_fields + for name in param_fields: + param_field = param_fields[name] + + param_value_metadata = find_field_metadata( + param_field, PathParamMetadata + ) + if param_value_metadata is None: + continue + + param_name = ( + param_field.alias if param_field.alias is not None else name + ) + + param_field_val = getattr(param, name) + if 
param_field_val is None: + continue + if param_metadata.explode: + pp_vals.append( + f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[f_name] = ",".join(pp_vals) + else: + path_param_values[f_name] = _val_to_string(param) + + return globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def remove_suffix(input_string, suffix): + if suffix and input_string.endswith(suffix): + return input_string[: -len(suffix)] + return input_string diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py new file mode 100644 index 0000000..24ccae3 --- /dev/null +++ b/src/mistralai/utils/values.py @@ -0,0 +1,128 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +from enum import Enum +from email.message import Message +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: + codes = code if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: + if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + param_metadata = find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def 
_val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index c43f7aa..0000000 --- a/tests/conftest.py +++ /dev/null @@ -1,19 +0,0 @@ -from unittest import mock - -import pytest -from mistralai.async_client import MistralAsyncClient -from mistralai.client import MistralClient - - -@pytest.fixture() -def client(): - client = MistralClient(api_key="test_api_key") - client._client = mock.MagicMock() - return client - - -@pytest.fixture() -def async_client(): - client = MistralAsyncClient(api_key="test_api_key") - client._client = mock.AsyncMock() - return client diff --git a/tests/test_chat.py b/tests/test_chat.py deleted file mode 100644 index 15a4065..0000000 --- a/tests/test_chat.py +++ /dev/null @@ -1,149 +0,0 @@ -import io -import logging - -import pytest -from mistralai.constants import HEADER_MODEL_DEPRECATION_TIMESTAMP -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ChatMessage, -) - -from .utils import ( - mock_chat_response_payload, - mock_chat_response_streaming_payload, - mock_response, - mock_stream_response, -) - - -class TestChat: - @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) - def test_chat(self, client, target_deprecated_model): - headers = ( - { - HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", - } - if target_deprecated_model - else {} - ) - - client._client.request.return_value = mock_response(200, mock_chat_response_payload(), headers) - - # Create a stream to capture the log output - log_stream = io.StringIO() - - # Create a logger and add a handler that writes to the stream - logger = client._logger - handler = logging.StreamHandler(log_stream) - logger.addHandler(handler) - - result = client.chat( - model="mistral-small-latest", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/chat/completions", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "stream": False, - }, - data=None, - ) - - assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.object == "chat.completion" - - # Check if the log message was produced when the model is deprecated - log_output = log_stream.getvalue() - excepted_log = ( - ( - "WARNING: The model mistral-small-latest is deprecated " - "and will be removed on 2023-12-01T00:00:00. 
" - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" - ) - if target_deprecated_model - else "" - ) - assert excepted_log == log_output - - @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) - def test_chat_streaming(self, client, target_deprecated_model): - headers = ( - { - HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", - } - if target_deprecated_model - else {} - ) - - client._client.stream.return_value = mock_stream_response(200, mock_chat_response_streaming_payload(), headers) - - # Create a stream to capture the log output - log_stream = io.StringIO() - - # Create a logger and add a handler that writes to the stream - logger = client._logger - handler = logging.StreamHandler(log_stream) - logger.addHandler(handler) - - result = client.chat_stream( - model="mistral-small-latest", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - - results = list(result) - - client._client.stream.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/chat/completions", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "text/event-stream", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "stream": True, - }, - data=None, - ) - - for i, result in enumerate(results): - if i == 0: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.choices[0].delta.role == "assistant" - else: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == i - 1 - assert result.choices[0].delta.content == f"stream response {i-1}" - assert result.object == "chat.completion.chunk" - - # Check if the log message was produced - log_output = log_stream.getvalue() - excepted_log = ( - ( - "WARNING: The model mistral-small-latest is deprecated " - "and will be removed on 2023-12-01T00:00:00. 
" - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" - ) - if target_deprecated_model - else "" - ) - assert excepted_log == log_output diff --git a/tests/test_chat_async.py b/tests/test_chat_async.py deleted file mode 100644 index c16a9a8..0000000 --- a/tests/test_chat_async.py +++ /dev/null @@ -1,157 +0,0 @@ -import io -import logging -import unittest.mock as mock - -import pytest -from mistralai.constants import ( - HEADER_MODEL_DEPRECATION_TIMESTAMP, -) -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ChatMessage, -) - -from .utils import ( - mock_async_stream_response, - mock_chat_response_payload, - mock_chat_response_streaming_payload, - mock_response, -) - - -class TestAsyncChat: - @pytest.mark.asyncio - @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) - async def test_chat(self, async_client, target_deprecated_model): - headers = ( - { - HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", - } - if target_deprecated_model - else {} - ) - - async_client._client.request.return_value = mock_response(200, mock_chat_response_payload(), headers) - - # Create a stream to capture the log output - log_stream = io.StringIO() - - # Create a logger and add a handler that writes to the stream - logger = async_client._logger - handler = logging.StreamHandler(log_stream) - logger.addHandler(handler) - - result = await async_client.chat( - model="mistral-small-latest", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - - async_client._client.request.assert_awaited_once_with( - "post", - "https://api.mistral.ai/v1/chat/completions", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "stream": False, - }, - data=None, - ) - - assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.object == "chat.completion" - - # Check if the log message was produced when the model is deprecated - log_output = log_stream.getvalue() - excepted_log = ( - ( - "WARNING: The model mistral-small-latest is deprecated " - "and will be removed on 2023-12-01T00:00:00. 
" - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" - ) - if target_deprecated_model - else "" - ) - assert excepted_log == log_output - - @pytest.mark.asyncio - @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) - async def test_chat_streaming(self, async_client, target_deprecated_model): - headers = ( - { - HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", - } - if target_deprecated_model - else {} - ) - - async_client._client.stream = mock.Mock() - async_client._client.stream.return_value = mock_async_stream_response( - 200, mock_chat_response_streaming_payload(), headers - ) - - # Create a stream to capture the log output - log_stream = io.StringIO() - - # Create a logger and add a handler that writes to the stream - logger = async_client._logger - handler = logging.StreamHandler(log_stream) - logger.addHandler(handler) - - result = async_client.chat_stream( - model="mistral-small-latest", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - - results = [r async for r in result] - - async_client._client.stream.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/chat/completions", - headers={ - "Accept": "text/event-stream", - "User-Agent": f"mistral-client-python/{async_client._version}", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "stream": True, - }, - data=None, - ) - - for i, result in enumerate(results): - if i == 0: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.choices[0].delta.role == "assistant" - else: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == i - 1 - assert result.choices[0].delta.content == f"stream response {i-1}" - assert result.object == "chat.completion.chunk" - - # Check if the log message was produced when the model is deprecated - log_output = log_stream.getvalue() - excepted_log = ( - ( - "WARNING: The model mistral-small-latest is deprecated " - "and will be removed on 2023-12-01T00:00:00. 
" - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" - ) - if target_deprecated_model - else "" - ) - assert excepted_log == log_output diff --git a/tests/test_completion.py b/tests/test_completion.py deleted file mode 100644 index a30cfcf..0000000 --- a/tests/test_completion.py +++ /dev/null @@ -1,99 +0,0 @@ -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, -) - -from .utils import ( - mock_completion_response_payload, - mock_response, - mock_stream_response, -) - - -class TestCompletion: - def test_completion(self, client): - client._client.request.return_value = mock_response( - 200, - mock_completion_response_payload(), - ) - - result = client.completion( - model="mistral-small-latest", - prompt="def add(a, b):", - suffix="return a + b", - temperature=0.5, - max_tokens=50, - top_p=0.9, - random_seed=42, - ) - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fim/completions", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "prompt": "def add(a, b):", - "suffix": "return a + b", - "stream": False, - "temperature": 0.5, - "max_tokens": 50, - "top_p": 0.9, - "random_seed": 42, - }, - data=None, - ) - - assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.object == "chat.completion" - - def test_completion_streaming(self, client): - client._client.stream.return_value = mock_stream_response( - 200, - mock_completion_response_payload(), - ) - - result = client.completion_stream( - model="mistral-small-latest", prompt="def add(a, b):", suffix="return a + b", stop=["#"] - ) - - results = list(result) - - client._client.stream.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fim/completions", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "text/event-stream", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "prompt": "def add(a, b):", - "suffix": "return a + b", - "stream": True, - "stop": ["#"], - }, - data=None, - ) - - for i, result in enumerate(results): - if i == 0: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.choices[0].delta.role == "assistant" - else: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == i - 1 - assert result.choices[0].delta.content == f"stream response {i - 1}" - assert result.object == "chat.completion.chunk" diff --git a/tests/test_delete_model.py b/tests/test_delete_model.py deleted file mode 100644 index d050c21..0000000 --- a/tests/test_delete_model.py +++ /dev/null @@ -1,26 +0,0 @@ -from mistralai.models.models import ModelDeleted - -from .utils import mock_model_deleted_response_payload, mock_response - - -class TestDeleteModel: - def test_delete_model(self, client): - expected_response_model = ModelDeleted.model_validate_json(mock_model_deleted_response_payload()) - client._client.request.return_value = 
mock_response(200, expected_response_model.json()) - - response_model = client.delete_model("model_id") - - client._client.request.assert_called_once_with( - "delete", - "https://api.mistral.ai/v1/models/model_id", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - - assert response_model == expected_response_model diff --git a/tests/test_delete_model_async.py b/tests/test_delete_model_async.py deleted file mode 100644 index 9fa393e..0000000 --- a/tests/test_delete_model_async.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest -from mistralai.models.models import ModelDeleted - -from .utils import mock_model_deleted_response_payload, mock_response - - -class TestAsyncDeleteModel: - @pytest.mark.asyncio - async def test_delete_model(self, async_client): - expected_response_model = ModelDeleted.model_validate_json(mock_model_deleted_response_payload()) - async_client._client.request.return_value = mock_response(200, expected_response_model.json()) - - response_model = await async_client.delete_model("model_id") - - async_client._client.request.assert_called_once_with( - "delete", - "https://api.mistral.ai/v1/models/model_id", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - - assert response_model == expected_response_model diff --git a/tests/test_embedder.py b/tests/test_embedder.py deleted file mode 100644 index 6d9a0df..0000000 --- a/tests/test_embedder.py +++ /dev/null @@ -1,66 +0,0 @@ -from mistralai.models.embeddings import EmbeddingResponse - -from .utils import mock_embedding_response_payload, mock_response - - -class TestEmbeddings: - def test_embeddings(self, client): - client._client.request.return_value = mock_response( - 200, - mock_embedding_response_payload(), - ) - - result = client.embeddings( - model="mistral-embed", - input="What is the best French cheese?", - ) - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/embeddings", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={"model": "mistral-embed", "input": "What is the best French cheese?"}, - data=None, - ) - - assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" - assert len(result.data) == 1 - assert result.data[0].index == 0 - assert result.object == "list" - - def test_embeddings_batch(self, client): - client._client.request.return_value = mock_response( - 200, - mock_embedding_response_payload(batch_size=10), - ) - - result = client.embeddings( - model="mistral-embed", - input=["What is the best French cheese?"] * 10, - ) - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/embeddings", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-embed", - "input": ["What is the best French cheese?"] * 10, - }, - data=None, - ) - - assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" - assert len(result.data) == 10 - assert result.data[0].index 
== 0 - assert result.object == "list" diff --git a/tests/test_embedder_async.py b/tests/test_embedder_async.py deleted file mode 100644 index 3de1601..0000000 --- a/tests/test_embedder_async.py +++ /dev/null @@ -1,69 +0,0 @@ -import pytest -from mistralai.models.embeddings import EmbeddingResponse - -from .utils import mock_embedding_response_payload, mock_response - - -class TestAsyncEmbeddings: - @pytest.mark.asyncio - async def test_embeddings(self, async_client): - async_client._client.request.return_value = mock_response( - 200, - mock_embedding_response_payload(), - ) - - result = await async_client.embeddings( - model="mistral-embed", - input="What is the best French cheese?", - ) - - async_client._client.request.assert_awaited_once_with( - "post", - "https://api.mistral.ai/v1/embeddings", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={"model": "mistral-embed", "input": "What is the best French cheese?"}, - data=None, - ) - - assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" - assert len(result.data) == 1 - assert result.data[0].index == 0 - assert result.object == "list" - - @pytest.mark.asyncio - async def test_embeddings_batch(self, async_client): - async_client._client.request.return_value = mock_response( - 200, - mock_embedding_response_payload(batch_size=10), - ) - - result = await async_client.embeddings( - model="mistral-embed", - input=["What is the best French cheese?"] * 10, - ) - - async_client._client.request.assert_awaited_once_with( - "post", - "https://api.mistral.ai/v1/embeddings", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-embed", - "input": ["What is the best French cheese?"] * 10, - }, - data=None, - ) - - assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" - assert len(result.data) == 10 - assert result.data[0].index == 0 - assert result.object == "list" diff --git a/tests/test_files.py b/tests/test_files.py deleted file mode 100644 index e4ef9e4..0000000 --- a/tests/test_files.py +++ /dev/null @@ -1,105 +0,0 @@ -import orjson -from mistralai.models.files import FileDeleted, FileObject - -from .utils import ( - mock_file_deleted_response_payload, - mock_file_response_payload, - mock_response, -) - - -class TestFilesClient: - def test_create_file(self, client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_file.json(), - ) - - response_file = client.files.create(b"file_content") - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/files", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - }, - files={"file": b"file_content"}, - json=None, - data={"purpose": "fine-tune"}, - ) - assert response_file == expected_response_file - - def test_retrieve(self, client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_file.json(), - ) - - response_file = client.files.retrieve("file_id") - - 
client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/files/file_id", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file - - def test_list_files(self, client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - client._client.request.return_value = mock_response( - 200, - orjson.dumps( - { - "data": [expected_response_file.model_dump()], - "object": "list", - } - ), - ) - - response_files = client.files.list() - response_file = response_files.data[0] - - client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/files", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file - - def test_delete_file(self, client): - expected_response_file = FileDeleted.model_validate_json(mock_file_deleted_response_payload()) - client._client.request.return_value = mock_response(200, expected_response_file.json()) - - response_file = client.files.delete("file_id") - - client._client.request.assert_called_once_with( - "delete", - "https://api.mistral.ai/v1/files/file_id", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file diff --git a/tests/test_files_async.py b/tests/test_files_async.py deleted file mode 100644 index 7248f40..0000000 --- a/tests/test_files_async.py +++ /dev/null @@ -1,110 +0,0 @@ -import orjson -import pytest -from mistralai.models.files import FileDeleted, FileObject - -from .utils import ( - mock_file_deleted_response_payload, - mock_file_response_payload, - mock_response, -) - - -class TestFilesAyncClient: - @pytest.mark.asyncio - async def test_create_file(self, async_client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_file.json(), - ) - - response_file = await async_client.files.create(b"file_content") - - async_client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/files", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - }, - files={"file": b"file_content"}, - json=None, - data={"purpose": "fine-tune"}, - ) - assert response_file == expected_response_file - - @pytest.mark.asyncio - async def test_retrieve(self, async_client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_file.json(), - ) - - response_file = await async_client.files.retrieve("file_id") - - async_client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/files/file_id", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - 
data=None, - ) - assert response_file == expected_response_file - - @pytest.mark.asyncio - async def test_list_files(self, async_client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - orjson.dumps( - { - "data": [expected_response_file.model_dump()], - "object": "list", - } - ), - ) - - response_files = await async_client.files.list() - response_file = response_files.data[0] - - async_client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/files", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file - - @pytest.mark.asyncio - async def test_delete_file(self, async_client): - expected_response_file = FileDeleted.model_validate_json(mock_file_deleted_response_payload()) - async_client._client.request.return_value = mock_response(200, expected_response_file.json()) - - response_file = await async_client.files.delete("file_id") - - async_client._client.request.assert_called_once_with( - "delete", - "https://api.mistral.ai/v1/files/file_id", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file diff --git a/tests/test_jobs.py b/tests/test_jobs.py deleted file mode 100644 index efb19b7..0000000 --- a/tests/test_jobs.py +++ /dev/null @@ -1,128 +0,0 @@ -import orjson -from mistralai.models.jobs import DetailedJob, Job, TrainingParameters - -from .utils import ( - mock_detailed_job_response_payload, - mock_job_response_payload, - mock_response, -) - - -class TestJobsClient: - def test_create(self, client): - expected_response_job = Job.model_validate_json(mock_job_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = client.jobs.create( - model="model", - training_files=["training_file_id"], - validation_files=["validation_file_id"], - hyperparameters=TrainingParameters( - training_steps=1800, - learning_rate=1.0e-4, - ), - ) - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fine_tuning/jobs", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={ - "model": "model", - "training_files": ["training_file_id"], - "validation_files": ["validation_file_id"], - "hyperparameters": { - "training_steps": 1800, - "learning_rate": 1.0e-4, - }, - "suffix": None, - "integrations": None, - }, - data=None, - params={"dry_run": False}, - ) - assert response_job == expected_response_job - - def test_retrieve(self, client): - expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = client.jobs.retrieve("job_id") - - client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/fine_tuning/jobs/job_id", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - 
"Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_job == expected_response_job - - def test_list(self, client): - expected_response_job = Job.model_validate_json(mock_job_response_payload()) - client._client.request.return_value = mock_response( - 200, - orjson.dumps( - { - "data": [expected_response_job.model_dump()], - "object": "list", - } - ), - ) - - response_jobs = client.jobs.list() - response_job = response_jobs.data[0] - - client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/fine_tuning/jobs", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - params={"page": 0, "page_size": 10}, - ) - assert response_job == expected_response_job - - def test_cancel(self, client): - expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = client.jobs.cancel("job_id") - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fine_tuning/jobs/job_id/cancel", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_job == expected_response_job diff --git a/tests/test_jobs_async.py b/tests/test_jobs_async.py deleted file mode 100644 index 2d0d488..0000000 --- a/tests/test_jobs_async.py +++ /dev/null @@ -1,133 +0,0 @@ -import orjson -import pytest -from mistralai.models.jobs import DetailedJob, Job, TrainingParameters - -from .utils import ( - mock_detailed_job_response_payload, - mock_job_response_payload, - mock_response, -) - - -class TestJobsClient: - @pytest.mark.asyncio - async def test_create(self, async_client): - expected_response_job = Job.model_validate_json(mock_job_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = await async_client.jobs.create( - model="model", - training_files=["training_file_id"], - validation_files=["validation_file_id"], - hyperparameters=TrainingParameters( - training_steps=1800, - learning_rate=1.0e-4, - ), - ) - - async_client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fine_tuning/jobs", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={ - "model": "model", - "training_files": ["training_file_id"], - "validation_files": ["validation_file_id"], - "hyperparameters": { - "training_steps": 1800, - "learning_rate": 1.0e-4, - }, - "suffix": None, - "integrations": None, - }, - data=None, - params={"dry_run": False}, - ) - assert response_job == expected_response_job - - @pytest.mark.asyncio - async def test_retrieve(self, async_client): - expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = await async_client.jobs.retrieve("job_id") - - async_client._client.request.assert_called_once_with( - "get", - 
"https://api.mistral.ai/v1/fine_tuning/jobs/job_id", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_job == expected_response_job - - @pytest.mark.asyncio - async def test_list(self, async_client): - expected_response_job = Job.model_validate_json(mock_job_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - orjson.dumps( - { - "data": [expected_response_job.model_dump()], - "object": "list", - } - ), - ) - - response_jobs = await async_client.jobs.list() - response_job = response_jobs.data[0] - - async_client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/fine_tuning/jobs", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - params={"page": 0, "page_size": 10}, - ) - assert response_job == expected_response_job - - @pytest.mark.asyncio - async def test_cancel(self, async_client): - expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = await async_client.jobs.cancel("job_id") - - async_client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fine_tuning/jobs/job_id/cancel", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_job == expected_response_job diff --git a/tests/test_list_models.py b/tests/test_list_models.py deleted file mode 100644 index 15de847..0000000 --- a/tests/test_list_models.py +++ /dev/null @@ -1,30 +0,0 @@ -from mistralai.models.models import ModelList - -from .utils import mock_list_models_response_payload, mock_response - - -class TestListModels: - def test_list_models(self, client): - client._client.request.return_value = mock_response( - 200, - mock_list_models_response_payload(), - ) - - result = client.list_models() - - client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/models", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={}, - data=None, - ) - - assert isinstance(result, ModelList), "Should return an ModelList" - assert len(result.data) == 4 - assert result.object == "list" diff --git a/tests/test_list_models_async.py b/tests/test_list_models_async.py deleted file mode 100644 index 2f3d7b4..0000000 --- a/tests/test_list_models_async.py +++ /dev/null @@ -1,32 +0,0 @@ -import pytest -from mistralai.models.models import ModelList - -from .utils import mock_list_models_response_payload, mock_response - - -class TestAsyncListModels: - @pytest.mark.asyncio - async def test_list_models(self, async_client): - async_client._client.request.return_value = mock_response( - 200, - mock_list_models_response_payload(), - ) - - result = await async_client.list_models() - - async_client._client.request.assert_awaited_once_with( - "get", - "https://api.mistral.ai/v1/models", - headers={ - 
"User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={}, - data=None, - ) - - assert isinstance(result, ModelList), "Should return an ModelList" - assert len(result.data) == 4 - assert result.object == "list" diff --git a/tests/utils.py b/tests/utils.py deleted file mode 100644 index 4c2ca14..0000000 --- a/tests/utils.py +++ /dev/null @@ -1,335 +0,0 @@ -import contextlib -import unittest.mock as mock -from typing import Any, Dict, List - -import orjson -from httpx import Response - - -@contextlib.contextmanager -def mock_stream_response(status_code: int, content: List[str], headers: Dict[str, Any] = None): - response = mock.Mock(Response) - response.status_code = status_code - response.headers = headers if headers else {} - response.iter_lines.return_value = iter(content) - yield response - - -@contextlib.asynccontextmanager -async def mock_async_stream_response(status_code: int, content: List[str], headers: Dict[str, Any] = None): - response = mock.Mock(Response) - response.status_code = status_code - response.headers = headers if headers else {} - - async def async_iter(content: List[str]): - for line in content: - yield line - - response.aiter_lines.return_value = async_iter(content) - yield response - - -def mock_response( - status_code: int, content: str, headers: Dict[str, Any] = None, is_json: bool = True -) -> mock.MagicMock: - response = mock.Mock(Response) - response.status_code = status_code - response.headers = headers if headers else {} - if is_json: - response.json = mock.MagicMock() - response.json.return_value = orjson.loads(content) - response.text = content - return response - - -def mock_list_models_response_payload() -> str: - return orjson.dumps( - { - "object": "list", - "data": [ - { - "id": "mistral-medium", - "object": "model", - "created": 1703186988, - "owned_by": "mistralai", - "root": None, - "parent": None, - "permission": [ - { - "id": "modelperm-15bebaf316264adb84b891bf06a84933", - "object": "model_permission", - "created": 1703186988, - "allow_create_engine": False, - "allow_sampling": True, - "allow_logprobs": False, - "allow_search_indices": False, - "allow_view": True, - "allow_fine_tuning": False, - "organization": "*", - "group": None, - "is_blocking": False, - } - ], - }, - { - "id": "mistral-small-latest", - "object": "model", - "created": 1703186988, - "owned_by": "mistralai", - "root": None, - "parent": None, - "permission": [ - { - "id": "modelperm-d0dced5c703242fa862f4ca3f241c00e", - "object": "model_permission", - "created": 1703186988, - "allow_create_engine": False, - "allow_sampling": True, - "allow_logprobs": False, - "allow_search_indices": False, - "allow_view": True, - "allow_fine_tuning": False, - "organization": "*", - "group": None, - "is_blocking": False, - } - ], - }, - { - "id": "mistral-tiny", - "object": "model", - "created": 1703186988, - "owned_by": "mistralai", - "root": None, - "parent": None, - "permission": [ - { - "id": "modelperm-0e64e727c3a94f17b29f8895d4be2910", - "object": "model_permission", - "created": 1703186988, - "allow_create_engine": False, - "allow_sampling": True, - "allow_logprobs": False, - "allow_search_indices": False, - "allow_view": True, - "allow_fine_tuning": False, - "organization": "*", - "group": None, - "is_blocking": False, - } - ], - }, - { - "id": "mistral-embed", - "object": "model", - "created": 1703186988, - "owned_by": "mistralai", - "root": None, - "parent": 
None, - "permission": [ - { - "id": "modelperm-ebdff9046f524e628059447b5932e3ad", - "object": "model_permission", - "created": 1703186988, - "allow_create_engine": False, - "allow_sampling": True, - "allow_logprobs": False, - "allow_search_indices": False, - "allow_view": True, - "allow_fine_tuning": False, - "organization": "*", - "group": None, - "is_blocking": False, - } - ], - }, - ], - } - ) - - -def mock_embedding_response_payload(batch_size: int = 1) -> str: - return orjson.dumps( - { - "id": "embd-98c8c60e3fbf4fc49658eddaf447357c", - "object": "list", - "data": [ - { - "object": "embedding", - "embedding": [-0.018585205078125, 0.027099609375, 0.02587890625], - "index": 0, - } - ] - * batch_size, - "model": "mistral-embed", - "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, - } - ).decode() - - -def mock_chat_response_payload(): - return orjson.dumps( - { - "id": "chat-98c8c60e3fbf4fc49658eddaf447357c", - "object": "chat.completion", - "created": 1703165682, - "choices": [ - { - "finish_reason": "stop", - "message": { - "role": "assistant", - "content": "What is the best French cheese?", - }, - "index": 0, - } - ], - "model": "mistral-small-latest", - "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, - } - ).decode() - - -def mock_chat_response_streaming_payload(): - return [ - "data: " - + orjson.dumps( - { - "id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", - "model": "mistral-small-latest", - "choices": [ - { - "index": 0, - "delta": {"role": "assistant"}, - "finish_reason": None, - } - ], - } - ).decode() - + "\n\n", - *[ - "data: " - + orjson.dumps( - { - "id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", - "object": "chat.completion.chunk", - "created": 1703168544, - "model": "mistral-small-latest", - "choices": [ - { - "index": i, - "delta": {"content": f"stream response {i}"}, - "finish_reason": None, - } - ], - } - ).decode() - + "\n\n" - for i in range(10) - ], - "data: [DONE]\n\n", - ] - - -def mock_completion_response_payload() -> str: - return orjson.dumps( - { - "id": "chat-98c8c60e3fbf4fc49658eddaf447357c", - "object": "chat.completion", - "created": 1703165682, - "choices": [ - { - "finish_reason": "stop", - "message": { - "role": "assistant", - "content": " a + b", - }, - "index": 0, - } - ], - "model": "mistral-small-latest", - "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, - } - ).decode() - - -def mock_job_response_payload() -> str: - return orjson.dumps( - { - "id": "job_id", - "hyperparameters": { - "training_steps": 1800, - "learning_rate": 1.0e-4, - }, - "fine_tuned_model": "fine_tuned_model", - "model": "model", - "status": "QUEUED", - "job_type": "job_type", - "created_at": 1633046400000, - "modified_at": 1633046400000, - "training_files": ["training_file_id"], - "validation_files": ["validation_file_id"], - "object": "job", - "integrations": [], - } - ) - - -def mock_detailed_job_response_payload() -> str: - return orjson.dumps( - { - "id": "job_id", - "hyperparameters": { - "training_steps": 1800, - "learning_rate": 1.0e-4, - }, - "fine_tuned_model": "fine_tuned_model", - "model": "model", - "status": "QUEUED", - "job_type": "job_type", - "created_at": 1633046400000, - "modified_at": 1633046400000, - "training_files": ["training_file_id"], - "validation_files": ["validation_file_id"], - "object": "job", - "integrations": [], - "events": [ - { - "name": "event_name", - "created_at": 1633046400000, - } - ], - } - ) - - -def mock_file_response_payload() -> str: - return orjson.dumps( 
- { - "id": "file_id", - "object": "file", - "bytes": 0, - "created_at": 1633046400000, - "filename": "file.jsonl", - "purpose": "fine-tune", - } - ) - - -def mock_file_deleted_response_payload() -> str: - return orjson.dumps( - { - "id": "file_id", - "object": "file", - "deleted": True, - } - ) - - -def mock_model_deleted_response_payload() -> str: - return orjson.dumps( - { - "id": "model_id", - "object": "model", - "deleted": True, - } - )
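
For readers skimming the generated utilities introduced above, here is a minimal, illustrative sketch of how the new helpers in src/mistralai/utils/url.py and src/mistralai/utils/values.py behave. It is not part of the patch; it assumes the built package exposes these generated (private) modules on the import path, and the example values (server URL, file_abc123) are made up for demonstration.

    # Illustrative only: exercises helpers added by this patch; not committed code.
    from mistralai.utils.url import remove_suffix, template_url
    from mistralai.utils.values import match_content_type, match_status_codes

    # Path templating: fill {placeholders} and avoid a double slash on the server URL.
    url = remove_suffix("https://api.mistral.ai/", "/") + template_url(
        "/v1/files/{file_id}", {"file_id": "file_abc123"}  # hypothetical file id
    )
    assert url == "https://api.mistral.ai/v1/files/file_abc123"

    # Status-code matching supports exact codes, "default", and NXX wildcards.
    assert match_status_codes(["422", "4XX"], 429) is True
    assert match_status_codes(["default"], 500) is True
    assert match_status_codes(["200"], 404) is False

    # Content-type matching understands parameters and type/* wildcards.
    assert match_content_type("application/json; charset=utf-8", "application/json") is True
    assert match_content_type("text/event-stream", "text/*") is True

These helpers replace the hand-rolled request plumbing exercised by the deleted tests above: URL construction and response matching are now handled by the generated layer rather than asserted against mocked httpx calls.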