From e33f3f40a45bf16881727a370f136ffed561820b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 13:54:31 +0100 Subject: [PATCH] ci: regenerated with Speakeasy CLI v1.434.0 (#682) Co-authored-by: speakeasybot --- .speakeasy/workflow.lock | 4 +- .speakeasy/workflow.yaml | 22 +- ai/api-reference/gateway.openapi.yaml | 980 ++++++++++++++++++++++---- 3 files changed, 854 insertions(+), 152 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 624a486d..ca0aa41f 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,8 +1,8 @@ -speakeasyVersion: 1.421.2 +speakeasyVersion: 1.434.0 sources: livepeer-studio-api: sourceNamespace: livepeer-studio-api - sourceRevisionDigest: sha256:c4cbdf63438b7eaf18a0cbeb0befea660afdbda164c0a650dd49167915ebc916 + sourceRevisionDigest: sha256:90670a3202ad2a0a98eb70ff0f4d74e69aa6505944d747dcaca8e1a65eb10de7 sourceBlobDigest: sha256:ecce8bedb74bfc0a0b642e8816d5ffdb58115ecc368b64d8bee1380ab5ddc2ea tags: - latest diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index c4be9c1d..80caa884 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,6 +1,16 @@ workflowVersion: 1.0.0 speakeasyVersion: latest sources: + livepeer-ai-api: + inputs: + - location: https://raw.githubusercontent.com/livepeer/ai-worker/main/runner/gateway.openapi.yaml + overlays: + - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-js/main/codeSamples.yaml + - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-go/main/codeSamples.yaml + - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-python/main/codeSamples.yaml + output: ai/api-reference/gateway.openapi.yaml + registry: + location: registry.speakeasyapi.dev/livepeer/livepeer-ai/livepeer-ai-oas livepeer-studio-api: inputs: - location: https://raw.githubusercontent.com/livepeer/studio/master/packages/api/src/schema/api-schema.yaml @@ -9,17 +19,7 @@ sources: - location: https://raw.githubusercontent.com/livepeer/livepeer-js/main/codeSamples.yaml - location: https://raw.githubusercontent.com/livepeer/livepeer-go/main/codeSamples.yaml - location: https://raw.githubusercontent.com/livepeer/livepeer-python/main/codeSamples.yaml - registry: - location: registry.speakeasyapi.dev/livepeer/livepeer-studio/livepeer-studio-api output: openapi.yaml - livepeer-ai-api: - inputs: - - location: https://raw.githubusercontent.com/livepeer/ai-worker/main/runner/gateway.openapi.yaml - overlays: - - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-js/main/codeSamples.yaml - - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-go/main/codeSamples.yaml - - location: https://raw.githubusercontent.com/livepeer/livepeer-ai-python/main/codeSamples.yaml registry: - location: registry.speakeasyapi.dev/livepeer/livepeer-ai/livepeer-ai-oas - output: ai/api-reference/gateway.openapi.yaml + location: registry.speakeasyapi.dev/livepeer/livepeer-studio/livepeer-studio-api targets: {} diff --git a/ai/api-reference/gateway.openapi.yaml b/ai/api-reference/gateway.openapi.yaml index 4bc3f778..b4691cde 100644 --- a/ai/api-reference/gateway.openapi.yaml +++ b/ai/api-reference/gateway.openapi.yaml @@ -5,15 +5,15 @@ info: description: An application to run AI pipelines version: 0.0.0 servers: -- url: https://dream-gateway.livepeer.cloud - description: Livepeer Cloud Community Gateway -- url: https://livepeer.studio/api/beta/generate - description: Livepeer 
Studio Gateway + - url: https://dream-gateway.livepeer.cloud + description: Livepeer Cloud Community Gateway + - url: https://livepeer.studio/api/beta/generate + description: Livepeer Studio Gateway paths: /text-to-image: post: tags: - - generate + - generate summary: Text To Image description: Generate images from text prompts. operationId: genTextToImage @@ -56,12 +56,76 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: textToImage + x-codeSamples: + - lang: typescript + label: genTextToImage + source: |- + import { Livepeer } from "@livepeer/ai"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.textToImage({ + prompt: "", + }); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genTextToImage + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + ctx := context.Background() + res, err := s.Generate.TextToImage(ctx, components.TextToImageParams{ + Prompt: "", + }) + if err != nil { + log.Fatal(err) + } + if res.ImageResponse != nil { + // handle response + } + } + - lang: python + label: genTextToImage + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.text_to_image(request={ + "prompt": "", + }) + + if res.image_response is not None: + # handle response + pass /image-to-image: post: tags: - - generate + - generate summary: Image To Image description: Apply image transformations to a provided image. 
operationId: genImageToImage @@ -104,12 +168,92 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: imageToImage + x-codeSamples: + - lang: typescript + label: genImageToImage + source: |- + import { Livepeer } from "@livepeer/ai"; + import { openAsBlob } from "node:fs"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.imageToImage({ + prompt: "", + image: await openAsBlob("example.file"), + }); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genImageToImage + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "os" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + content, fileErr := os.Open("example.file") + if fileErr != nil { + panic(fileErr) + } + + ctx := context.Background() + res, err := s.Generate.ImageToImage(ctx, components.BodyGenImageToImage{ + Prompt: "", + Image: components.Image{ + FileName: "example.file", + Content: content, + }, + }) + if err != nil { + log.Fatal(err) + } + if res.ImageResponse != nil { + // handle response + } + } + - lang: python + label: genImageToImage + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.image_to_image(request={ + "prompt": "", + "image": { + "file_name": "example.file", + "content": open("example.file", "rb"), + }, + }) + + if res.image_response is not None: + # handle response + pass /image-to-video: post: tags: - - generate + - generate summary: Image To Video description: Generate a video from a provided image. 
operationId: genImageToVideo @@ -152,12 +296,89 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: imageToVideo + x-codeSamples: + - lang: typescript + label: genImageToVideo + source: |- + import { Livepeer } from "@livepeer/ai"; + import { openAsBlob } from "node:fs"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.imageToVideo({ + image: await openAsBlob("example.file"), + }); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genImageToVideo + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "os" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + content, fileErr := os.Open("example.file") + if fileErr != nil { + panic(fileErr) + } + + ctx := context.Background() + res, err := s.Generate.ImageToVideo(ctx, components.BodyGenImageToVideo{ + Image: components.BodyGenImageToVideoImage{ + FileName: "example.file", + Content: content, + }, + }) + if err != nil { + log.Fatal(err) + } + if res.VideoResponse != nil { + // handle response + } + } + - lang: python + label: genImageToVideo + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.image_to_video(request={ + "image": { + "file_name": "example.file", + "content": open("example.file", "rb"), + }, + }) + + if res.video_response is not None: + # handle response + pass /upscale: post: tags: - - generate + - generate summary: Upscale description: Upscale an image by increasing its resolution. operationId: genUpscale @@ -200,12 +421,92 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: upscale + x-codeSamples: + - lang: typescript + label: genUpscale + source: |- + import { Livepeer } from "@livepeer/ai"; + import { openAsBlob } from "node:fs"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.upscale({ + prompt: "", + image: await openAsBlob("example.file"), + }); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genUpscale + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "os" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + content, fileErr := os.Open("example.file") + if fileErr != nil { + panic(fileErr) + } + + ctx := context.Background() + res, err := s.Generate.Upscale(ctx, components.BodyGenUpscale{ + Prompt: "", + Image: components.BodyGenUpscaleImage{ + FileName: "example.file", + Content: content, + }, + }) + if err != nil { + log.Fatal(err) + } + if res.ImageResponse != nil { + // handle response + } + } + - lang: python + label: genUpscale + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.upscale(request={ + "prompt": "", + "image": { + "file_name": "example.file", + "content": open("example.file", "rb"), + }, + }) + + if res.image_response is not None: + # handle response + pass /audio-to-text: post: tags: - - generate + - generate summary: Audio To Text description: Transcribe audio files to text. 
operationId: genAudioToText @@ -260,12 +561,89 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: audioToText + x-codeSamples: + - lang: typescript + label: genAudioToText + source: |- + import { Livepeer } from "@livepeer/ai"; + import { openAsBlob } from "node:fs"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.audioToText({ + audio: await openAsBlob("example.file"), + }); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genAudioToText + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "os" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + content, fileErr := os.Open("example.file") + if fileErr != nil { + panic(fileErr) + } + + ctx := context.Background() + res, err := s.Generate.AudioToText(ctx, components.BodyGenAudioToText{ + Audio: components.Audio{ + FileName: "example.file", + Content: content, + }, + }) + if err != nil { + log.Fatal(err) + } + if res.TextResponse != nil { + // handle response + } + } + - lang: python + label: genAudioToText + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.audio_to_text(request={ + "audio": { + "file_name": "example.file", + "content": open("example.file", "rb"), + }, + }) + + if res.text_response is not None: + # handle response + pass /segment-anything-2: post: tags: - - generate + - generate summary: Segment Anything 2 description: Segment objects in an image. operationId: genSegmentAnything2 @@ -308,12 +686,89 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: segmentAnything2 + x-codeSamples: + - lang: typescript + label: genSegmentAnything2 + source: |- + import { Livepeer } from "@livepeer/ai"; + import { openAsBlob } from "node:fs"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.segmentAnything2({ + image: await openAsBlob("example.file"), + }); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genSegmentAnything2 + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "os" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + content, fileErr := os.Open("example.file") + if fileErr != nil { + panic(fileErr) + } + + ctx := context.Background() + res, err := s.Generate.SegmentAnything2(ctx, components.BodyGenSegmentAnything2{ + Image: components.BodyGenSegmentAnything2Image{ + FileName: "example.file", + Content: content, + }, + }) + if err != nil { + log.Fatal(err) + } + if res.MasksResponse != nil { + // handle response + } + } + - lang: python + label: genSegmentAnything2 + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.segment_anything2(request={ + "image": { + "file_name": "example.file", + "content": open("example.file", "rb"), + }, + }) + + if res.masks_response is not None: + # handle response + pass /llm: post: tags: - - generate + - generate summary: LLM description: Generate text using a language model. 
operationId: genLLM @@ -355,12 +810,76 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: llm + x-codeSamples: + - lang: typescript + label: genLLM + source: |- + import { Livepeer } from "@livepeer/ai"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.llm({ + prompt: "", + }); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genLLM + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + ctx := context.Background() + res, err := s.Generate.Llm(ctx, components.BodyGenLLM{ + Prompt: "", + }) + if err != nil { + log.Fatal(err) + } + if res.LLMResponse != nil { + // handle response + } + } + - lang: python + label: genLLM + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.llm(request={ + "prompt": "", + }) + + if res.llm_response is not None: + # handle response + pass /image-to-text: post: tags: - - generate + - generate summary: Image To Text description: Transform image files to text. operationId: genImageToText @@ -409,12 +928,89 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: imageToText + x-codeSamples: + - lang: typescript + label: genImageToText + source: |- + import { Livepeer } from "@livepeer/ai"; + import { openAsBlob } from "node:fs"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.imageToText({ + image: await openAsBlob("example.file"), + }); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genImageToText + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "os" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + content, fileErr := os.Open("example.file") + if fileErr != nil { + panic(fileErr) + } + + ctx := context.Background() + res, err := s.Generate.ImageToText(ctx, components.BodyGenImageToText{ + Image: components.BodyGenImageToTextImage{ + FileName: "example.file", + Content: content, + }, + }) + if err != nil { + log.Fatal(err) + } + if res.ImageToTextResponse != nil { + // handle response + } + } + - lang: python + label: genImageToText + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.image_to_text(request={ + "image": { + "file_name": "example.file", + "content": open("example.file", "rb"), + }, + }) + + if res.image_to_text_response is not None: + # handle response + pass /live-video-to-video: post: tags: - - generate + - generate summary: Video To Video description: Apply video-like transformations to a provided image. 
operationId: genLiveVideoToVideo @@ -430,6 +1026,7 @@ paths: content: application/json: schema: + $ref: '#/components/schemas/LiveVideoToVideoResponse' x-speakeasy-name-override: data '400': description: Bad Request @@ -456,15 +1053,81 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: liveVideoToVideo + x-codeSamples: + - lang: typescript + label: genLiveVideoToVideo + source: |- + import { Livepeer } from "@livepeer/ai"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.liveVideoToVideo({ + subscribeUrl: "https://soulful-lava.org/", + publishUrl: "https://vain-tabletop.biz", + }); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genLiveVideoToVideo + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + ctx := context.Background() + res, err := s.Generate.LiveVideoToVideo(ctx, components.LiveVideoToVideoParams{ + SubscribeURL: "https://soulful-lava.org/", + PublishURL: "https://vain-tabletop.biz", + }) + if err != nil { + log.Fatal(err) + } + if res.LiveVideoToVideoResponse != nil { + // handle response + } + } + - lang: python + label: genLiveVideoToVideo + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.live_video_to_video(request={ + "subscribe_url": "https://soulful-lava.org/", + "publish_url": "https://vain-tabletop.biz", + }) + + if res.live_video_to_video_response is not None: + # handle response + pass /text-to-speech: post: tags: - - generate + - generate summary: Text To Speech - description: Generate a text-to-speech audio file based on the provided text - input and speaker description. + description: Generate a text-to-speech audio file based on the provided text input and speaker description. operationId: genTextToSpeech requestBody: content: @@ -505,8 +1168,66 @@ paths: schema: $ref: '#/components/schemas/HTTPValidationError' security: - - HTTPBearer: [] + - HTTPBearer: [] x-speakeasy-name-override: textToSpeech + x-codeSamples: + - lang: typescript + label: genTextToSpeech + source: |- + import { Livepeer } from "@livepeer/ai"; + + const livepeer = new Livepeer({ + httpBearer: "", + }); + + async function run() { + const result = await livepeer.generate.textToSpeech({}); + + // Handle the result + console.log(result); + } + + run(); + - lang: go + label: genTextToSpeech + source: |- + package main + + import( + livepeeraigo "github.com/livepeer/livepeer-ai-go" + "context" + "github.com/livepeer/livepeer-ai-go/models/components" + "log" + ) + + func main() { + s := livepeeraigo.New( + livepeeraigo.WithSecurity(""), + ) + + ctx := context.Background() + res, err := s.Generate.TextToSpeech(ctx, components.TextToSpeechParams{}) + if err != nil { + log.Fatal(err) + } + if res.AudioResponse != nil { + // handle response + } + } + - lang: python + label: genTextToSpeech + source: |- + from livepeer_ai import Livepeer + + s = Livepeer( + http_bearer="", + ) + + res = s.generate.text_to_speech(request={}) + + if res.audio_response is not None: + # handle response + pass components: schemas: APIError: @@ -517,18 +1238,18 @@ components: description: The error message. 
type: object required: - - msg + - msg title: APIError description: API error response model. AudioResponse: properties: audio: allOf: - - $ref: '#/components/schemas/MediaURL' + - $ref: '#/components/schemas/MediaURL' description: The generated audio. type: object required: - - audio + - audio title: AudioResponse description: Response model for audio generation. Body_genAudioToText: @@ -546,15 +1267,12 @@ components: return_timestamps: type: string title: Return Timestamps - description: 'Return timestamps for the transcribed text. Supported values: - ''sentence'', ''word'', or a string boolean (''true'' or ''false''). Default - is ''true'' (''sentence''). ''false'' means no timestamps. ''word'' means - word-based timestamps.' + description: 'Return timestamps for the transcribed text. Supported values: ''sentence'', ''word'', or a string boolean (''true'' or ''false''). Default is ''true'' (''sentence''). ''false'' means no timestamps. ''word'' means word-based timestamps.' default: 'true' type: object required: - - audio - - model_id + - audio + - model_id title: Body_genAudioToText Body_genImageToImage: properties: @@ -575,39 +1293,32 @@ components: loras: type: string title: Loras - description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight - for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, - "nerijs/pixel-art-xl": 1.2}.' + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}.' default: '' strength: type: number title: Strength - description: Degree of transformation applied to the reference image (0 - to 1). + description: Degree of transformation applied to the reference image (0 to 1). default: 0.8 guidance_scale: type: number title: Guidance Scale - description: Encourages model to generate images closely linked to the text - prompt (higher values may reduce image quality). + description: Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality). default: 7.5 image_guidance_scale: type: number title: Image Guidance Scale - description: Degree to which the generated image is pushed towards the initial - image. + description: Degree to which the generated image is pushed towards the initial image. default: 1.5 negative_prompt: type: string title: Negative Prompt - description: Text prompt(s) to guide what to exclude from image generation. - Ignored if guidance_scale < 1. + description: Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1. default: '' safety_check: type: boolean title: Safety Check - description: Perform a safety check to estimate if generated images could - be offensive or harmful. + description: Perform a safety check to estimate if generated images could be offensive or harmful. default: true seed: type: integer @@ -616,8 +1327,7 @@ components: num_inference_steps: type: integer title: Num Inference Steps - description: Number of denoising steps. More steps usually lead to higher - quality images but slower inference. Modulated by strength. + description: Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. 
default: 100 num_images_per_prompt: type: integer @@ -626,9 +1336,9 @@ components: default: 1 type: object required: - - prompt - - image - - model_id + - prompt + - image + - model_id title: Body_genImageToImage Body_genImageToText: properties: @@ -649,8 +1359,8 @@ components: default: '' type: object required: - - image - - model_id + - image + - model_id title: Body_genImageToText Body_genImageToVideo: properties: @@ -682,20 +1392,17 @@ components: motion_bucket_id: type: integer title: Motion Bucket Id - description: Used for conditioning the amount of motion for the generation. - The higher the number the more motion will be in the video. + description: Used for conditioning the amount of motion for the generation. The higher the number the more motion will be in the video. default: 127 noise_aug_strength: type: number title: Noise Aug Strength - description: Amount of noise added to the conditioning image. Higher values - reduce resemblance to the conditioning image and increase motion. + description: Amount of noise added to the conditioning image. Higher values reduce resemblance to the conditioning image and increase motion. default: 0.02 safety_check: type: boolean title: Safety Check - description: Perform a safety check to estimate if generated images could - be offensive or harmful. + description: Perform a safety check to estimate if generated images could be offensive or harmful. default: true seed: type: integer @@ -704,13 +1411,12 @@ components: num_inference_steps: type: integer title: Num Inference Steps - description: Number of denoising steps. More steps usually lead to higher - quality images but slower inference. Modulated by strength. + description: Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. default: 25 type: object required: - - image - - model_id + - image + - model_id title: Body_genImageToVideo Body_genLLM: properties: @@ -743,8 +1449,8 @@ components: default: false type: object required: - - prompt - - model_id + - prompt + - model_id title: Body_genLLM Body_genSegmentAnything2: properties: @@ -761,45 +1467,38 @@ components: point_coords: type: string title: Point Coords - description: Nx2 array of point prompts to the model, where each point is - in (X,Y) in pixels. + description: Nx2 array of point prompts to the model, where each point is in (X,Y) in pixels. point_labels: type: string title: Point Labels - description: Labels for the point prompts, where 1 indicates a foreground - point and 0 indicates a background point. + description: Labels for the point prompts, where 1 indicates a foreground point and 0 indicates a background point. box: type: string title: Box - description: A length 4 array given as a box prompt to the model, in XYXY - format. + description: A length 4 array given as a box prompt to the model, in XYXY format. mask_input: type: string title: Mask Input - description: A low-resolution mask input to the model, typically from a - previous prediction iteration, with the form 1xHxW (H=W=256 for SAM). + description: A low-resolution mask input to the model, typically from a previous prediction iteration, with the form 1xHxW (H=W=256 for SAM). multimask_output: type: boolean title: Multimask Output - description: If true, the model will return three masks for ambiguous input - prompts, often producing better masks than a single prediction. 
+ description: If true, the model will return three masks for ambiguous input prompts, often producing better masks than a single prediction. default: true return_logits: type: boolean title: Return Logits - description: If true, returns un-thresholded mask logits instead of a binary - mask. + description: If true, returns un-thresholded mask logits instead of a binary mask. default: true normalize_coords: type: boolean title: Normalize Coords - description: If true, the point coordinates will be normalized to the range - [0,1], with point_coords expected to be with respect to image dimensions. + description: If true, the point coordinates will be normalized to the range [0,1], with point_coords expected to be with respect to image dimensions. default: true type: object required: - - image - - model_id + - image + - model_id title: Body_genSegmentAnything2 Body_genUpscale: properties: @@ -820,8 +1519,7 @@ components: safety_check: type: boolean title: Safety Check - description: Perform a safety check to estimate if generated images could - be offensive or harmful. + description: Perform a safety check to estimate if generated images could be offensive or harmful. default: true seed: type: integer @@ -830,24 +1528,19 @@ components: num_inference_steps: type: integer title: Num Inference Steps - description: Number of denoising steps. More steps usually lead to higher - quality images but slower inference. Modulated by strength. + description: Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. default: 75 type: object required: - - prompt - - image - - model_id + - prompt + - image + - model_id title: Body_genUpscale Chunk: properties: timestamp: - prefixItems: - - type: number - - type: number + items: {} type: array - maxItems: 2 - minItems: 2 title: Timestamp description: The timestamp of the chunk. text: @@ -856,19 +1549,19 @@ components: description: The text of the chunk. type: object required: - - timestamp - - text + - timestamp + - text title: Chunk description: A chunk of text with a timestamp. HTTPError: properties: detail: allOf: - - $ref: '#/components/schemas/APIError' + - $ref: '#/components/schemas/APIError' description: Detailed error information. type: object required: - - detail + - detail title: HTTPError description: HTTP error response model. HTTPValidationError: @@ -890,7 +1583,7 @@ components: description: The generated images. type: object required: - - images + - images title: ImageResponse description: Response model for image generation. ImageToTextResponse: @@ -901,7 +1594,7 @@ components: description: The generated text. type: object required: - - text + - text title: ImageToTextResponse description: Response model for text generation. LLMResponse: @@ -914,8 +1607,8 @@ components: title: Tokens Used type: object required: - - response - - tokens_used + - response + - tokens_used title: LLMResponse LiveVideoToVideoParams: properties: @@ -936,12 +1629,29 @@ components: type: object title: Params description: Initial parameters for the model. 
+ default: {} type: object required: - - subscribe_url - - publish_url - - model_id + - subscribe_url + - publish_url + - model_id title: LiveVideoToVideoParams + LiveVideoToVideoResponse: + properties: + subscribe_url: + type: string + title: Subscribe Url + description: Source URL of the incoming stream to subscribe to + publish_url: + type: string + title: Publish Url + description: Destination URL of the outgoing stream to publish to + type: object + required: + - subscribe_url + - publish_url + title: LiveVideoToVideoResponse + description: Response model for live video-to-video generation. MasksResponse: properties: masks: @@ -958,9 +1668,9 @@ components: description: The raw, unnormalized predictions (logits) for the masks. type: object required: - - masks - - scores - - logits + - masks + - scores + - logits title: MasksResponse description: Response model for object segmentation. Media: @@ -979,9 +1689,9 @@ components: description: Whether the media was flagged as NSFW. type: object required: - - url - - seed - - nsfw + - url + - seed + - nsfw title: Media description: A media object containing information about the generated media. MediaURL: @@ -992,7 +1702,7 @@ components: description: The URL where the media can be accessed. type: object required: - - url + - url title: MediaURL description: A URL from which media can be accessed. TextResponse: @@ -1009,8 +1719,8 @@ components: description: The generated text chunks. type: object required: - - text - - chunks + - text + - chunks title: TextResponse description: Response model for text generation. TextToImageParams: @@ -1023,15 +1733,12 @@ components: loras: type: string title: Loras - description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight - for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, - "nerijs/pixel-art-xl": 1.2}.' + description: 'A LoRA (Low-Rank Adaptation) model and its corresponding weight for image generation. Example: { "latent-consistency/lcm-lora-sdxl": 1.0, "nerijs/pixel-art-xl": 1.2}.' default: '' prompt: type: string title: Prompt - description: Text prompt(s) to guide image generation. Separate multiple - prompts with '|' if supported by the model. + description: Text prompt(s) to guide image generation. Separate multiple prompts with '|' if supported by the model. height: type: integer title: Height @@ -1045,20 +1752,17 @@ components: guidance_scale: type: number title: Guidance Scale - description: Encourages model to generate images closely linked to the text - prompt (higher values may reduce image quality). + description: Encourages model to generate images closely linked to the text prompt (higher values may reduce image quality). default: 7.5 negative_prompt: type: string title: Negative Prompt - description: Text prompt(s) to guide what to exclude from image generation. - Ignored if guidance_scale < 1. + description: Text prompt(s) to guide what to exclude from image generation. Ignored if guidance_scale < 1. default: '' safety_check: type: boolean title: Safety Check - description: Perform a safety check to estimate if generated images could - be offensive or harmful. + description: Perform a safety check to estimate if generated images could be offensive or harmful. default: true seed: type: integer @@ -1067,8 +1771,7 @@ components: num_inference_steps: type: integer title: Num Inference Steps - description: Number of denoising steps. More steps usually lead to higher - quality images but slower inference. Modulated by strength. 
+ description: Number of denoising steps. More steps usually lead to higher quality images but slower inference. Modulated by strength. default: 50 num_images_per_prompt: type: integer @@ -1077,8 +1780,8 @@ components: default: 1 type: object required: - - prompt - - model_id + - prompt + - model_id title: TextToImageParams TextToSpeechParams: properties: @@ -1096,19 +1799,18 @@ components: type: string title: Description description: Description of speaker to steer text to speech generation. - default: A male speaker delivers a slightly expressive and animated speech - with a moderate speed and pitch. + default: A male speaker delivers a slightly expressive and animated speech with a moderate speed and pitch. type: object title: TextToSpeechParams required: - - model_id + - model_id ValidationError: properties: loc: items: anyOf: - - type: string - - type: integer + - type: string + - type: integer type: array title: Location msg: @@ -1119,9 +1821,9 @@ components: title: Error Type type: object required: - - loc - - msg - - type + - loc + - msg + - type title: ValidationError VideoResponse: properties: @@ -1133,7 +1835,7 @@ components: description: The generated images. type: object required: - - images + - images title: VideoResponse description: Response model for image generation. securitySchemes: