Your job is to convert a JSON workflow graph for AI image generation into a TypeScript function.
- You should define a type for the input, using Zod for validation.
- You should use `.describe` to describe each parameter to the best of your ability.
- The filename prefix is always set by the system in a different location.
- Do not extrapolate enum values. Always use the checkpoint value from config and use imported types as demonstrated.
- Use snake_case for multi-word parameters.
- LoadImage inputs will always be accepted as either a URL or a base64-encoded string.
- Only output the typescript, with no additional commentary.
Here is an example output:
import { z } from "zod";
import config from "../config";
// Checkpoint selector: an optional enum of the configured checkpoints.
// When a warmup checkpoint is configured, it becomes the default value.
const baseCheckpoint = config.models.checkpoints.enum.optional();
let checkpoint: any = config.warmupCkpt
  ? baseCheckpoint.default(config.warmupCkpt)
  : baseCheckpoint;
// Runtime shape of a single ComfyUI node: its input map, its node class,
// and optional UI metadata.
const ComfyNodeSchema = z.object({
  inputs: z.any(),
  class_type: z.string(),
  _meta: z.any().optional(),
});
type ComfyNode = z.infer<typeof ComfyNodeSchema>;
// A ComfyUI prompt is a map of node id -> node definition. This type was
// referenced (by Workflow and generateWorkflow) but never declared.
type ComfyPrompt = Record<string, ComfyNode>;
// Contract every workflow module exports: a zod schema for the request body,
// a builder that turns validated input into a ComfyUI prompt graph, and
// optional human-readable metadata.
interface Workflow {
// Validates and documents the request payload.
RequestSchema: z.ZodObject<any, any>;
// Builds the ComfyUI node graph from a validated request.
generateWorkflow: (input: any) => ComfyPrompt;
// Longer human-readable description of the workflow.
description?: string;
// Short title, e.g. "Text to Image".
summary?: string;
}
// User-facing generation parameters. Each field carries a `.describe()` so
// the schema doubles as API documentation.
// Fix: the original had stray backticks after `.optional()` on the seed
// field (`.optional()`` `), which is a syntax error.
const RequestSchema = z.object({
  prompt: z.string().describe("The positive prompt for image generation"),
  width: z
    .number()
    .int()
    .min(256)
    .max(2048)
    .optional()
    .default(1024)
    .describe("Width of the generated image"),
  height: z
    .number()
    .int()
    .min(256)
    .max(2048)
    .optional()
    .default(1024)
    .describe("Height of the generated image"),
  seed: z
    .number()
    .int()
    .optional()
    // A fresh random seed is drawn per request when none is supplied.
    .default(() => Math.floor(Math.random() * 1000000000000000))
    .describe("Seed for random number generation"),
  steps: z
    .number()
    .int()
    .min(1)
    .max(100)
    .optional()
    .default(4)
    .describe("Number of sampling steps"),
  cfg_scale: z
    .number()
    .min(0)
    .max(20)
    .optional()
    .default(1)
    .describe("Classifier-free guidance scale"),
  sampler_name: config.samplers
    .optional()
    .default("euler")
    .describe("Name of the sampler to use"),
  scheduler: config.schedulers
    .optional()
    .default("simple")
    .describe("Type of scheduler to use"),
  denoise: z
    .number()
    .min(0)
    .max(1)
    .optional()
    .default(1)
    .describe("Denoising strength"),
  checkpoint,
});
type InputType = z.infer<typeof RequestSchema>;
// Builds the ComfyUI node graph for a single text-to-image request.
// Node wiring (ids reference other nodes as [id, output_index]):
//   30 CheckpointLoaderSimple -> model/clip/vae
//   6/33 CLIPTextEncode (positive/negative) -> conditioning
//   27 EmptySD3LatentImage -> latent
//   31 KSampler -> sampled latent
//   8 VAEDecode -> image, 9 SaveImage -> output
function generateWorkflow(input: InputType): ComfyPrompt {
  const positivePrompt = {
    inputs: { text: input.prompt, clip: ["30", 1] },
    class_type: "CLIPTextEncode",
    _meta: { title: "CLIP Text Encode (Positive Prompt)" },
  };
  const vaeDecode = {
    inputs: { samples: ["31", 0], vae: ["30", 2] },
    class_type: "VAEDecode",
    _meta: { title: "VAE Decode" },
  };
  const saveImage = {
    inputs: { filename_prefix: "Flux", images: ["8", 0] },
    class_type: "SaveImage",
    _meta: { title: "Save Image" },
  };
  const emptyLatent = {
    inputs: { width: input.width, height: input.height, batch_size: 1 },
    class_type: "EmptySD3LatentImage",
    _meta: { title: "EmptySD3LatentImage" },
  };
  const loadCheckpoint = {
    inputs: { ckpt_name: input.checkpoint },
    class_type: "CheckpointLoaderSimple",
    _meta: { title: "Load Checkpoint" },
  };
  const sampler = {
    inputs: {
      seed: input.seed,
      steps: input.steps,
      cfg: input.cfg_scale,
      sampler_name: input.sampler_name,
      scheduler: input.scheduler,
      denoise: input.denoise,
      model: ["30", 0],
      positive: ["6", 0],
      negative: ["33", 0],
      latent_image: ["27", 0],
    },
    class_type: "KSampler",
    _meta: { title: "KSampler" },
  };
  const negativePrompt = {
    inputs: { text: "", clip: ["30", 1] },
    class_type: "CLIPTextEncode",
    _meta: { title: "CLIP Text Encode (Negative Prompt)" },
  };
  return {
    "6": positivePrompt,
    "8": vaeDecode,
    "9": saveImage,
    "27": emptyLatent,
    "30": loadCheckpoint,
    "31": sampler,
    "33": negativePrompt,
  };
}
// Module export consumed by the workflow registry.
const workflow: Workflow = {
  summary: "Text to Image",
  description: "Generate an image from a text prompt",
  RequestSchema,
  generateWorkflow,
};
export default workflow;