
Parameter Description
Input Parameters
| Parameter | Type | Default | Description |
|---|---|---|---|
| prompt | string | "" | Text prompt describing, in detail, the content to generate |
| seed | integer | 0 | Does not affect the output (not yet implemented in the backend); it only controls whether the node re-executes |
| quality | combo | "standard" | Image quality; options: "standard" or "hd" |
| style | combo | "natural" | Visual style; options: "natural" or "vivid". "vivid" pushes the model toward hyper-real, dramatic images, while "natural" produces more natural, less hyper-real results |
| size | combo | "1024x1024" | Output image size; options: "1024x1024", "1024x1792", or "1792x1024" |
Output Parameters
| Output | Type | Description |
|---|---|---|
| IMAGE | image | The generated image |
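These inputs map directly onto the fields of OpenAI's image generation request. As a point of reference only, the sketch below shows the equivalent direct call using the official `openai` Python SDK; the node itself sends the same fields through the comfy.org proxy instead of calling OpenAI directly, and the client setup shown here (API key read from the environment) is an assumption made for the sketch.

```python
# Hedged sketch: equivalent direct request with the official `openai` Python SDK.
# The ComfyUI node routes the same fields through the comfy.org proxy instead.
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

result = client.images.generate(
    model="dall-e-3",
    prompt="A watercolor painting of a lighthouse at dawn",
    quality="standard",   # "standard" or "hd"
    style="natural",      # "natural" or "vivid"
    size="1024x1024",     # "1024x1024", "1024x1792", or "1792x1024"
    n=1,                  # DALL-E 3 only supports generating one image per request
)

print(result.data[0].url)  # short-lived URL to the generated image
```

Note that the OpenAI request itself has no seed field, which is consistent with the node's seed only triggering re-execution.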
Source Code Reference
Node source code (updated 2025-05-03)

```python
# Imports reconstructed from the ComfyUI repository (comfy_api_nodes package);
# exact module paths may differ between ComfyUI versions.
from inspect import cleandoc

from comfy.comfy_types.node_typing import IO, ComfyNodeABC, InputTypeDict
from comfy_api_nodes.apis import (
    OpenAIImageGenerationRequest,
    OpenAIImageGenerationResponse,
)
from comfy_api_nodes.apis.client import ApiEndpoint, HttpMethod, SynchronousOperation
from comfy_api_nodes.apinode_utils import validate_and_cast_response


class OpenAIDalle3(ComfyNodeABC):
    """
    Generates images synchronously via OpenAI's DALL·E 3 endpoint.
    Uses the proxy at /proxy/openai/images/generations. Returned URLs are short‑lived,
    so download or cache results if you need to keep them.
    """
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls) -> InputTypeDict:
        return {
            "required": {
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Text prompt for DALL·E",
                    },
                ),
            },
            "optional": {
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 2**31 - 1,
                        "step": 1,
                        "display": "number",
                        "control_after_generate": True,
                        "tooltip": "not implemented yet in backend",
                    },
                ),
                "quality": (
                    IO.COMBO,
                    {
                        "options": ["standard", "hd"],
                        "default": "standard",
                        "tooltip": "Image quality",
                    },
                ),
                "style": (
                    IO.COMBO,
                    {
                        "options": ["natural", "vivid"],
                        "default": "natural",
                        "tooltip": "Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images.",
                    },
                ),
                "size": (
                    IO.COMBO,
                    {
                        "options": ["1024x1024", "1024x1792", "1792x1024"],
                        "default": "1024x1024",
                        "tooltip": "Image size",
                    },
                ),
            },
            "hidden": {"auth_token": "AUTH_TOKEN_COMFY_ORG"},
        }

    RETURN_TYPES = (IO.IMAGE,)
    FUNCTION = "api_call"
    CATEGORY = "api node/image/openai"
    DESCRIPTION = cleandoc(__doc__ or "")
    API_NODE = True

    def api_call(
        self,
        prompt,
        seed=0,
        style="natural",
        quality="standard",
        size="1024x1024",
        auth_token=None,
    ):
        model = "dall-e-3"
        # build the operation
        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/openai/images/generations",
                method=HttpMethod.POST,
                request_model=OpenAIImageGenerationRequest,
                response_model=OpenAIImageGenerationResponse,
            ),
            request=OpenAIImageGenerationRequest(
                model=model,
                prompt=prompt,
                quality=quality,
                size=size,
                style=style,
                seed=seed,
            ),
            auth_token=auth_token,
        )
        response = operation.execute()
        img_tensor = validate_and_cast_response(response)
        return (img_tensor,)
```
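
As the docstring notes, the URLs returned by the endpoint are short-lived, so `validate_and_cast_response` turns the response into the tensor carried by the IMAGE output. The helper below is a minimal, illustrative sketch of that conversion, not the actual ComfyUI implementation; the function name `response_to_tensor` and the exact handling of `b64_json` versus `url` fields are assumptions.

```python
# Hedged sketch of what a response-to-tensor helper conceptually does.
# The real validate_and_cast_response in ComfyUI may differ in details.
import base64
import io

import numpy as np
import requests
import torch
from PIL import Image

def response_to_tensor(response) -> torch.Tensor:
    """Convert an image-generation response into a [B, H, W, C] float tensor in [0, 1]."""
    images = []
    for item in response.data:
        if getattr(item, "b64_json", None):          # base64-encoded payload
            raw = base64.b64decode(item.b64_json)
        else:                                        # short-lived URL: download immediately
            raw = requests.get(item.url, timeout=60).content
        img = Image.open(io.BytesIO(raw)).convert("RGB")
        arr = np.asarray(img).astype(np.float32) / 255.0
        images.append(torch.from_numpy(arr))
    return torch.stack(images, dim=0)
```

ComfyUI expects IMAGE tensors shaped [batch, height, width, channels] with float values in [0, 1], which is the layout the final `torch.stack` produces.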