ComfyUI Deployment for Image Generation

We design and deploy artificial intelligence systems: from prototype to production-ready solutions. Our team combines expertise in machine learning, data engineering and MLOps to make AI work not in the lab, but in real business.
Showing 1 of 1 services (of 1,566 services in total).
ComfyUI Deployment for Image Generation
Medium
from 1 business day to 3 business days
FAQ
AI Development Areas
AI Solution Development Stages
Latest works
  • image_website-b2b-advance_0.png
    B2B ADVANCE company website development
    1212
  • image_web-applications_feedme_466_0.webp
    Development of a web application for FEEDME
    1161
  • image_websites_belfingroup_462_0.webp
    Website development for BELFINGROUP
    852
  • image_ecommerce_furnoro_435_0.webp
    Development of an online store for the company FURNORO
    1041
  • image_logo-advance_0.png
    B2B Advance company logo design
    561
  • image_crm_enviok_479_0.webp
    Development of a web application for Enviok
    822

Deploy and integrate ComfyUI for image generation

ComfyUI is a node-based interface for Stable Diffusion with full pipeline control. Unlike Automatic1111, ComfyUI offers a more flexible API and supports complex workflows: SDXL + Refiner, ControlNet, IP-Adapter, and AnimateDiff in a single graph.

Installation and configuration

git clone https://github.com/comfyanonymous/ComfyUI
cd ComfyUI
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
pip install -r requirements.txt

# Model directory layout expected by ComfyUI (checkpoints, LoRAs, ControlNets, ...)
mkdir -p models/{checkpoints,loras,controlnet,vae,embeddings,upscale_models}

# Start the server with the HTTP/WebSocket API exposed on all interfaces (port 8188)
python main.py \
    --listen 0.0.0.0 \
    --port 8188 \
    --highvram \
    --preview-method auto

Python API client

import io
import json
import urllib.parse
import urllib.request
import uuid

import websocket
from PIL import Image

class ComfyUIClient:
    """Minimal client for the ComfyUI HTTP + WebSocket API.

    Submits workflow graphs to ``/prompt``, waits for completion over the
    ``/ws`` WebSocket, then downloads the saved output images via ``/view``.
    """

    def __init__(self, server: str = "127.0.0.1:8188"):
        self.server = server
        # Unique client id so the server routes WS events for our prompts only.
        self.client_id = str(uuid.uuid4())

    def queue_prompt(self, workflow: dict) -> str:
        """Submit *workflow* for execution and return the server's prompt_id."""
        data = json.dumps({"prompt": workflow, "client_id": self.client_id}).encode()
        req = urllib.request.Request(f"http://{self.server}/prompt", data=data)
        # Close the HTTP response explicitly (the original leaked it).
        with urllib.request.urlopen(req) as resp:
            return json.loads(resp.read())["prompt_id"]

    def get_images_ws(self, workflow: dict) -> list[bytes]:
        """Run *workflow* and block until done; return the final images as bytes.

        Binary WebSocket frames (live previews) are ignored — only the
        saved outputs reported in the execution history are returned.
        """
        prompt_id = self.queue_prompt(workflow)
        ws = websocket.WebSocket()
        ws.connect(f"ws://{self.server}/ws?clientId={self.client_id}")
        try:
            while True:
                msg = ws.recv()
                if isinstance(msg, str):
                    event = json.loads(msg)
                    # An "executing" event with node=None signals that the
                    # whole prompt has finished.
                    if event["type"] == "executing" and event["data"].get("node") is None:
                        break
                # Binary frames are preview images; not needed for final output.
        finally:
            # Always release the socket, even if recv/parse raises.
            ws.close()

        # Fetch the execution history to locate the saved output files.
        with urllib.request.urlopen(f"http://{self.server}/history/{prompt_id}") as resp:
            history = json.loads(resp.read())

        output_images = []
        for node_output in history[prompt_id]["outputs"].values():
            for img_info in node_output.get("images", []):
                # urlencode handles filenames with spaces/special characters,
                # which the original hand-built query string did not.
                query = urllib.parse.urlencode({
                    "filename": img_info["filename"],
                    "subfolder": img_info["subfolder"],
                    "type": img_info["type"],
                })
                with urllib.request.urlopen(f"http://{self.server}/view?{query}") as img:
                    output_images.append(img.read())

        return output_images

Workflow as code

def build_sdxl_workflow(
    prompt: str,
    negative_prompt: str = "low quality, blurry",
    width: int = 1024,
    height: int = 1024,
    steps: int = 30,
    cfg: float = 7.0,
    seed: int = 42,
    checkpoint: str = "sd_xl_base_1.0.safetensors",
    batch_size: int = 1,
    sampler_name: str = "dpmpp_2m",
    scheduler: str = "karras",
) -> dict:
    """Build a ComfyUI API-format graph for a basic SDXL text-to-image run.

    Nodes: checkpoint loader -> positive/negative CLIP encodes -> empty
    latent -> KSampler -> VAE decode -> SaveImage. Links between nodes use
    ComfyUI's ``[node_id, output_index]`` reference format.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Negative text prompt.
        width: Latent image width in pixels.
        height: Latent image height in pixels.
        steps: Number of sampling steps.
        cfg: Classifier-free guidance scale.
        seed: Sampler seed for reproducibility.
        checkpoint: Checkpoint filename under ``models/checkpoints``.
        batch_size: Number of images generated per run (was hard-coded to 1).
        sampler_name: KSampler algorithm name (was hard-coded to "dpmpp_2m").
        scheduler: Noise schedule name (was hard-coded to "karras").

    Returns:
        The workflow graph as a dict keyed by node id, ready to POST to
        ComfyUI's ``/prompt`` endpoint.
    """
    return {
        "1": {
            "class_type": "CheckpointLoaderSimple",
            "inputs": {"ckpt_name": checkpoint}
        },
        "2": {
            # Positive conditioning; output 1 of node "1" is the CLIP model.
            "class_type": "CLIPTextEncode",
            "inputs": {"text": prompt, "clip": ["1", 1]}
        },
        "3": {
            # Negative conditioning.
            "class_type": "CLIPTextEncode",
            "inputs": {"text": negative_prompt, "clip": ["1", 1]}
        },
        "4": {
            "class_type": "EmptyLatentImage",
            "inputs": {"width": width, "height": height, "batch_size": batch_size}
        },
        "5": {
            "class_type": "KSampler",
            "inputs": {
                "model": ["1", 0],
                "positive": ["2", 0],
                "negative": ["3", 0],
                "latent_image": ["4", 0],
                "seed": seed,
                "steps": steps,
                "cfg": cfg,
                "sampler_name": sampler_name,
                "scheduler": scheduler,
                # Full denoise: pure txt2img (no img2img strength).
                "denoise": 1.0
            }
        },
        "6": {
            # Output 2 of node "1" is the checkpoint's baked-in VAE.
            "class_type": "VAEDecode",
            "inputs": {"samples": ["5", 0], "vae": ["1", 2]}
        },
        "7": {
            "class_type": "SaveImage",
            "inputs": {"images": ["6", 0], "filename_prefix": "output"}
        }
    }

ControlNet workflow

def build_controlnet_workflow(
    prompt: str,
    control_image_b64: str,
    controlnet_model: str = "control_v11p_sd15_canny.pth",
    strength: float = 0.9
) -> dict:
    """Build an SDXL workflow with a ControlNet conditioning branch.

    Extends :func:`build_sdxl_workflow` with LoadImage -> ControlNetLoader ->
    ControlNetApply nodes and routes the sampler's positive conditioning
    through ControlNetApply so the control image actually takes effect.

    Args:
        prompt: Positive text prompt.
        control_image_b64: Image reference for the LoadImage node.
        controlnet_model: ControlNet weights filename under ``models/controlnet``.
            NOTE(review): default is an SD1.5 ControlNet while the base
            workflow loads SDXL — confirm a matching model is used in practice.
        strength: ControlNet influence, 0.0 (off) to 1.0 (full).

    Returns:
        The combined workflow graph dict.
    """
    base_workflow = build_sdxl_workflow(prompt)
    # Add the ControlNet nodes.
    base_workflow.update({
        "10": {
            "class_type": "LoadImage",
            "inputs": {"image": control_image_b64}
        },
        "11": {
            "class_type": "ControlNetLoader",
            "inputs": {"control_net_name": controlnet_model}
        },
        "12": {
            "class_type": "ControlNetApply",
            "inputs": {
                # FIX: ComfyUI links are [node_id, output_index] references;
                # the original passed node "2"'s raw inputs dict here.
                "conditioning": ["2", 0],
                "control_net": ["11", 0],
                "image": ["10", 0],
                "strength": strength
            }
        }
    })
    # FIX: feed the sampler's positive conditioning from ControlNetApply;
    # otherwise node "12" dangles and the ControlNet has no effect.
    base_workflow["5"]["inputs"]["positive"] = ["12", 0]
    return base_workflow

ComfyUI is especially valuable for complex pipelines: generation → upscaling → detailing → face restoration in a single graph without intermediate files. Timeframe: deployment with basic workflows — 1 day. Development of a production API with custom workflows — 3–5 days.