
ComfyScript Runtime

from comfy_script.runtime import *

# load('http://127.0.0.1:8188/')
load()

# Nodes can only be imported after load()
from comfy_script.runtime.nodes import *
Nodes: 172

Basics

with Workflow():
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    image = VAEDecode(latent, vae)
    SaveImage(image, 'ComfyUI')
Queue remaining: 1
100%|██████████████████████████████████████████████████| 20/20
Queue remaining: 0

The workflow is queued when the with block exits and runs asynchronously by default. To control the queue:

queue.cancel_current()
queue.cancel_remaining()
queue.cancel_all()
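
For example, a minimal sketch (the seeds are arbitrary): queue a few variations, then drop the ones that have not started yet while the task that is already running finishes:

for seed in (1, 2, 3):
    with Workflow():
        model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
        conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
        conditioning2 = CLIPTextEncode('text, watermark', clip)
        latent = EmptyLatentImage(512, 512, 1)
        latent = KSampler(model, seed, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
        SaveImage(VAEDecode(latent, vae), 'ComfyUI')

# Keep the task that is currently running, cancel the pending ones
queue.cancel_remaining()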

To wait for the task:

with Workflow(wait=True):
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    image = VAEDecode(latent, vae)
    SaveImage(image, 'ComfyUI')

# Or:
with Workflow() as wf:
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    image = VAEDecode(latent, vae)
    SaveImage(image, 'ComfyUI')
wf.task.wait()

# Or with await:
# await wf.task
Queue remaining: 1
Queue remaining: 0

Without Workflow

model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
conditioning2 = CLIPTextEncode('text, watermark', clip)
latent = EmptyLatentImage(512, 512, 1)
latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
image = VAEDecode(latent, vae)
save = SaveImage(image, 'ComfyUI')

task = queue.put(save)
print(task)

# To wait for the task:
# task.wait()
# or:
# await task
Task 39 (315e8ccd-63eb-48ad-8818-8f22d15520de)

Or use Workflow without the with statement:

wf = Workflow()
model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
conditioning2 = CLIPTextEncode('text, watermark', clip)
latent = EmptyLatentImage(512, 512, 1)
latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
image = VAEDecode(latent, vae)
wf += SaveImage(image, 'ComfyUI')

task = queue.put(wf)
print(task)
Task 40 (7fa2acca-0ecb-41d1-be73-5450200805e5)
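
Building the workflow this way is handy when outputs are added incrementally. A minimal sketch (the seeds are arbitrary; it assumes wf += can be called once per output to collect several outputs into a single task, as the with block does implicitly):

wf = Workflow()
model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
conditioning2 = CLIPTextEncode('text, watermark', clip)
for seed in (1, 2, 3):
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, seed, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    # Each saved image batch becomes an output of the same workflow
    wf += SaveImage(VAEDecode(latent, vae), f'seed {seed}')

task = queue.put(wf)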

Or just await the output node, or call wait() on it:

model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
conditioning2 = CLIPTextEncode('text, watermark', clip)
latent = EmptyLatentImage(512, 512, 1)
latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
image = VAEDecode(latent, vae)
await SaveImage(image, 'ComfyUI')
# or:
# SaveImage(image, 'ComfyUI').wait()

Select and process

import ipywidgets as widgets
from IPython.display import display

# Disable the automatic display of queue progress so only the widgets below are shown
queue.watch_display(False)

latents = []
image_batches = []
with Workflow():
    seed = 0
    pos = 'sky, 1girl, smile'
    neg = 'embedding:easynegative'
    model, clip, vae = CheckpointLoaderSimple(Checkpoints.AOM3A1B_orangemixs)
    model2, clip2, vae2 = CheckpointLoaderSimple(Checkpoints.CounterfeitV25_25)
    for color in 'red', 'green', 'blue':
        latent = EmptyLatentImage(440, 640)
        latent = KSampler(model, seed, steps=15, cfg=6, sampler_name='uni_pc',
                          positive=CLIPTextEncode(f'{color}, {pos}', clip), negative=CLIPTextEncode(neg, clip),
                          latent_image=latent)
        latents.append(latent)
        image_batches.append(SaveImage(VAEDecode(latent, vae), f'{seed} {color}'))

grid = widgets.GridspecLayout(1, len(image_batches))
for i, image_batch in enumerate(image_batches):
    image_batch = image_batch.wait()
    image = widgets.Image(value=image_batch[0]._repr_png_())

    button = widgets.Button(description=f'Hires fix {i}')
    def hiresfix(button, i=i):
        print(f'Image {i} is chosen')
        with Workflow():
            latent = LatentUpscaleBy(latents[i], scale_by=2)
            latent = KSampler(model2, seed, steps=15, cfg=6, sampler_name='uni_pc',
                            positive=CLIPTextEncode(pos, clip2), negative=CLIPTextEncode(neg, clip2),
                            latent_image=latent, denoise=0.6)
            image_batch = SaveImage(VAEDecode(latent, vae2), f'{seed} hires')
        display(image_batch.wait())
    button.on_click(hiresfix)

    grid[0, i] = widgets.VBox(children=(image, button))
display(grid)

Workflow generation

with Workflow(queue=False) as wf:
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 123, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    SaveImage(VAEDecode(latent, vae), '0')
    for i in range(5):
        latent = KSampler(model, 123, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 0.8)
        SaveImage(VAEDecode(latent, vae), f'{i}')

json = wf.api_format_json()
with open('prompt.json', 'w') as f:
    f.write(json)

print(json)
{
  "0": {
    "inputs": {
      "ckpt_name": "v1-5-pruned-emaonly.ckpt"
    },
    "class_type": "CheckpointLoaderSimple"
  },
  "1": {
    "inputs": {
      "text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
      "clip": [
        "0",
        1
      ]
    },
    "class_type": "CLIPTextEncode"
  },
  "2": {
    "inputs": {
      "text": "text, watermark",
      "clip": [
        "0",
        1
      ]
    },
    "class_type": "CLIPTextEncode"
  },
  "3": {
    "inputs": {
      "width": 512,
      "height": 512,
      "batch_size": 1
    },
    "class_type": "EmptyLatentImage"
  },
  "4": {
    "inputs": {
      "seed": 123,
      "steps": 20,
      "cfg": 8,
      "sampler_name": "euler",
      "scheduler": "normal",
      "denoise": 1,
      "model": [
        "0",
        0
      ],
      "positive": [
        "1",
        0
      ],
      "negative": [
        "2",
        0
      ],
      "latent_image": [
        "3",
        0
      ]
    },
    "class_type": "KSampler"
  },
  "5": {
    "inputs": {
      "samples": [
        "4",
        0
      ],
      "vae": [
        "0",
        2
      ]
    },
    "class_type": "VAEDecode"
  },
  "6": {
    "inputs": {
      "filename_prefix": "0",
      "images": [
        "5",
        0
      ]
    },
    "class_type": "SaveImage"
  },
  "7": {
    "inputs": {
      "seed": 123,
      "steps": 20,
      "cfg": 8,
      "sampler_name": "euler",
      "scheduler": "normal",
      "denoise": 0.8,
      "model": [
        "0",
        0
      ],
      "positive": [
        "1",
        0
      ],
      "negative": [
        "2",
        0
      ],
      "latent_image": [
        "4",
        0
      ]
    },
    "class_type": "KSampler"
  },
  "8": {
    "inputs": {
      "samples": [
        "7",
        0
      ],
      "vae": [
        "0",
        2
      ]
    },
    "class_type": "VAEDecode"
  },
  "9": {
    "inputs": {
      "filename_prefix": "0",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage"
  },
  "10": {
    "inputs": {
      "seed": 123,
      "steps": 20,
      "cfg": 8,
      "sampler_name": "euler",
      "scheduler": "normal",
      "denoise": 0.8,
      "model": [
        "0",
        0
      ],
      "positive": [
        "1",
        0
      ],
      "negative": [
        "2",
        0
      ],
      "latent_image": [
        "7",
        0
      ]
    },
    "class_type": "KSampler"
  },
  "11": {
    "inputs": {
      "samples": [
        "10",
        0
      ],
      "vae": [
        "0",
        2
      ]
    },
    "class_type": "VAEDecode"
  },
  "12": {
    "inputs": {
      "filename_prefix": "1",
      "images": [
        "11",
        0
      ]
    },
    "class_type": "SaveImage"
  },
  "13": {
    "inputs": {
      "seed": 123,
      "steps": 20,
      "cfg": 8,
      "sampler_name": "euler",
      "scheduler": "normal",
      "denoise": 0.8,
      "model": [
        "0",
        0
      ],
      "positive": [
        "1",
        0
      ],
      "negative": [
        "2",
        0
      ],
      "latent_image": [
        "10",
        0
      ]
    },
    "class_type": "KSampler"
  },
  "14": {
    "inputs": {
      "samples": [
        "13",
        0
      ],
      "vae": [
        "0",
        2
      ]
    },
    "class_type": "VAEDecode"
  },
  "15": {
    "inputs": {
      "filename_prefix": "2",
      "images": [
        "14",
        0
      ]
    },
    "class_type": "SaveImage"
  },
  "16": {
    "inputs": {
      "seed": 123,
      "steps": 20,
      "cfg": 8,
      "sampler_name": "euler",
      "scheduler": "normal",
      "denoise": 0.8,
      "model": [
        "0",
        0
      ],
      "positive": [
        "1",
        0
      ],
      "negative": [
        "2",
        0
      ],
      "latent_image": [
        "13",
        0
      ]
    },
    "class_type": "KSampler"
  },
  "17": {
    "inputs": {
      "samples": [
        "16",
        0
      ],
      "vae": [
        "0",
        2
      ]
    },
    "class_type": "VAEDecode"
  },
  "18": {
    "inputs": {
      "filename_prefix": "3",
      "images": [
        "17",
        0
      ]
    },
    "class_type": "SaveImage"
  },
  "19": {
    "inputs": {
      "seed": 123,
      "steps": 20,
      "cfg": 8,
      "sampler_name": "euler",
      "scheduler": "normal",
      "denoise": 0.8,
      "model": [
        "0",
        0
      ],
      "positive": [
        "1",
        0
      ],
      "negative": [
        "2",
        0
      ],
      "latent_image": [
        "16",
        0
      ]
    },
    "class_type": "KSampler"
  },
  "20": {
    "inputs": {
      "samples": [
        "19",
        0
      ],
      "vae": [
        "0",
        2
      ]
    },
    "class_type": "VAEDecode"
  },
  "21": {
    "inputs": {
      "filename_prefix": "4",
      "images": [
        "20",
        0
      ]
    },
    "class_type": "SaveImage"
  }
}
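
The generated API-format workflow can also be queued without ComfyScript, for example by POSTing it to ComfyUI's /prompt endpoint. A minimal sketch, assuming the default local server address:

import json as jsonlib
import urllib.request

# `json` is the string produced by wf.api_format_json() above;
# wrap it in the request body expected by ComfyUI
body = jsonlib.dumps({'prompt': jsonlib.loads(json)}).encode()
req = urllib.request.Request(
    'http://127.0.0.1:8188/prompt',
    data=body,
    headers={'Content-Type': 'application/json'},
)
print(urllib.request.urlopen(req).read())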

Real mode

from comfy_script.runtime.real import *
load()
from comfy_script.runtime.real.nodes import *
with Workflow():
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    image = VAEDecode(latent, vae)
    SaveImage(image, 'ComfyUI')
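
In real mode, the nodes are executed directly in the current Python process instead of being queued to the server, so outputs such as image are real data rather than references into a queued workflow.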