from comfy_script.runtime import *
# load('http://127.0.0.1:8188/')
load()
# Nodes can only be imported after load()
from comfy_script.runtime.nodes import *
Nodes: 172
with Workflow():
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    image = VAEDecode(latent, vae)
    SaveImage(image, 'ComfyUI')
Queue remaining: 1 100%|██████████████████████████████████████████████████| 20/20 Queue remaining: 0
To control the queue:
queue.cancel_current()
queue.cancel_remaining()
queue.cancel_all()
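For example, a big batch can be queued and then dropped later without ever waiting on it, since Workflow() does not wait by default. A minimal sketch, assuming the runtime has already been loaded as above and that cancel_all() clears both the running task and everything still waiting; the batch size of 4 is arbitrary:
with Workflow():
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 4)  # batch of 4 just to keep the queue busy for a while
    latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    SaveImage(VAEDecode(latent, vae), 'ComfyUI')

# Changed your mind: cancel the running task and anything still queued
queue.cancel_all()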
To wait for the task:
with Workflow(wait=True):
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    image = VAEDecode(latent, vae)
    SaveImage(image, 'ComfyUI')
# Or:
with Workflow() as wf:
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    image = VAEDecode(latent, vae)
    SaveImage(image, 'ComfyUI')
wf.task.wait()
# Or with await:
# await wf.task
Queue remaining: 1 Queue remaining: 0
model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
conditioning2 = CLIPTextEncode('text, watermark', clip)
latent = EmptyLatentImage(512, 512, 1)
latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
image = VAEDecode(latent, vae)
save = SaveImage(image, 'ComfyUI')
task = queue.put(save)
print(task)
# To wait for the task:
# task.wait()
# or:
# await task
Task 39 (315e8ccd-63eb-48ad-8818-8f22d15520de)
Or use Workflow without with:
wf = Workflow()
model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
conditioning2 = CLIPTextEncode('text, watermark', clip)
latent = EmptyLatentImage(512, 512, 1)
latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
image = VAEDecode(latent, vae)
wf += SaveImage(image, 'ComfyUI')
task = queue.put(wf)
print(task)
Task 40 (7fa2acca-0ecb-41d1-be73-5450200805e5)
Or just await the output node, or call wait() on it:
model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
conditioning2 = CLIPTextEncode('text, watermark', clip)
latent = EmptyLatentImage(512, 512, 1)
latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
image = VAEDecode(latent, vae)
await SaveImage(image, 'ComfyUI')
# or:
# SaveImage(image, 'ComfyUI').wait()
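The value returned by wait() (or await) is an image batch whose items can render themselves as PNG bytes, which is also how the ipywidgets example below displays them. A minimal sketch of saving the first image manually, assuming the nodes above have just run and using an arbitrary output filename:
image_batch = SaveImage(image, 'ComfyUI').wait()
# Each item exposes its PNG bytes via _repr_png_() (used below with widgets.Image as well)
png_bytes = image_batch[0]._repr_png_()
with open('first-image.png', 'wb') as f:  # 'first-image.png' is just an example path
    f.write(png_bytes)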
import ipywidgets as widgets

queue.watch_display(False)

latents = []
image_batches = []
with Workflow():
    seed = 0
    pos = 'sky, 1girl, smile'
    neg = 'embedding:easynegative'
    model, clip, vae = CheckpointLoaderSimple(Checkpoints.AOM3A1B_orangemixs)
    model2, clip2, vae2 = CheckpointLoaderSimple(Checkpoints.CounterfeitV25_25)
    for color in 'red', 'green', 'blue':
        latent = EmptyLatentImage(440, 640)
        latent = KSampler(model, seed, steps=15, cfg=6, sampler_name='uni_pc',
                          positive=CLIPTextEncode(f'{color}, {pos}', clip), negative=CLIPTextEncode(neg, clip),
                          latent_image=latent)
        latents.append(latent)
        image_batches.append(SaveImage(VAEDecode(latent, vae), f'{seed} {color}'))

grid = widgets.GridspecLayout(1, len(image_batches))
for i, image_batch in enumerate(image_batches):
    image_batch = image_batch.wait()
    image = widgets.Image(value=image_batch[0]._repr_png_())

    button = widgets.Button(description=f'Hires fix {i}')
    def hiresfix(button, i=i):
        print(f'Image {i} is chosen')
        with Workflow():
            latent = LatentUpscaleBy(latents[i], scale_by=2)
            latent = KSampler(model2, seed, steps=15, cfg=6, sampler_name='uni_pc',
                              positive=CLIPTextEncode(pos, clip2), negative=CLIPTextEncode(neg, clip2),
                              latent_image=latent, denoise=0.6)
            image_batch = SaveImage(VAEDecode(latent, vae2), f'{seed} hires')
        display(image_batch.wait())
    button.on_click(hiresfix)

    grid[0, i] = widgets.VBox(children=(image, button))
display(grid)
with Workflow(queue=False) as wf:
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 123, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    SaveImage(VAEDecode(latent, vae), '0')
    for i in range(5):
        latent = KSampler(model, 123, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 0.8)
        SaveImage(VAEDecode(latent, vae), f'{i}')

json = wf.api_format_json()
with open('prompt.json', 'w') as f:
    f.write(json)
print(json)
{ "0": { "inputs": { "ckpt_name": "v1-5-pruned-emaonly.ckpt" }, "class_type": "CheckpointLoaderSimple" }, "1": { "inputs": { "text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,", "clip": [ "0", 1 ] }, "class_type": "CLIPTextEncode" }, "2": { "inputs": { "text": "text, watermark", "clip": [ "0", 1 ] }, "class_type": "CLIPTextEncode" }, "3": { "inputs": { "width": 512, "height": 512, "batch_size": 1 }, "class_type": "EmptyLatentImage" }, "4": { "inputs": { "seed": 123, "steps": 20, "cfg": 8, "sampler_name": "euler", "scheduler": "normal", "denoise": 1, "model": [ "0", 0 ], "positive": [ "1", 0 ], "negative": [ "2", 0 ], "latent_image": [ "3", 0 ] }, "class_type": "KSampler" }, "5": { "inputs": { "samples": [ "4", 0 ], "vae": [ "0", 2 ] }, "class_type": "VAEDecode" }, "6": { "inputs": { "filename_prefix": "0", "images": [ "5", 0 ] }, "class_type": "SaveImage" }, "7": { "inputs": { "seed": 123, "steps": 20, "cfg": 8, "sampler_name": "euler", "scheduler": "normal", "denoise": 0.8, "model": [ "0", 0 ], "positive": [ "1", 0 ], "negative": [ "2", 0 ], "latent_image": [ "4", 0 ] }, "class_type": "KSampler" }, "8": { "inputs": { "samples": [ "7", 0 ], "vae": [ "0", 2 ] }, "class_type": "VAEDecode" }, "9": { "inputs": { "filename_prefix": "0", "images": [ "8", 0 ] }, "class_type": "SaveImage" }, "10": { "inputs": { "seed": 123, "steps": 20, "cfg": 8, "sampler_name": "euler", "scheduler": "normal", "denoise": 0.8, "model": [ "0", 0 ], "positive": [ "1", 0 ], "negative": [ "2", 0 ], "latent_image": [ "7", 0 ] }, "class_type": "KSampler" }, "11": { "inputs": { "samples": [ "10", 0 ], "vae": [ "0", 2 ] }, "class_type": "VAEDecode" }, "12": { "inputs": { "filename_prefix": "1", "images": [ "11", 0 ] }, "class_type": "SaveImage" }, "13": { "inputs": { "seed": 123, "steps": 20, "cfg": 8, "sampler_name": "euler", "scheduler": "normal", "denoise": 0.8, "model": [ "0", 0 ], "positive": [ "1", 0 ], "negative": [ "2", 0 ], "latent_image": [ "10", 0 ] }, "class_type": "KSampler" }, "14": { "inputs": { "samples": [ "13", 0 ], "vae": [ "0", 2 ] }, "class_type": "VAEDecode" }, "15": { "inputs": { "filename_prefix": "2", "images": [ "14", 0 ] }, "class_type": "SaveImage" }, "16": { "inputs": { "seed": 123, "steps": 20, "cfg": 8, "sampler_name": "euler", "scheduler": "normal", "denoise": 0.8, "model": [ "0", 0 ], "positive": [ "1", 0 ], "negative": [ "2", 0 ], "latent_image": [ "13", 0 ] }, "class_type": "KSampler" }, "17": { "inputs": { "samples": [ "16", 0 ], "vae": [ "0", 2 ] }, "class_type": "VAEDecode" }, "18": { "inputs": { "filename_prefix": "3", "images": [ "17", 0 ] }, "class_type": "SaveImage" }, "19": { "inputs": { "seed": 123, "steps": 20, "cfg": 8, "sampler_name": "euler", "scheduler": "normal", "denoise": 0.8, "model": [ "0", 0 ], "positive": [ "1", 0 ], "negative": [ "2", 0 ], "latent_image": [ "16", 0 ] }, "class_type": "KSampler" }, "20": { "inputs": { "samples": [ "19", 0 ], "vae": [ "0", 2 ] }, "class_type": "VAEDecode" }, "21": { "inputs": { "filename_prefix": "4", "images": [ "20", 0 ] }, "class_type": "SaveImage" } }
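The API-format JSON above is the same format ComfyUI's own HTTP API accepts, so the saved prompt.json can also be queued without ComfyScript by POSTing it to the server's /prompt endpoint. A minimal sketch, assuming a ComfyUI server on the default http://127.0.0.1:8188 (as in the commented-out load() call at the top); the json module is imported under another name so it does not clash with the json string above:
import json as jsonlib
import urllib.request

with open('prompt.json') as f:
    prompt = jsonlib.load(f)

# The /prompt endpoint expects the workflow under the "prompt" key
data = jsonlib.dumps({'prompt': prompt}).encode()
req = urllib.request.Request('http://127.0.0.1:8188/prompt', data=data,
                             headers={'Content-Type': 'application/json'})
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode())  # the response includes the prompt_id assigned by the server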
from comfy_script.runtime.real import *
load()
from comfy_script.runtime.real.nodes import *
with Workflow():
    model, clip, vae = CheckpointLoaderSimple('v1-5-pruned-emaonly.ckpt')
    conditioning = CLIPTextEncode('beautiful scenery nature glass bottle landscape, , purple galaxy bottle,', clip)
    conditioning2 = CLIPTextEncode('text, watermark', clip)
    latent = EmptyLatentImage(512, 512, 1)
    latent = KSampler(model, 156680208700286, 20, 8, 'euler', 'normal', conditioning, conditioning2, latent, 1)
    image = VAEDecode(latent, vae)
    SaveImage(image, 'ComfyUI')
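In real mode the node calls above execute locally and return actual Python objects instead of references into a queued workflow, so intermediate results can be inspected with ordinary Python. A minimal sketch, assuming ComfyUI's usual internal conventions (a LATENT is a dict holding a 'samples' torch tensor, an IMAGE is a torch tensor); these conventions are not stated in the snippet above, hence the defensive checks:
# Run right after the real-mode workflow above; the shapes printed here depend on
# assumptions about ComfyUI's internal data types, not on anything shown in the snippet.
print(type(latent), type(image))
if isinstance(latent, dict) and 'samples' in latent:
    print('latent samples shape:', tuple(latent['samples'].shape))
if hasattr(image, 'shape'):
    print('decoded image shape:', tuple(image.shape))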