mirror of https://github.com/willmiao/ComfyUI-Lora-Manager.git
checkpoint
__init__.py
@@ -2,13 +2,13 @@ from .py.lora_manager import LoraManager
 from .py.nodes.lora_loader import LoraManagerLoader
 from .py.nodes.trigger_word_toggle import TriggerWordToggle
 from .py.nodes.lora_stacker import LoraStacker
-# from .py.nodes.save_image import SaveImage
+from .py.nodes.save_image import SaveImage
 
 NODE_CLASS_MAPPINGS = {
     LoraManagerLoader.NAME: LoraManagerLoader,
     TriggerWordToggle.NAME: TriggerWordToggle,
     LoraStacker.NAME: LoraStacker,
-    # SaveImage.NAME: SaveImage
+    SaveImage.NAME: SaveImage
 }
 
 WEB_DIRECTORY = "./web/comfyui"
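For context, NODE_CLASS_MAPPINGS is the dictionary ComfyUI reads when it loads a custom-node package, so uncommenting the two lines above is what makes the new SaveImage node appear in the UI. A rough sketch of the consumption side, with illustrative names only (the real loader lives in ComfyUI core, not in this repo):

# Illustrative sketch: how a loader might merge a package's
# NODE_CLASS_MAPPINGS into a global registry. All names here are
# hypothetical; only the NODE_CLASS_MAPPINGS contract comes from the diff.
import importlib

def load_custom_node_package(module_name, registry):
    """Import a custom-node package and merge its node mappings."""
    module = importlib.import_module(module_name)
    mappings = getattr(module, "NODE_CLASS_MAPPINGS", {})
    # In this repo the keys are each class's NAME attribute,
    # e.g. "Save Image (LoraManager)" -> SaveImage
    registry.update(mappings)
    return mappings

NODE_REGISTRY = {}
# load_custom_node_package("my_custom_nodes_package", NODE_REGISTRY)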
py/nodes/save_image.py
@@ -1,16 +1,43 @@
 import json
+import os
+import asyncio
+import re
+import numpy as np
+import time
 from server import PromptServer # type: ignore
+import folder_paths # type: ignore
+from ..services.lora_scanner import LoraScanner
+from ..config import config
+from ..workflow.parser import WorkflowParser
+from PIL import Image, PngImagePlugin
+import piexif
+from io import BytesIO
 
 class SaveImage:
     NAME = "Save Image (LoraManager)"
     CATEGORY = "Lora Manager/utils"
-    DESCRIPTION = "Experimental node to display image preview and print prompt and extra_pnginfo"
+    DESCRIPTION = "Save images with embedded generation metadata in compatible format"
+
+    def __init__(self):
+        self.output_dir = folder_paths.get_output_directory()
+        self.type = "output"
+        self.prefix_append = ""
+        self.compress_level = 4
+        self.counter = 0
 
     @classmethod
     def INPUT_TYPES(cls):
         return {
             "required": {
                 "image": ("IMAGE",),
+                "filename_prefix": ("STRING", {"default": "ComfyUI"}),
+                "file_format": (["png", "jpeg", "webp"],),
+            },
+            "optional": {
+                "lossless_webp": ("BOOLEAN", {"default": True}),
+                "quality": ("INT", {"default": 100, "min": 1, "max": 100}),
+                "save_workflow_json": ("BOOLEAN", {"default": False}),
+                "add_counter_to_filename": ("BOOLEAN", {"default": True}),
             },
             "hidden": {
                 "prompt": "PROMPT",
@@ -18,24 +45,252 @@ class SaveImage:
         },
     }
 
-    RETURN_TYPES = ("IMAGE",)
-    RETURN_NAMES = ("image",)
+    RETURN_TYPES = ("IMAGE", "STRING")
+    RETURN_NAMES = ("image", "filename")
     FUNCTION = "process_image"
+    OUTPUT_NODE = True
 
-    def process_image(self, image, prompt=None, extra_pnginfo=None):
-        # Print the prompt information
-        print("SaveImage Node - Prompt:")
-        if prompt:
-            print(json.dumps(prompt, indent=2))
-        else:
-            print("No prompt information available")
-
-        # Print the extra_pnginfo
-        print("\nSaveImage Node - Extra PNG Info:")
-        if extra_pnginfo:
-            print(json.dumps(extra_pnginfo, indent=2))
-        else:
-            print("No extra PNG info available")
-
-        # Return the image unchanged
-        return (image,)
+    async def get_lora_hash(self, lora_name):
+        """Get the lora hash from cache"""
+        scanner = await LoraScanner.get_instance()
+        cache = await scanner.get_cached_data()
+
+        for item in cache.raw_data:
+            if item.get('file_name') == lora_name:
+                return item.get('sha256')
+        return None
+
+    async def format_metadata(self, parsed_workflow):
+        """Format metadata in the requested format similar to userComment example"""
+        if not parsed_workflow:
+            return ""
+
+        # Extract the prompt and negative prompt
+        prompt = parsed_workflow.get('prompt', '')
+        negative_prompt = parsed_workflow.get('negative_prompt', '')
+
+        # Extract loras from the prompt if present
+        loras_text = parsed_workflow.get('loras', '')
+        lora_hashes = {}
+
+        # If loras are found, add them on a new line after the prompt
+        if loras_text:
+            prompt_with_loras = f"{prompt}\n{loras_text}"
+
+            # Extract lora names from the format <lora:name:strength>
+            lora_matches = re.findall(r'<lora:([^:]+):([^>]+)>', loras_text)
+
+            # Get hash for each lora
+            for lora_name, strength in lora_matches:
+                hash_value = await self.get_lora_hash(lora_name)
+                if hash_value:
+                    lora_hashes[lora_name] = hash_value
+        else:
+            prompt_with_loras = prompt
+
+        # Format the first part (prompt and loras)
+        metadata_parts = [prompt_with_loras]
+
+        # Add negative prompt
+        if negative_prompt:
+            metadata_parts.append(f"Negative prompt: {negative_prompt}")
+
+        # Format the second part (generation parameters)
+        params = []
+
+        # Add standard parameters in the correct order
+        if 'steps' in parsed_workflow:
+            params.append(f"Steps: {parsed_workflow.get('steps')}")
+
+        if 'sampler' in parsed_workflow:
+            sampler = parsed_workflow.get('sampler')
+            # Convert ComfyUI sampler names to user-friendly names
+            sampler_mapping = {
+                'euler': 'Euler',
+                'euler_ancestral': 'Euler a',
+                'dpm_2': 'DPM2',
+                'dpm_2_ancestral': 'DPM2 a',
+                'heun': 'Heun',
+                'dpm_fast': 'DPM fast',
+                'dpm_adaptive': 'DPM adaptive',
+                'lms': 'LMS',
+                'dpmpp_2s_ancestral': 'DPM++ 2S a',
+                'dpmpp_sde': 'DPM++ SDE',
+                'dpmpp_sde_gpu': 'DPM++ SDE',
+                'dpmpp_2m': 'DPM++ 2M',
+                'dpmpp_2m_sde': 'DPM++ 2M SDE',
+                'dpmpp_2m_sde_gpu': 'DPM++ 2M SDE',
+                'ddim': 'DDIM'
+            }
+            sampler_name = sampler_mapping.get(sampler, sampler)
+            params.append(f"Sampler: {sampler_name}")
+
+        if 'scheduler' in parsed_workflow:
+            scheduler = parsed_workflow.get('scheduler')
+            scheduler_mapping = {
+                'normal': 'Simple',
+                'karras': 'Karras',
+                'exponential': 'Exponential',
+                'sgm_uniform': 'SGM Uniform',
+                'sgm_quadratic': 'SGM Quadratic'
+            }
+            scheduler_name = scheduler_mapping.get(scheduler, scheduler)
+            params.append(f"Schedule type: {scheduler_name}")
+
+        # CFG scale (cfg in parsed_workflow)
+        if 'cfg_scale' in parsed_workflow:
+            params.append(f"CFG scale: {parsed_workflow.get('cfg_scale')}")
+        elif 'cfg' in parsed_workflow:
+            params.append(f"CFG scale: {parsed_workflow.get('cfg')}")
+
+        # Seed
+        if 'seed' in parsed_workflow:
+            params.append(f"Seed: {parsed_workflow.get('seed')}")
+
+        # Size
+        if 'size' in parsed_workflow:
+            params.append(f"Size: {parsed_workflow.get('size')}")
+
+        # Model info
+        if 'checkpoint' in parsed_workflow:
+            # Extract basename without path
+            checkpoint = os.path.basename(parsed_workflow.get('checkpoint', ''))
+            # Remove extension if present
+            checkpoint = os.path.splitext(checkpoint)[0]
+            params.append(f"Model: {checkpoint}")
+
+        # Add LoRA hashes if available
+        if lora_hashes:
+            lora_hash_parts = []
+            for lora_name, hash_value in lora_hashes.items():
+                lora_hash_parts.append(f"{lora_name}: {hash_value}")
+
+            if lora_hash_parts:
+                params.append(f"Lora hashes: \"{', '.join(lora_hash_parts)}\"")
+
+        # Combine all parameters with commas
+        metadata_parts.append(", ".join(params))
+
+        # Join all parts with a new line
+        return "\n".join(metadata_parts)
+
+    def save_images(self, images, filename_prefix, file_format, prompt=None, extra_pnginfo=None,
+                    lossless_webp=True, quality=100, save_workflow_json=False, add_counter_to_filename=True):
+        """Save images with metadata"""
+        results = []
+
+        # Parse the workflow using the WorkflowParser
+        parser = WorkflowParser()
+        if prompt:
+            parsed_workflow = parser.parse_workflow(prompt)
+        else:
+            parsed_workflow = {}
+
+        # Get or create metadata asynchronously
+        metadata = asyncio.run(self.format_metadata(parsed_workflow))
+
+        # Process each image
+        for i, image in enumerate(images):
+            # Convert the tensor image to numpy array
+            img = 255. * image.cpu().numpy()
+            img = Image.fromarray(np.clip(img, 0, 255).astype(np.uint8))
+
+            # Generate filename with counter if needed
+            if add_counter_to_filename:
+                filename = f"{filename_prefix}_{self.counter:05d}"
+                self.counter += 1
+            else:
+                filename = f"{filename_prefix}"
+
+            # Set file extension and prepare saving parameters
+            if file_format == "png":
+                filename += ".png"
+                file_extension = ".png"
+                save_kwargs = {"optimize": True, "compress_level": self.compress_level}
+                pnginfo = PngImagePlugin.PngInfo()
+            elif file_format == "jpeg":
+                filename += ".jpg"
+                file_extension = ".jpg"
+                save_kwargs = {"quality": quality, "optimize": True}
+            elif file_format == "webp":
+                filename += ".webp"
+                file_extension = ".webp"
+                save_kwargs = {"quality": quality, "lossless": lossless_webp}
+
+            # Full save path
+            file_path = os.path.join(self.output_dir, filename)
+
+            # Save the image with metadata
+            try:
+                if file_format == "png":
+                    if metadata:
+                        pnginfo.add_text("parameters", metadata)
+                    if save_workflow_json and extra_pnginfo is not None:
+                        workflow_json = json.dumps(extra_pnginfo)
+                        pnginfo.add_text("workflow", workflow_json)
+                    save_kwargs["pnginfo"] = pnginfo
+                    img.save(file_path, format="PNG", **save_kwargs)
+                elif file_format == "jpeg":
+                    # For JPEG, use piexif
+                    if metadata:
+                        try:
+                            exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
+                            exif_bytes = piexif.dump(exif_dict)
+                            save_kwargs["exif"] = exif_bytes
+                        except Exception as e:
+                            print(f"Error adding EXIF data: {e}")
+                    img.save(file_path, format="JPEG", **save_kwargs)
+                elif file_format == "webp":
+                    # For WebP, also use piexif for metadata
+                    if metadata:
+                        try:
+                            exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
+                            exif_bytes = piexif.dump(exif_dict)
+                            save_kwargs["exif"] = exif_bytes
+                        except Exception as e:
+                            print(f"Error adding EXIF data: {e}")
+                    img.save(file_path, format="WEBP", **save_kwargs)
+
+                results.append({
+                    "filename": filename,
+                    "subfolder": "",
+                    "type": self.type
+                })
+
+                # Notify UI about saved image
+                PromptServer.instance.send_sync("image", {
+                    "filename": filename,
+                    "subfolder": "",
+                    "type": self.type,
+                })
+
+            except Exception as e:
+                print(f"Error saving image: {e}")
+
+        return results
+
+    def process_image(self, image, filename_prefix="ComfyUI", file_format="png", prompt=None, extra_pnginfo=None,
+                      lossless_webp=True, quality=100, save_workflow_json=False, add_counter_to_filename=True):
+        """Process and save image with metadata"""
+        # Make sure the output directory exists
+        os.makedirs(self.output_dir, exist_ok=True)
+
+        # Convert single image to list for consistent processing
+        images = [image[0]] if len(image.shape) == 3 else [img for img in image]
+
+        # Save all images
+        results = self.save_images(
+            images,
+            filename_prefix,
+            file_format,
+            prompt,
+            extra_pnginfo,
+            lossless_webp,
+            quality,
+            save_workflow_json,
+            add_counter_to_filename
+        )
+
+        # Return the first saved filename and the original image
+        filename = results[0]["filename"] if results else ""
+        return (image, filename)
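The PNG branch above stores the metadata string in a text chunk named "parameters" (the A1111/Civitai convention), while the JPEG and WebP branches wrap the same string in an EXIF UserComment built as b'UNICODE\0' plus a UTF-16 BE payload. A minimal read-back sketch under exactly those assumptions; the helper name and file path are illustrative, not part of this repo:

# Illustrative read-back of the metadata this node embeds.
# Requires Pillow and piexif; the path below is a hypothetical output file.
from PIL import Image
import piexif

def read_parameters(path):
    """Return the embedded generation metadata string, or '' if absent."""
    img = Image.open(path)
    if path.lower().endswith(".png"):
        # PNG branch: the node wrote a "parameters" text chunk
        return img.info.get("parameters", "")
    # JPEG/WebP branch: the node wrote an EXIF UserComment
    exif_bytes = img.info.get("exif")
    if not exif_bytes:
        return ""
    exif = piexif.load(exif_bytes)
    comment = exif.get("Exif", {}).get(piexif.ExifIFD.UserComment, b"")
    if comment.startswith(b"UNICODE\0"):
        # Strip the 8-byte encoding marker the node prepends
        return comment[8:].decode("utf-16be")
    return comment.decode("utf-8", errors="replace")

# print(read_parameters("output/ComfyUI_00000.png"))

One design note: format_metadata is async only because the lora hash lookup goes through LoraScanner's async cache, and save_images bridges it with asyncio.run; that call raises RuntimeError if an event loop is already running in the calling thread, which is worth keeping in mind inside a server process. The remaining diff updates an example metadata reference file to match the layout format_metadata emits.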
@@ -2,13 +2,10 @@ a dynamic and dramatic digital artwork featuring a stylized anthropomorphic whit
 Negative prompt: 
 Steps: 30, Sampler: Undefined, CFG scale: 3.5, Seed: 90300501, Size: 832x1216, Clip skip: 2, Created Date: 2025-03-05T13:51:18.1770234Z, Civitai resources: [{"type":"checkpoint","modelVersionId":691639,"modelName":"FLUX","modelVersionName":"Dev"},{"type":"lora","weight":0.4,"modelVersionId":1202162,"modelName":"Velvet\u0027s Mythic Fantasy Styles | Flux \u002B Pony \u002B illustrious","modelVersionName":"Flux Gothic Lines"},{"type":"lora","weight":0.8,"modelVersionId":1470588,"modelName":"Velvet\u0027s Mythic Fantasy Styles | Flux \u002B Pony \u002B illustrious","modelVersionName":"Flux Retro"},{"type":"lora","weight":0.75,"modelVersionId":746484,"modelName":"Elden Ring - Yoshitaka Amano","modelVersionName":"V1"},{"type":"lora","weight":0.2,"modelVersionId":914935,"modelName":"Ink-style","modelVersionName":"ink-dynamic"},{"type":"lora","weight":0.2,"modelVersionId":1189379,"modelName":"Painterly Fantasy by ChronoKnight - [FLUX \u0026 IL]","modelVersionName":"FLUX"},{"type":"lora","weight":0.2,"modelVersionId":757030,"modelName":"Mezzotint Artstyle for Flux - by Ethanar","modelVersionName":"V1"}], Civitai metadata: {}
 
-<lora:ck-shadow-circuit-IL:0.78>,
 masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject,
 dynamic angle, dutch angle, from below, epic half body portrait, gritty, wabi sabi, looking at viewer, woman is a geisha, parted lips,
-holographic skin, holofoil glitter, faint, glowing, ethereal, neon hair, glowing hair, otherworldly glow, she is dangerous,
-<lora:ck-nc-cyberpunk-IL-000011:0.4>
-<lora:ck-neon-retrowave-IL:0.2>
-<lora:ck-yoneyama-mai-IL-000014:0.4>
+holographic skin, holofoil glitter, faint, glowing, ethereal, neon hair, glowing hair, otherworldly glow, she is dangerous
+<lora:ck-shadow-circuit-IL:0.78>, <lora:ck-nc-cyberpunk-IL-000011:0.4>, <lora:ck-neon-retrowave-IL:0.2>, <lora:ck-yoneyama-mai-IL-000014:0.4>
 Negative prompt: score_6, score_5, score_4, bad quality, worst quality, worst detail, sketch, censorship, furry, window, headphones,
 Steps: 30, Sampler: Euler a, Schedule type: Simple, CFG scale: 7, Seed: 1405717592, Size: 832x1216, Model hash: 1ad6ca7f70, Model: waiNSFWIllustrious_v100, Denoising strength: 0.35, Hires CFG Scale: 5, Hires upscale: 1.3, Hires steps: 20, Hires upscaler: 4x-AnimeSharp, Lora hashes: "ck-shadow-circuit-IL: 88e247aa8c3d, ck-nc-cyberpunk-IL-000011: 935e6755554c, ck-neon-retrowave-IL: edafb9df7da1, ck-yoneyama-mai-IL-000014: 1b9305692a2e", Version: f2.0.1v1.10.1-1.10.1, Diffusion in Low Bits: Automatic (fp16 LoRA)
 
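This change consolidates the scattered lora tags onto a single line directly after the prompt, which matches both the string format_metadata produces (prompt, newline, loras) and the layout its regex scans. A quick sketch of that pattern applied to the consolidated line above:

# The lora-tag regex from save_image.py, applied to the consolidated
# example line. The pattern itself is taken verbatim from the diff.
import re

loras_text = ("<lora:ck-shadow-circuit-IL:0.78>, <lora:ck-nc-cyberpunk-IL-000011:0.4>, "
              "<lora:ck-neon-retrowave-IL:0.2>, <lora:ck-yoneyama-mai-IL-000014:0.4>")

# Captures (name, strength) from each <lora:name:strength> tag
matches = re.findall(r'<lora:([^:]+):([^>]+)>', loras_text)
for name, strength in matches:
    print(f"{name} -> {float(strength)}")
# ck-shadow-circuit-IL -> 0.78
# ck-nc-cyberpunk-IL-000011 -> 0.4
# ck-neon-retrowave-IL -> 0.2
# ck-yoneyama-mai-IL-000014 -> 0.4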