Add files via upload

Addresses issue of JSON not holding the workflow.
This commit is contained in:
tusharbhutt
2025-07-06 21:00:14 -06:00
committed by GitHub
parent 5a15469bce
commit ef7a68527f

View File

@@ -1,7 +1,9 @@
import os import os
import json import json
import re import re
from server import PromptServer
from datetime import datetime from datetime import datetime
from PIL import Image, PngImagePlugin from PIL import Image, PngImagePlugin
import numpy as np import numpy as np
import torch import torch
@@ -9,6 +11,28 @@ import folder_paths
from PIL.PngImagePlugin import PngInfo from PIL.PngImagePlugin import PngInfo
import platform import platform
def get_workflow(prompt=None, extra_pnginfo=None):
    """Locate the ComfyUI workflow graph from the available sources.

    Checks, in order:
      1. ``prompt`` itself (a dict carrying a "workflow" key),
      2. ``extra_pnginfo`` (where ComfyUI normally stashes the workflow),
      3. the running server's ``PromptServer.instance.last_prompt``.

    Returns the workflow dict from the first source that has one.
    Raises ValueError if no source carries a workflow.
    """
    # First: try directly from prompt.
    if isinstance(prompt, dict) and "workflow" in prompt:
        return prompt["workflow"]

    # Second: try from extra_pnginfo.
    if isinstance(extra_pnginfo, dict) and "workflow" in extra_pnginfo:
        print("[INFO] Workflow recovered from extra_pnginfo.")
        return extra_pnginfo["workflow"]

    # Third: fall back to the live server. Import lazily so the first two
    # branches work even when no ComfyUI server module is importable
    # (the original imported unconditionally on every call).
    from server import PromptServer
    # last_prompt may be absent OR explicitly None — guard both so .get()
    # cannot raise AttributeError.
    last = getattr(PromptServer.instance, "last_prompt", None) or {}
    workflow = last.get("workflow")
    if workflow:
        print("[INFO] Workflow recovered from PromptServer.last_prompt.")
        return workflow

    raise ValueError("🚫 No workflow found in prompt, extra_pnginfo, or PromptServer context.")
class EndlessNode_Imagesaver: class EndlessNode_Imagesaver:
""" """
Enhanced batch image saver with comprehensive metadata support Enhanced batch image saver with comprehensive metadata support
@@ -33,6 +57,9 @@ class EndlessNode_Imagesaver:
else: else:
return 200 # Conservative fallback return 200 # Conservative fallback
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
return {"required": return {"required":
@@ -190,25 +217,60 @@ class EndlessNode_Imagesaver:
counter += 1 counter += 1
def save_json_metadata(self, json_path, prompt_text, negative_text,
                       batch_index, creation_time, prompt=None, extra_pnginfo=None):
    """Export a drag-and-drop-compatible ComfyUI workflow JSON with metadata.

    Recovers the workflow graph via get_workflow(), ensures the core
    fields a loadable workflow needs (version/nodes/links/state), strips
    sidecar data, embeds prompt/batch metadata under "extra", sanitizes
    NaN values, and writes the result to json_path.

    Returns:
        True on success, False on any failure. Errors are printed rather
        than raised so a bad JSON export never aborts the image save.
    """
    import math

    def sanitize_json(obj):
        """Recursively replace NaN floats with None (NaN is not valid JSON)."""
        if isinstance(obj, dict):
            return {k: sanitize_json(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [sanitize_json(v) for v in obj]
        elif isinstance(obj, float) and math.isnan(obj):
            return None
        else:
            return obj

    try:
        # BUG FIX: forward extra_pnginfo as well — the original called
        # get_workflow(prompt) only, so a workflow carried solely in
        # extra_pnginfo (the usual ComfyUI case) was never recovered here.
        workflow = get_workflow(prompt, extra_pnginfo)

        # Ensure the core fields a drag-and-drop workflow requires.
        workflow.setdefault("version", 1)
        workflow.setdefault("nodes", [])
        workflow.setdefault("links", [])
        if "state" not in workflow:
            max_id = max((n.get("id", 0) for n in workflow["nodes"]), default=0)
            workflow["state"] = {"idCounter": max_id + 1}

        # Remove sidecar metadata if present.
        workflow.pop("extra_pnginfo", None)

        # Embed custom metadata without clobbering any existing "extra" keys.
        workflow["extra"] = workflow.get("extra", {})
        workflow["extra"].update({
            "prompt": prompt_text,
            "negative_prompt": negative_text,
            "batch_index": batch_index,
            "creation_time": creation_time,
            "source": "FluxSaver"
        })

        clean_json = sanitize_json(workflow)
        with open(json_path, "w", encoding="utf-8", newline="\n") as f:
            json.dump(clean_json, f, indent=2, default=self.encode_emoji, ensure_ascii=False)
        print(f"[SUCCESS] Workflow JSON saved: {json_path}")
        return True
    except Exception as e:
        print(f"[ERROR] Failed to save workflow JSON: {e}")
        return False
def generate_numbered_filename(self, filename_prefix, delimiter, counter, def generate_numbered_filename(self, filename_prefix, delimiter, counter,
filename_number_padding, filename_number_start, filename_number_padding, filename_number_start,
enable_filename_numbering, date_str, clean_prompt, ext): enable_filename_numbering, date_str, clean_prompt, ext):
@@ -243,18 +305,28 @@ class EndlessNode_Imagesaver:
return filename return filename
def save_batch_images(self, images, prompt_list, include_timestamp=True, def save_batch_images(self, images, prompt_list, include_timestamp=True,
timestamp_format="%Y-%m-%d_%H-%M-%S", image_format="PNG", timestamp_format="%Y-%m-%d_%H-%M-%S", image_format="PNG",
jpeg_quality=95, delimiter="_", jpeg_quality=95, delimiter="_",
prompt_words_limit=8, embed_workflow=True, save_json_metadata=False, prompt_words_limit=8, embed_workflow=True, save_json_metadata=False,
enable_filename_numbering=True, filename_number_padding=2, enable_filename_numbering=True, filename_number_padding=2,
filename_number_start=False, embed_png_metadata=True, filename_number_start=False, embed_png_metadata=True,
output_path="", filename_prefix="batch", output_path="", filename_prefix="batch",
negative_prompt_list="", json_folder="", prompt=None, extra_pnginfo=None): negative_prompt_list="", json_folder="", prompt=None, extra_pnginfo=None):
# Debug: Print tensor information # Debug: Print tensor information
print(f"DEBUG: Images tensor shape: {images.shape}") print(f"DEBUG: Images tensor shape: {images.shape}")
print(f"DEBUG: Images tensor type: {type(images)}") print(f"DEBUG: Images tensor type: {type(images)}")
# ✅ Fallback: repair prompt if missing or partial
if prompt is None or not isinstance(prompt, dict) or "workflow" not in prompt:
if extra_pnginfo and "workflow" in extra_pnginfo:
print("[INFO] Workflow recovered from extra_pnginfo.")
prompt = {"workflow": extra_pnginfo["workflow"]}
else:
print("[INFO] Workflow recovered from PromptServer.last_prompt.")
prompt = PromptServer.instance.last_prompt or {}
# Process output path with date/time validation (always process regardless of timestamp toggle) # Process output path with date/time validation (always process regardless of timestamp toggle)
processed_output_path = self.validate_and_process_path(output_path, delimiter) processed_output_path = self.validate_and_process_path(output_path, delimiter)
@@ -389,27 +461,25 @@ class EndlessNode_Imagesaver:
if image_format == "PNG": if image_format == "PNG":
# ITEM #3: Conditional PNG metadata embedding # ITEM #3: Conditional PNG metadata embedding
if embed_png_metadata: if embed_png_metadata:
# Prepare PNG metadata
metadata = PngImagePlugin.PngInfo() metadata = PngImagePlugin.PngInfo()
metadata.add_text("prompt", prompt_text) metadata.add_text("prompt", prompt_text)
metadata.add_text("negative_prompt", negative_text) metadata.add_text("negative_prompt", negative_text)
metadata.add_text("batch_index", str(i+1)) metadata.add_text("batch_index", str(i + 1))
metadata.add_text("creation_time", now.isoformat()) metadata.add_text("creation_time", now.isoformat())
# Add workflow data if requested if embed_workflow and prompt and "workflow" in prompt:
if embed_workflow: metadata.add_text("workflow", json.dumps(prompt, default=self.encode_emoji))
if prompt is not None:
metadata.add_text("workflow", json.dumps(prompt, default=self.encode_emoji)) if extra_pnginfo:
if extra_pnginfo is not None: for key, value in extra_pnginfo.items():
for key, value in extra_pnginfo.items(): metadata.add_text(key, json.dumps(value, default=self.encode_emoji))
metadata.add_text(key, json.dumps(value, default=self.encode_emoji))
img.save(file_path, format="PNG", optimize=True, img.save(file_path, format="PNG", optimize=True,
compress_level=self.compress_level, pnginfo=metadata) compress_level=self.compress_level, pnginfo=metadata)
else: else:
# ITEM #3: Save clean PNG without metadata
img.save(file_path, format="PNG", optimize=True, img.save(file_path, format="PNG", optimize=True,
compress_level=self.compress_level) compress_level=self.compress_level)
elif image_format == "JPEG": elif image_format == "JPEG":
# Convert RGBA to RGB for JPEG # Convert RGBA to RGB for JPEG