mirror of
https://github.com/tusharbhutt/Endless-Nodes.git
synced 2026-03-21 20:42:12 -03:00
Add files via upload
Uploading files for Endless Nodes V1.0
This commit is contained in:
32
batchers/__init__.py
Normal file
32
batchers/__init__.py
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
"""
|
||||||
|
EndlessSeaofStars Custom Nodes for ComfyUI
|
||||||
|
Batch processing nodes with specialized support for FLUX and SDXL models
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .endless_batchers import (
|
||||||
|
EndlessNode_SimpleBatchPrompts,
|
||||||
|
EndlessNode_FluxBatchPrompts,
|
||||||
|
EndlessNode_SDXLBatchPrompts,
|
||||||
|
EndlessNode_BatchNegativePrompts,
|
||||||
|
EndlessNode_PromptCounter,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Node class mappings for ComfyUI
|
||||||
|
NODE_CLASS_MAPPINGS = {
|
||||||
|
"SimpleBatchPrompts": EndlessNode_SimpleBatchPrompts,
|
||||||
|
"FluxBatchPrompts": EndlessNode_FluxBatchPrompts,
|
||||||
|
"SDXLBatchPrompts": EndlessNode_SDXLBatchPrompts,
|
||||||
|
"BatchNegativePrompts": EndlessNode_BatchNegativePrompts,
|
||||||
|
"PromptCounter": EndlessNode_PromptCounter,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Display names for ComfyUI interface
|
||||||
|
NODE_DISPLAY_NAME_MAPPINGS = {
|
||||||
|
"SimpleBatchPrompts": "Simple Batch Prompts",
|
||||||
|
"FluxBatchPrompts": "FLUX Batch Prompts",
|
||||||
|
"SDXLBatchPrompts": "SDXL Batch Prompts",
|
||||||
|
"BatchNegativePrompts": "Batch Negative Prompts",
|
||||||
|
"PromptCounter": "Prompt Counter",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
443
batchers/endless_batchers.py
Normal file
443
batchers/endless_batchers.py
Normal file
@@ -0,0 +1,443 @@
|
|||||||
|
import os
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
from datetime import datetime
|
||||||
|
from PIL import Image, PngImagePlugin
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
import folder_paths
|
||||||
|
from PIL.PngImagePlugin import PngInfo
|
||||||
|
import platform
|
||||||
|
|
||||||
|
class EndlessNode_SimpleBatchPrompts:
    """
    Takes multiple prompts (one per line) and creates batched conditioning tensors.

    Each non-empty line of ``prompts`` is encoded with the supplied CLIP model
    and the results are concatenated along the batch dimension.
    ``max_batch_size`` (0 = unlimited) either truncates the prompt list or
    cycles through it to fill the requested batch size.

    Returns (CONDITIONING, PROMPT_LIST, PROMPT_COUNT) where PROMPT_LIST is the
    '|'-joined list of prompts actually encoded and PROMPT_COUNT is its length.
    """
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "prompts": ("STRING", {"multiline": True, "default": "beautiful landscape\nmountain sunset\nocean waves\nfield of sunflowers"}),
            "clip": ("CLIP", ),
            "print_output": ("BOOLEAN", {"default": True}),
            "max_batch_size": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}),
        }}

    RETURN_TYPES = ("CONDITIONING", "STRING", "INT")
    RETURN_NAMES = ("CONDITIONING", "PROMPT_LIST", "PROMPT_COUNT")
    FUNCTION = "batch_encode"
    CATEGORY = "Endless 🌊✨/BatchProcessing"

    def batch_encode(self, prompts, clip, print_output, max_batch_size=0):
        """Encode each prompt line; return (conditioning, prompt_list, count)."""
        # Split prompts by lines and drop blanks.
        prompt_lines = [line.strip() for line in prompts.split('\n') if line.strip()]

        if not prompt_lines:
            raise ValueError("No valid prompts found. Please enter at least one prompt.")

        # Handle batch size logic (0 = use as many prompts as were supplied).
        if max_batch_size > 0 and max_batch_size < len(prompt_lines):
            # Limit to max_batch_size.
            prompt_lines = prompt_lines[:max_batch_size]
            if print_output:
                print(f"Limited to first {max_batch_size} prompts due to max_batch_size setting")
        elif max_batch_size > len(prompt_lines) and max_batch_size > 0:
            # Cycle through prompts to fill the batch.
            original_count = len(prompt_lines)
            while len(prompt_lines) < max_batch_size:
                prompt_lines.extend(prompt_lines[:min(original_count, max_batch_size - len(prompt_lines))])
            if print_output:
                print(f"Cycling through {original_count} prompts to fill batch size of {max_batch_size}")

        # BUGFIX: count AFTER truncation/cycling so PROMPT_COUNT always matches
        # the '|'-separated PROMPT_LIST output (the original counted the raw
        # input lines, which disagreed with the list when max_batch_size != 0).
        prompt_count = len(prompt_lines)

        if print_output:
            print(f"Processing {len(prompt_lines)} prompts in batch:")
            for i, prompt in enumerate(prompt_lines):
                print(f" {i+1}: {prompt}")

        # Encode each prompt separately; fall back to an empty prompt on error.
        cond_tensors = []
        pooled_tensors = []
        for i, prompt in enumerate(prompt_lines):
            try:
                tokens = clip.tokenize(prompt)
                cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
                cond_tensors.append(cond)
                pooled_tensors.append(pooled)
            except Exception as e:
                print(f"Error encoding prompt {i+1} '{prompt}': {e}")
                # Use a fallback empty prompt so the batch stays the right size.
                try:
                    tokens = clip.tokenize("")
                    cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
                    cond_tensors.append(cond)
                    pooled_tensors.append(pooled)
                    print(f" Using empty fallback for prompt {i+1}")
                except Exception as fallback_error:
                    raise ValueError(f"Failed to encode prompt {i+1} and fallback failed: {fallback_error}")

        # Batch the conditioning tensors along the batch dimension.
        try:
            batched_cond = torch.cat(cond_tensors, dim=0)
            batched_pooled = torch.cat(pooled_tensors, dim=0)

            if print_output:
                print(f"Created batched conditioning: {batched_cond.shape}")
                print(f"Created batched pooled: {batched_pooled.shape}")

            # Return as proper conditioning format.
            conditioning = [[batched_cond, {"pooled_output": batched_pooled}]]
        except Exception as e:
            print(f"Error creating batched conditioning: {e}")
            print("Falling back to list format...")
            # Fallback to one conditioning entry per prompt if batching fails.
            conditioning = []
            for i in range(len(cond_tensors)):
                conditioning.append([cond_tensors[i], {"pooled_output": pooled_tensors[i]}])

        # '|'-joined prompt list for downstream filename use.
        prompt_list_str = "|".join(prompt_lines)

        return (conditioning, prompt_list_str, prompt_count)
|
|
||||||
|
class EndlessNode_FluxBatchPrompts:
    """
    Specialized batch prompt encoder for FLUX models.

    Encodes one prompt per line and attaches the FLUX guidance value to the
    batched conditioning; both "guidance" and "guidance_scale" keys are set
    because different FLUX implementations read different keys.

    Returns (CONDITIONING, PROMPT_LIST, PROMPT_COUNT).
    """
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "prompts": ("STRING", {"multiline": True, "default": "beautiful landscape\nmountain sunset\nocean waves\nfield of sunflowers"}),
            "clip": ("CLIP", ),
            "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
            "print_output": ("BOOLEAN", {"default": True}),
            "max_batch_size": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}),
        }}

    RETURN_TYPES = ("CONDITIONING", "STRING", "INT")
    RETURN_NAMES = ("CONDITIONING", "PROMPT_LIST", "PROMPT_COUNT")
    FUNCTION = "batch_encode_flux"
    CATEGORY = "Endless 🌊✨/BatchProcessing"

    def batch_encode_flux(self, prompts, clip, guidance, print_output, max_batch_size=0):
        """Encode prompts for FLUX; return (conditioning, prompt_list, count)."""
        # Split prompts by lines and drop blanks.
        prompt_lines = [line.strip() for line in prompts.split('\n') if line.strip()]

        if not prompt_lines:
            raise ValueError("No valid prompts found. Please enter at least one prompt.")

        # Handle batch size logic (0 = use as many prompts as were supplied).
        if max_batch_size > 0 and max_batch_size < len(prompt_lines):
            prompt_lines = prompt_lines[:max_batch_size]
            if print_output:
                print(f"Limited to first {max_batch_size} prompts due to max_batch_size setting")
        elif max_batch_size > len(prompt_lines) and max_batch_size > 0:
            original_count = len(prompt_lines)
            while len(prompt_lines) < max_batch_size:
                prompt_lines.extend(prompt_lines[:min(original_count, max_batch_size - len(prompt_lines))])
            if print_output:
                print(f"Cycling through {original_count} prompts to fill batch size of {max_batch_size}")

        # BUGFIX: count AFTER truncation/cycling so PROMPT_COUNT matches the
        # PROMPT_LIST output (the original counted the raw input lines).
        prompt_count = len(prompt_lines)

        if print_output:
            print(f"Processing {len(prompt_lines)} FLUX prompts in batch:")
            for i, prompt in enumerate(prompt_lines):
                print(f" {i+1}: {prompt}")

        # Encode each prompt; fall back to an empty prompt on failure.
        cond_tensors = []
        pooled_tensors = []
        for i, prompt in enumerate(prompt_lines):
            try:
                tokens = clip.tokenize(prompt)
                cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
                cond_tensors.append(cond)
                pooled_tensors.append(pooled)
            except Exception as e:
                print(f"Error encoding FLUX prompt {i+1} '{prompt}': {e}")
                # Use a fallback empty prompt so the batch stays the right size.
                try:
                    tokens = clip.tokenize("")
                    cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
                    cond_tensors.append(cond)
                    pooled_tensors.append(pooled)
                    print(f" Using empty fallback for FLUX prompt {i+1}")
                except Exception as fallback_error:
                    raise ValueError(f"Failed to encode FLUX prompt {i+1} and fallback failed: {fallback_error}")

        # Batch the conditioning tensors along the batch dimension.
        try:
            batched_cond = torch.cat(cond_tensors, dim=0)
            batched_pooled = torch.cat(pooled_tensors, dim=0)

            if print_output:
                print(f"Created FLUX batched conditioning: {batched_cond.shape}")
                print(f"Created FLUX batched pooled: {batched_pooled.shape}")

            # FLUX-specific conditioning with guidance.
            conditioning = [[batched_cond, {
                "pooled_output": batched_pooled,
                "guidance": guidance,
                "guidance_scale": guidance,  # Some FLUX implementations use this key
            }]]
        except Exception as e:
            print(f"Error creating FLUX batched conditioning: {e}")
            print("Falling back to list format...")
            # Fallback: one conditioning entry per prompt, guidance on each.
            conditioning = []
            for i in range(len(cond_tensors)):
                conditioning.append([cond_tensors[i], {
                    "pooled_output": pooled_tensors[i],
                    "guidance": guidance,
                    "guidance_scale": guidance,
                }])

        prompt_list_str = "|".join(prompt_lines)
        return (conditioning, prompt_list_str, prompt_count)
|
|
||||||
|
class EndlessNode_SDXLBatchPrompts:
    """
    Specialized batch prompt encoder for SDXL models.

    Encodes one prompt per line with the SDXL CLIP and concatenates the
    results into a single batched conditioning (simplified: no SDXL size
    parameters are attached).

    Returns (CONDITIONING, PROMPT_LIST, PROMPT_COUNT).
    """
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "prompts": ("STRING", {"multiline": True, "default": "beautiful landscape\nmountain sunset\nocean waves"}),
            "clip": ("CLIP", ),
            "print_output": ("BOOLEAN", {"default": True}),
            "max_batch_size": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}),
        }}

    RETURN_TYPES = ("CONDITIONING", "STRING", "INT")
    RETURN_NAMES = ("CONDITIONING", "PROMPT_LIST", "PROMPT_COUNT")
    FUNCTION = "batch_encode_sdxl"
    CATEGORY = "Endless 🌊✨/BatchProcessing"

    def batch_encode_sdxl(self, prompts, clip, print_output, max_batch_size=0):
        """Encode prompts for SDXL; return (conditioning, prompt_list, count)."""
        # Split prompts by lines and drop blanks.
        prompt_lines = [line.strip() for line in prompts.split('\n') if line.strip()]

        if not prompt_lines:
            raise ValueError("No valid prompts found. Please enter at least one prompt.")

        # Handle batch size logic (0 = use as many prompts as were supplied).
        if max_batch_size > 0 and max_batch_size < len(prompt_lines):
            prompt_lines = prompt_lines[:max_batch_size]
            if print_output:
                print(f"Limited to first {max_batch_size} prompts due to max_batch_size setting")
        elif max_batch_size > len(prompt_lines) and max_batch_size > 0:
            original_count = len(prompt_lines)
            while len(prompt_lines) < max_batch_size:
                prompt_lines.extend(prompt_lines[:min(original_count, max_batch_size - len(prompt_lines))])
            if print_output:
                print(f"Cycling through {original_count} prompts to fill batch size of {max_batch_size}")

        # BUGFIX: count AFTER truncation/cycling so PROMPT_COUNT matches the
        # PROMPT_LIST output (the original counted the raw input lines).
        prompt_count = len(prompt_lines)

        if print_output:
            print(f"Processing {len(prompt_lines)} SDXL prompts in batch:")
            for i, prompt in enumerate(prompt_lines):
                print(f" {i+1}: {prompt}")

        # Encode each prompt; fall back to an empty prompt on failure.
        cond_tensors = []
        pooled_tensors = []
        for i, prompt in enumerate(prompt_lines):
            try:
                tokens = clip.tokenize(prompt)
                cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
                cond_tensors.append(cond)
                pooled_tensors.append(pooled)
            except Exception as e:
                print(f"Error encoding SDXL prompt {i+1} '{prompt}': {e}")
                # Use a fallback empty prompt so the batch stays the right size.
                try:
                    tokens = clip.tokenize("")
                    cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
                    cond_tensors.append(cond)
                    pooled_tensors.append(pooled)
                    print(f" Using empty fallback for SDXL prompt {i+1}")
                except Exception as fallback_error:
                    raise ValueError(f"Failed to encode SDXL prompt {i+1} and fallback failed: {fallback_error}")

        # Batch the conditioning tensors along the batch dimension.
        try:
            batched_cond = torch.cat(cond_tensors, dim=0)
            batched_pooled = torch.cat(pooled_tensors, dim=0)

            if print_output:
                print(f"Created SDXL batched conditioning: {batched_cond.shape}")
                print(f"Created SDXL batched pooled: {batched_pooled.shape}")

            # SDXL-specific conditioning - simplified without size parameters.
            conditioning = [[batched_cond, {"pooled_output": batched_pooled}]]
        except Exception as e:
            print(f"Error creating SDXL batched conditioning: {e}")
            print("Falling back to list format...")
            # Fallback to one conditioning entry per prompt if batching fails.
            conditioning = []
            for i in range(len(cond_tensors)):
                conditioning.append([cond_tensors[i], {"pooled_output": pooled_tensors[i]}])

        prompt_list_str = "|".join(prompt_lines)
        return (conditioning, prompt_list_str, prompt_count)
|
|
||||||
|
class EndlessNode_BatchNegativePrompts:
    """
    Batch negative-prompt encoder.

    Encodes one negative prompt per input line with the supplied CLIP model
    and concatenates the results into a single batched conditioning.  An
    empty input is treated as a single empty negative prompt, and
    ``max_batch_size`` (0 = unlimited) truncates or cycles the list.
    """
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "negative_prompts": ("STRING", {"multiline": True, "default": "blurry, low quality\nartifacts, distorted\nnoise, bad anatomy"}),
            "clip": ("CLIP", ),
            "print_output": ("BOOLEAN", {"default": True}),
            "max_batch_size": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}),
        }}

    RETURN_TYPES = ("CONDITIONING", "STRING")
    RETURN_NAMES = ("NEGATIVE_CONDITIONING", "NEGATIVE_PROMPT_LIST")
    FUNCTION = "batch_encode_negative"
    CATEGORY = "Endless 🌊✨/BatchProcessing"

    def batch_encode_negative(self, negative_prompts, clip, print_output, max_batch_size=0):
        """Encode the negative prompts; return (conditioning, prompt_list)."""
        # One prompt per non-empty line; default to a single empty prompt.
        prompt_lines = [line.strip() for line in negative_prompts.split('\n') if line.strip()] or [""]

        # Apply the optional batch-size limit / cycling (0 disables both).
        if 0 < max_batch_size < len(prompt_lines):
            prompt_lines = prompt_lines[:max_batch_size]
            if print_output:
                print(f"Limited to first {max_batch_size} negative prompts due to max_batch_size setting")
        elif max_batch_size > len(prompt_lines) and max_batch_size > 0:
            original_count = len(prompt_lines)
            while len(prompt_lines) < max_batch_size:
                prompt_lines.extend(prompt_lines[:min(original_count, max_batch_size - len(prompt_lines))])
            if print_output:
                print(f"Cycling through {original_count} negative prompts to fill batch size of {max_batch_size}")

        if print_output:
            print(f"Processing {len(prompt_lines)} negative prompts in batch:")
            for i, prompt in enumerate(prompt_lines):
                print(f" {i+1}: {prompt if prompt else '(empty)'}")

        # Encode every prompt, retrying once with an empty prompt on failure.
        cond_tensors, pooled_tensors = [], []
        for i, prompt in enumerate(prompt_lines):
            try:
                cond, pooled = clip.encode_from_tokens(clip.tokenize(prompt), return_pooled=True)
            except Exception as e:
                print(f"Error encoding negative prompt {i+1} '{prompt}': {e}")
                try:
                    cond, pooled = clip.encode_from_tokens(clip.tokenize(""), return_pooled=True)
                    print(f" Using empty fallback for negative prompt {i+1}")
                except Exception as fallback_error:
                    raise ValueError(f"Failed to encode negative prompt {i+1} and fallback failed: {fallback_error}")
            cond_tensors.append(cond)
            pooled_tensors.append(pooled)

        # Concatenate along the batch dimension; fall back to one conditioning
        # entry per prompt if the tensors cannot be batched.
        try:
            batched_cond = torch.cat(cond_tensors, dim=0)
            batched_pooled = torch.cat(pooled_tensors, dim=0)

            if print_output:
                print(f"Created negative batched conditioning: {batched_cond.shape}")
                print(f"Created negative batched pooled: {batched_pooled.shape}")

            # Simple conditioning format that works with all model types.
            conditioning = [[batched_cond, {"pooled_output": batched_pooled}]]
        except Exception as e:
            print(f"Error creating negative batched conditioning: {e}")
            print("Falling back to list format...")
            conditioning = [[c, {"pooled_output": p}] for c, p in zip(cond_tensors, pooled_tensors)]

        return (conditioning, "|".join(prompt_lines))
|
|
||||||
|
class EndlessNode_PromptCounter:
    """
    Utility node that counts the prompts in the input text and builds a short
    preview (first five prompts), optionally echoing it to the console.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompts": ("STRING", {"multiline": True, "forceInput": True}),
                "print_to_console": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("INT", "STRING")
    RETURN_NAMES = ("count", "preview")
    FUNCTION = "count_prompts"
    CATEGORY = "Endless 🌊✨/BatchProcessing"

    def count_prompts(self, prompts, print_to_console):
        """Return (count, preview) for the non-empty lines of *prompts*."""
        lines = [ln.strip() for ln in prompts.split('\n') if ln.strip()]
        count = len(lines)

        # Header plus up to five numbered prompts, then an ellipsis summary.
        parts = [f"Found {count} prompt{'s' if count != 1 else ''}:\n"]
        parts += [f"{idx + 1}. {text}\n" for idx, text in enumerate(lines[:5])]
        if count > 5:
            parts.append(f"... and {count - 5} more")
        preview = "".join(parts)

        if print_to_console:
            print(f"\n=== Prompt Counter ===")
            print(preview)
            print("======================\n")

        return (count, preview)
||||||
|
|
||||||
|
# Node class mappings for ComfyUI registration.
# NOTE(review): batchers/__init__.py registers these same classes under
# shorter keys ("SimpleBatchPrompts", ...) — confirm which registry ComfyUI
# actually loads before unifying the two sets of node IDs.
NODE_CLASS_MAPPINGS = {
    "EndlessNode_SimpleBatchPrompts": EndlessNode_SimpleBatchPrompts,
    "EndlessNode_FluxBatchPrompts": EndlessNode_FluxBatchPrompts,
    "EndlessNode_SDXLBatchPrompts": EndlessNode_SDXLBatchPrompts,
    "EndlessNode_BatchNegativePrompts": EndlessNode_BatchNegativePrompts,
    "EndlessNode_PromptCounter": EndlessNode_PromptCounter,
}

# Display names shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "EndlessNode_SimpleBatchPrompts": "Simple Batch Prompts",
    "EndlessNode_FluxBatchPrompts": "Flux Batch Prompts",
    "EndlessNode_SDXLBatchPrompts": "SDXL Batch Prompts",
    "EndlessNode_BatchNegativePrompts": "Batch Negative Prompts",
    "EndlessNode_PromptCounter": "Prompt Counter",
}
|
||||||
14
image_analysis/__init__.py
Normal file
14
image_analysis/__init__.py
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
from .endless_image_analysis import (
    EndlessNode_ImageNoveltyScorer,
    EndlessNode_ImageComplexityScorer,
)

# Node class mappings for ComfyUI registration.
NODE_CLASS_MAPPINGS = {
    "ImageNoveltyScorer": EndlessNode_ImageNoveltyScorer,
    "ImageComplexityScorer": EndlessNode_ImageComplexityScorer,
}

# Human-readable names shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "ImageNoveltyScorer": "Novelty Score (CLIP)",
    "ImageComplexityScorer": "Complexity Score (Edge Density)",
}
|
||||||
131
image_analysis/endless_image_analysis.py
Normal file
131
image_analysis/endless_image_analysis.py
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
import torch
|
||||||
|
import torchvision.transforms as transforms
|
||||||
|
import torchvision.models as models
|
||||||
|
import torch.nn.functional as F
|
||||||
|
import numpy as np
|
||||||
|
from PIL import Image, ImageFilter
|
||||||
|
import os
|
||||||
|
import hashlib
|
||||||
|
|
||||||
|
# CLIP model identifier and the on-disk cache location for its weights.
CLIP_MODEL_NAME = "ViT-B/32"
CLIP_DOWNLOAD_PATH = os.path.join(os.path.expanduser("~"), ".cache", "clip")


# Helper to download/load CLIP model from OpenAI
def load_clip_model():
    """Load (downloading on first use) the CLIP model and its preprocessor.

    Returns:
        (model, preprocess): the model on CPU in eval mode, and the image
        preprocessing transform that prepares a PIL image for it.
    """
    import clip  # requires `clip` package from OpenAI; deferred: heavy import
    model, preprocess = clip.load(CLIP_MODEL_NAME, device="cpu", download_root=CLIP_DOWNLOAD_PATH)
    return model.eval(), preprocess
||||||
|
|
||||||
|
# Image Complexity via Edge Density
|
||||||
|
def compute_edge_density(image: Image.Image) -> float:
    """Score image complexity as the fraction of edge pixels, scaled to 0-10.

    The image is converted to grayscale, run through PIL's FIND_EDGES filter,
    and the share of pixels with an edge response above 20 is returned on a
    0-10 scale, rounded to 3 decimals.
    """
    edge_map = np.asarray(image.convert("L").filter(ImageFilter.FIND_EDGES), dtype=np.uint8)
    # Percentage of pixels whose edge response exceeds the threshold of 20.
    edge_fraction = np.mean(edge_map > 20)
    return round(edge_fraction * 10, 3)
|
||||||
|
|
||||||
|
# Image Novelty via distance from reference CLIP embeddings
|
||||||
|
class ClipImageEmbedder:
    """Wraps a CPU CLIP model to produce L2-normalized image embeddings."""

    def __init__(self):
        # Loads (and possibly downloads) the CLIP model once per instance.
        self.model, self.preprocess = load_clip_model()

    def get_embedding(self, image: Image.Image) -> torch.Tensor:
        """Return a unit-norm CLIP embedding of *image* (batch dim of 1)."""
        image_input = self.preprocess(image).unsqueeze(0)
        with torch.no_grad():
            embedding = self.model.encode_image(image_input).float()
        # Normalize so cosine similarity reduces to a dot product.
        return F.normalize(embedding, dim=-1)


# You could preload this from reference images; empty means "no references",
# in which case the novelty scorer returns 0.0.
REFERENCE_EMBEDDINGS = []
|
||||||
|
|
||||||
|
class EndlessNode_ImageNoveltyScorer:
    """Score how novel an image is relative to reference images.

    Novelty = (1 - mean CLIP cosine similarity to the references) * 10,
    rounded to 3 decimals.  Returns 0.0 when no references are available.
    """

    def __init__(self):
        # Loads the CLIP model once per node instance.
        self.embedder = ClipImageEmbedder()

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
            },
            # BUGFIX: reference_images was declared under "required" with an
            # ad-hoc {"optional": True} flag, which is not how ComfyUI marks
            # optional inputs; a truly optional input belongs in the
            # "optional" section so the node can run without it.
            "optional": {
                "reference_images": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("FLOAT",)
    RETURN_NAMES = ("novelty_score",)
    FUNCTION = "score_novelty"
    CATEGORY = "Endless 🌊✨/Image Scoring"

    def score_novelty(self, image, reference_images=None):
        """Return a 0-10 novelty score for *image* (10 = most novel)."""
        img = self._to_pil(image)
        img_emb = self.embedder.get_embedding(img)

        # Explicit references override the module-level precomputed set.
        references = REFERENCE_EMBEDDINGS
        if reference_images is not None:
            references = [self.embedder.get_embedding(self._to_pil(ref)) for ref in reference_images]

        if not references:
            return (0.0,)

        sims = [F.cosine_similarity(img_emb, ref_emb).item() for ref_emb in references]
        avg_sim = sum(sims) / len(sims)
        # Higher score = less similar to the references.
        novelty = round((1.0 - avg_sim) * 10, 3)
        return (novelty,)

    def _to_pil(self, img):
        """Convert a torch tensor / numpy array / PIL image to a PIL image.

        Raises:
            ValueError: for unsupported input types or array ranks.
        """
        if isinstance(img, torch.Tensor):
            # Drop singleton dims (e.g. batch of 1) and move to CPU numpy.
            img = img.squeeze().detach().cpu().numpy()
        if isinstance(img, np.ndarray):
            # Assume float arrays are in [0, 1] and rescale to uint8;
            # presumably ComfyUI IMAGE tensors — TODO confirm value range.
            if img.max() <= 1.0:
                img = (img * 255).astype(np.uint8)
            else:
                img = img.astype(np.uint8)
            if img.ndim == 3:
                return Image.fromarray(img)
            if img.ndim == 2:
                return Image.fromarray(img, mode='L')
            # BUGFIX: the original fell through and implicitly returned None
            # for other ranks; fail loudly instead.
            raise ValueError(f"Unsupported image array shape: {img.shape}")
        if isinstance(img, Image.Image):
            return img
        raise ValueError(f"Unsupported image type: {type(img)}")
||||||
|
|
||||||
|
|
||||||
|
class EndlessNode_ImageComplexityScorer:
    """Score image complexity via edge density (0-10, see compute_edge_density)."""

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",)
            }
        }

    RETURN_TYPES = ("FLOAT",)
    RETURN_NAMES = ("complexity_score",)
    FUNCTION = "score_complexity"
    CATEGORY = "Endless 🌊✨/Image Scoring"

    def score_complexity(self, image):
        """Return the edge-density complexity score for *image*."""
        img = self._to_pil(image)
        complexity = compute_edge_density(img)
        return (complexity,)

    def _to_pil(self, img):
        """Convert a torch tensor / numpy array / PIL image to a PIL image.

        Raises:
            ValueError: for unsupported input types or array ranks.
        """
        if isinstance(img, torch.Tensor):
            # Drop singleton dims (e.g. batch of 1) and move to CPU numpy.
            img = img.squeeze().detach().cpu().numpy()
        if isinstance(img, np.ndarray):
            # Assume float arrays are in [0, 1] and rescale to uint8;
            # presumably ComfyUI IMAGE tensors — TODO confirm value range.
            if img.max() <= 1.0:
                img = (img * 255).astype(np.uint8)
            else:
                img = img.astype(np.uint8)
            if img.ndim == 3:
                return Image.fromarray(img)
            if img.ndim == 2:
                return Image.fromarray(img, mode='L')
            # BUGFIX: the original fell through and implicitly returned None
            # for other ranks; fail loudly instead (matches the novelty
            # scorer's _to_pil).
            raise ValueError(f"Unsupported image array shape: {img.shape}")
        if isinstance(img, Image.Image):
            return img
        raise ValueError(f"Unsupported image type: {type(img)}")
|
||||||
|
|
||||||
8
image_analysis/requirements.txt
Normal file
8
image_analysis/requirements.txt
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
torch>=1.13.1
|
||||||
|
torchvision>=0.14.1
|
||||||
|
Pillow>=9.0.0
|
||||||
|
numpy>=1.21.0
|
||||||
|
ftfy
|
||||||
|
regex
|
||||||
|
tqdm
|
||||||
|
clip @ git+https://github.com/openai/CLIP.git
|
||||||
8
image_saver/__init__.py
Normal file
8
image_saver/__init__.py
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
from .endless_image_saver import EndlessNode_Imagesaver

# Node class mapping for ComfyUI registration.
NODE_CLASS_MAPPINGS = {
    "Image_saver": EndlessNode_Imagesaver,
}
# Display name shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "Image_saver": "Image Saver",
}
|
||||||
594
image_saver/endless_image_saver.py
Normal file
594
image_saver/endless_image_saver.py
Normal file
@@ -0,0 +1,594 @@
|
|||||||
|
import os
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
from datetime import datetime
|
||||||
|
from PIL import Image, PngImagePlugin
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
import folder_paths
|
||||||
|
from PIL.PngImagePlugin import PngInfo
|
||||||
|
import platform
|
||||||
|
|
||||||
|
class EndlessNode_Imagesaver:
|
||||||
|
"""
|
||||||
|
Enhanced batch image saver with comprehensive metadata support
|
||||||
|
Saves batched images with individual prompt names in filenames
|
||||||
|
Automatically handles multiple images from batch processing
|
||||||
|
Enhanced with workflow embedding, JSON export, and robust filename handling
|
||||||
|
"""
|
||||||
|
    def __init__(self):
        # Default ComfyUI output directory for saved images.
        self.output_dir = folder_paths.get_output_directory()
        # Marks saved files as "output" for the ComfyUI gallery.
        self.type = "output"
        # PNG zlib compression level (0-9); 4 balances speed and file size.
        self.compress_level = 4
        # OS-specific filename length limits
        self.max_filename_length = self._get_max_filename_length()
|
||||||
|
|
||||||
|
def _get_max_filename_length(self):
|
||||||
|
"""Get maximum filename length based on OS"""
|
||||||
|
system = platform.system().lower()
|
||||||
|
if system == 'windows':
|
||||||
|
return 255 # NTFS limit
|
||||||
|
elif system in ['linux', 'darwin']: # Linux and macOS
|
||||||
|
return 255 # ext4/APFS limit
|
||||||
|
else:
|
||||||
|
return 200 # Conservative fallback
|
||||||
|
|
||||||
|
    @classmethod
    def INPUT_TYPES(s):
        # Node input schema: required saving options, optional path/prefix
        # overrides, and hidden workflow metadata supplied by ComfyUI.
        return {"required":
            {"images": ("IMAGE", ),
            "prompt_list": ("STRING", {"forceInput": True}),
            "include_timestamp": ("BOOLEAN", {"default": True}),
            "timestamp_format": ("STRING", {"default": "%Y-%m-%d_%H-%M-%S", "description": "Use Python strftime format.\nExample: %Y-%m-%d %H-%M-%S\nSee: strftime.org for full options."}),
            "image_format": (["PNG", "JPEG", "WEBP"], {"default": "PNG"}),
            "jpeg_quality": ("INT", {"default": 95, "min": 1, "max": 100, "step": 1}),
            "delimiter": ("STRING", {"default": "_"}),
            "prompt_words_limit": ("INT", {"default": 8, "min": 1, "max": 16, "step": 1}),
            "embed_workflow": ("BOOLEAN", {"default": True}),
            "save_json_metadata": ("BOOLEAN", {"default": False}),
            # ITEM #2: Enable/disable number padding
            "enable_filename_numbering": ("BOOLEAN", {"default": True}),
            # ITEM #1: Filename Number Padding Control
            "filename_number_padding": ("INT", {"default": 2, "min": 1, "max": 9, "step": 1}),
            "filename_number_start": ("BOOLEAN", {"default": False}),
            # ITEM #3: Conditional PNG Metadata Embedding
            "embed_png_metadata": ("BOOLEAN", {"default": True}),
            },
            "optional":
            {"output_path": ("STRING", {"default": ""}),
            "filename_prefix": ("STRING", {"default": "Batch"}),
            "negative_prompt_list": ("STRING", {"default": ""}),
            "json_folder": ("STRING", {"default": ""}),
            },
            # Hidden inputs are filled in by ComfyUI itself (workflow graph
            # and extra PNG info for metadata embedding).
            "hidden": {
                "prompt": "PROMPT",
                "extra_pnginfo": "EXTRA_PNGINFO"
            }
        }
|
||||||
|
|
||||||
|
    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("saved_paths",)
    FUNCTION = "save_batch_images"
    # OUTPUT_NODE makes ComfyUI execute this node even when nothing consumes
    # its outputs.
    OUTPUT_NODE = True
    CATEGORY = "Endless 🌊✨/IO"
|
||||||
|
|
||||||
|
def encode_emoji(self, obj):
    """Serialization hook used both directly and as ``json.dumps(default=...)``.

    Strings are round-tripped through UTF-8 with ``surrogatepass`` so that
    emoji / lone surrogates survive encoding. Non-string objects are
    stringified: returning them unchanged (the previous behavior) made
    ``json.dumps`` call ``default`` on the same unserializable object again
    and again until RecursionError.
    """
    if isinstance(obj, str):
        return obj.encode('utf-8', 'surrogatepass').decode('utf-8')
    # Fallback for unserializable values so json.dump always terminates.
    return str(obj)
|
||||||
|
|
||||||
|
def clean_filename(self, text, max_words=8, delimiter="_"):
    """Make *text* safe for use in a filename.

    Caps the word count, normalizes emoji via encode_emoji, swaps
    filesystem-illegal characters and whitespace for *delimiter*, and strips
    anything outside the safe character set.
    """
    # Keep only the first max_words words.
    text = ' '.join(text.split()[:max_words])

    # Normalize emoji / surrogate pairs.
    text = self.encode_emoji(text)

    # Replace illegal filesystem characters, then collapse whitespace runs.
    text = re.sub(r'[<>:"/\\|?*]', delimiter, text)
    text = re.sub(r'\s+', delimiter, text)

    # Finally drop anything that is not word char / dash / dot / delimiter.
    return re.sub(r'[^\w\-_.{}]'.format(re.escape(delimiter)), '', text)
|
||||||
|
|
||||||
|
def format_timestamp(self, dt, format_string, delimiter='_'):
    """Render *dt* with a strftime pattern, made filename-safe.

    Colons become dashes; whitespace runs become *delimiter* (when truthy).
    An invalid pattern falls back to the default "%Y-%m-%d_%H-%M-%S".
    """
    try:
        rendered = dt.strftime(format_string).replace(':', '-')
        if delimiter:
            rendered = re.sub(r'\s+', delimiter, rendered)
        return rendered
    except Exception as exc:
        print(f"Invalid timestamp format: {exc}")
        return dt.strftime("%Y-%m-%d_%H-%M-%S")
|
||||||
|
|
||||||
|
def validate_and_process_path(self, path, delimiter="_"):
    """Normalize a user-supplied output path.

    Expands strftime tokens (segments containing '%') against the current
    time, sanitizes illegal characters in plain segments, and preserves a
    UNC (\\\\server\\share) or drive-letter (C:) prefix untouched.
    Empty/blank input is returned as-is.
    """
    if not path or path.strip() == "":
        return path

    now = datetime.now()

    # Use the platform separator consistently.
    path = path.replace("/", os.sep).replace("\\", os.sep)

    prefix = ""
    segments = path.split(os.sep)

    if path.startswith("\\\\"):  # UNC path: \\server\share\...
        if len(segments) < 4:
            raise ValueError(f"Invalid UNC path: {path}")
        prefix = os.sep.join(segments[:4])
        segments = segments[4:]
    elif re.match(r"^[A-Za-z]:$", segments[0]):  # Windows drive letter
        prefix = segments[0]
        segments = segments[1:]

    cleaned = []
    for segment in segments:
        if not segment:
            continue
        if "%" in segment:
            # Segment contains date placeholders -> expand them.
            cleaned.append(self.format_timestamp(now, segment, delimiter))
        else:
            # Plain folder name -> sanitize illegal characters.
            cleaned.append(re.sub(r'[<>:"/\\|?*]', delimiter, segment))

    return os.path.join(prefix, *cleaned)
|
||||||
|
|
||||||
|
def ensure_filename_length(self, full_path, base_name, extension):
    """Truncate *base_name* so the resulting path fits self.max_filename_length.

    The budget is the configured limit minus the extension and the directory
    prefix (+1 for the separator). Previously a long directory could drive
    the budget negative, and ``base_name[:negative]`` then sliced from the
    *end* of the string, producing garbage names; the budget is now clamped
    to at least one character.
    """
    directory = os.path.dirname(full_path)

    # Space left for the base name once directory + separator + extension
    # are accounted for.
    dir_length = len(directory) + 1
    max_base_length = self.max_filename_length - len(extension) - dir_length

    if len(base_name) > max_base_length:
        # Clamp so the slice below can never go negative. -3 leaves room
        # for the "..." truncation marker.
        keep = max(1, max_base_length - 3)
        base_name = base_name[:keep] + "..."

    return os.path.join(directory, base_name + extension)
|
||||||
|
|
||||||
|
def get_unique_filename(self, file_path):
    """Return *file_path* unchanged if free, else a "_NNN"-numbered variant.

    Counter starts at 001 and grows until an unused name is found; the stem
    is trimmed (with a "..." marker) when the numbered name would exceed
    self.max_filename_length.
    """
    if not os.path.exists(file_path):
        return file_path

    directory = os.path.dirname(file_path)
    stem, ext = os.path.splitext(os.path.basename(file_path))

    counter = 1
    while True:
        candidate = f"{stem}_{counter:03d}{ext}"

        if len(candidate) > self.max_filename_length:
            # Trim the stem to make room for the numeric suffix.
            room = self.max_filename_length - len(f"_{counter:03d}{ext}")
            candidate = f"{stem[:room-3]}..._{counter:03d}{ext}"

        candidate_path = os.path.join(directory, candidate)
        if not os.path.exists(candidate_path):
            return candidate_path
        counter += 1
|
||||||
|
|
||||||
|
def save_json_metadata(self, json_path, prompt_text, negative_text,
                       batch_index, creation_time, prompt=None, extra_pnginfo=None):
    """Write a sidecar JSON file describing one saved image.

    Returns True on success, False (with a console warning) on any failure;
    callers treat JSON metadata as best-effort.
    """
    payload = {
        "prompt": prompt_text,
        "negative_prompt": negative_text,
        "batch_index": batch_index,
        "creation_time": creation_time,
        "workflow_prompt": prompt,
        "extra_pnginfo": extra_pnginfo,
    }

    try:
        with open(json_path, 'w', encoding='utf-8', newline='\n') as handle:
            json.dump(payload, handle, indent=2, default=self.encode_emoji, ensure_ascii=False)
    except Exception as exc:
        print(f"Failed to save JSON metadata: {exc}")
        return False
    return True
|
||||||
|
|
||||||
|
def generate_numbered_filename(self, filename_prefix, delimiter, counter,
                               filename_number_padding, filename_number_start,
                               enable_filename_numbering, date_str, clean_prompt, ext):
    """Assemble a filename from [timestamp][number][prefix][prompt][number] parts.

    The zero-padded sequence number appears right after the timestamp when
    filename_number_start is True, otherwise at the very end; it is omitted
    entirely when numbering is disabled. Empty timestamp/prefix parts are
    skipped. Parts are joined with *delimiter* and *ext* is appended.
    """
    number = f"{counter:0{filename_number_padding}}" if enable_filename_numbering else None

    pieces = []
    if date_str:
        pieces.append(date_str)
    if number is not None and filename_number_start:
        pieces.append(number)
    if filename_prefix:
        pieces.append(filename_prefix)
    pieces.append(clean_prompt)
    if number is not None and not filename_number_start:
        pieces.append(number)

    return delimiter.join(pieces) + ext
|
||||||
|
|
||||||
|
def save_batch_images(self, images, prompt_list, include_timestamp=True,
                      timestamp_format="%Y-%m-%d_%H-%M-%S", image_format="PNG",
                      jpeg_quality=95, delimiter="_",
                      prompt_words_limit=8, embed_workflow=True, save_json_metadata=False,
                      enable_filename_numbering=True, filename_number_padding=2,
                      filename_number_start=False, embed_png_metadata=True,
                      output_path="", filename_prefix="batch",
                      negative_prompt_list="", json_folder="", prompt=None, extra_pnginfo=None):
    """Save every image in *images*, naming each file from its '|'-separated prompt.

    Returns a 1-tuple containing the newline-joined saved paths; per-image
    failures are recorded inline as "ERROR: ..." entries so one bad frame
    does not abort the batch.

    Fixes over the previous version: (1) the debug print no longer reads
    ``images.shape`` unconditionally, which crashed with AttributeError when
    *images* was a plain list; (2) the single-image (3D tensor) path now gets
    the same CHW->HWC transpose as the batch path; (3) the three duplicated
    save branches are unified into shared helpers with consistent quality /
    metadata handling for all formats.
    """
    if isinstance(images, torch.Tensor):
        print(f"DEBUG: Images tensor shape: {images.shape}")
    print(f"DEBUG: Images tensor type: {type(images)}")

    # Resolve output directory; date placeholders in the path are expanded
    # regardless of the include_timestamp toggle (documented behavior).
    processed_output_path = self.validate_and_process_path(output_path, delimiter)
    if processed_output_path.strip() != "":
        if os.path.isabs(processed_output_path):
            output_dir = processed_output_path
        else:
            output_dir = os.path.join(self.output_dir, processed_output_path)
    else:
        output_dir = self.output_dir

    try:
        os.makedirs(output_dir, exist_ok=True)
    except Exception as e:
        raise ValueError(f"Could not create output directory {output_dir}: {e}")

    # Resolve the JSON sidecar directory; fall back to output_dir on failure.
    json_dir = output_dir
    if save_json_metadata:
        if json_folder.strip():
            processed_json_folder = self.validate_and_process_path(json_folder, delimiter)
            if os.path.isabs(processed_json_folder):
                json_dir = processed_json_folder
            else:
                json_dir = os.path.join(self.output_dir, processed_json_folder)
        try:
            os.makedirs(json_dir, exist_ok=True)
        except Exception as e:
            print(f"Warning: Could not create JSON directory {json_dir}: {e}")
            json_dir = output_dir

    now = datetime.now()
    date_str = self.format_timestamp(now, timestamp_format, delimiter) if include_timestamp else None

    individual_prompts = prompt_list.split('|')
    individual_negatives = negative_prompt_list.split('|') if negative_prompt_list else []

    ext = {"PNG": ".png", "JPEG": ".jpg", "WEBP": ".webp"}.get(image_format, ".png")

    # Normalize the input into a flat list of per-image numpy arrays.
    if isinstance(images, torch.Tensor):
        images_np = images.cpu().numpy()
        print(f"DEBUG: Converted to numpy shape: {images_np.shape}")
        if len(images_np.shape) == 4:       # [B, H, W, C] or [B, C, H, W]
            print(f"DEBUG: Found batch of {images_np.shape[0]} images")
            arrays = [images_np[i] for i in range(images_np.shape[0])]
        elif len(images_np.shape) == 3:     # single image [H, W, C] / [C, H, W]
            print("DEBUG: Single image detected, processing as batch of 1")
            arrays = [images_np]
        else:
            raise ValueError(f"Unexpected image tensor shape: {images_np.shape}")
    else:
        arrays = [img.cpu().numpy() if isinstance(img, torch.Tensor) else np.array(img)
                  for img in images]

    saved_paths = []
    for i, img_array in enumerate(arrays):
        try:
            img = self._array_to_pil(img_array, i)

            # Cycle prompts (and negatives) when there are more images than prompts.
            prompt_text = individual_prompts[i % len(individual_prompts)].strip()
            if i >= len(individual_prompts):
                print(f"Note: Cycling prompt for image {i+1} (using prompt {(i % len(individual_prompts)) + 1})")
            negative_text = ""
            if individual_negatives:
                negative_text = individual_negatives[i % len(individual_negatives)].strip()

            clean_prompt = self.clean_filename(prompt_text, prompt_words_limit, delimiter)
            filename = self.generate_numbered_filename(
                filename_prefix, delimiter, i + 1,
                filename_number_padding, filename_number_start,
                enable_filename_numbering, date_str, clean_prompt, ext)

            # Enforce filename-length limits, then uniqueness.
            base_filename = os.path.splitext(filename)[0]
            file_path = self.ensure_filename_length(
                os.path.join(output_dir, filename), base_filename, ext)
            file_path = self.get_unique_filename(file_path)

            json_path = None
            if save_json_metadata:
                json_base = os.path.splitext(os.path.basename(file_path))[0]
                json_path = self.get_unique_filename(
                    os.path.join(json_dir, json_base + ".json"))

            metadata = None
            if image_format == "PNG" and embed_png_metadata:
                metadata = self._build_png_metadata(prompt_text, negative_text, i + 1,
                                                    now, embed_workflow, prompt, extra_pnginfo)
            self._write_image(img, file_path, image_format, jpeg_quality, metadata)

            if json_path is not None:
                self.save_json_metadata(json_path, prompt_text, negative_text,
                                        i + 1, now.isoformat(), prompt, extra_pnginfo)

            saved_paths.append(file_path)
            print(f"Saved: {os.path.basename(file_path)}")
            print(f"  Prompt: {prompt_text}")
            if negative_text:
                print(f"  Negative: {negative_text}")
            if json_path is not None:
                print(f"  JSON: {os.path.basename(json_path)}")

        except Exception as e:
            error_msg = f"Failed to save image {i+1}: {e}"
            print(error_msg)
            # Keep going: record the failure and continue with the rest.
            saved_paths.append(f"ERROR: {error_msg}")

    return ("\n".join(saved_paths),)


def _array_to_pil(self, img_array, index):
    """Convert one numpy array (HWC or CHW, float 0-1 or 0-255) to a PIL Image."""
    if len(img_array.shape) != 3:
        raise ValueError(f"Expected 3D tensor for image {index+1}, got shape {img_array.shape}")
    # Scale float images in [0, 1] up to byte range.
    if img_array.max() <= 1.0:
        img_array = img_array * 255.0
    img_array = np.clip(img_array, 0, 255).astype(np.uint8)
    # CHW -> HWC (heuristic: a leading dim of 3/4 is treated as channels,
    # matching the original batch-branch behavior).
    if img_array.shape[0] in (3, 4):
        img_array = np.transpose(img_array, (1, 2, 0))
    return Image.fromarray(img_array)


def _build_png_metadata(self, prompt_text, negative_text, batch_index, now,
                        embed_workflow, prompt, extra_pnginfo):
    """Assemble the PNG tEXt chunks (prompts, index, time, optional workflow)."""
    metadata = PngImagePlugin.PngInfo()
    metadata.add_text("prompt", prompt_text)
    metadata.add_text("negative_prompt", negative_text)
    metadata.add_text("batch_index", str(batch_index))
    metadata.add_text("creation_time", now.isoformat())
    if embed_workflow:
        if prompt is not None:
            metadata.add_text("workflow", json.dumps(prompt, default=self.encode_emoji))
        if extra_pnginfo is not None:
            for key, value in extra_pnginfo.items():
                metadata.add_text(key, json.dumps(value, default=self.encode_emoji))
    return metadata


def _write_image(self, img, file_path, image_format, jpeg_quality, metadata):
    """Write *img* to disk in the requested format; *metadata* is PNG-only."""
    if image_format == "PNG":
        if metadata is not None:
            img.save(file_path, format="PNG", optimize=True,
                     compress_level=self.compress_level, pnginfo=metadata)
        else:
            img.save(file_path, format="PNG", optimize=True,
                     compress_level=self.compress_level)
    elif image_format == "JPEG":
        if img.mode == 'RGBA':
            # JPEG has no alpha channel; composite onto a white background.
            background = Image.new('RGB', img.size, (255, 255, 255))
            background.paste(img, mask=img.split()[-1])
            img = background
        img.save(file_path, format="JPEG", quality=jpeg_quality, optimize=True)
    elif image_format == "WEBP":
        img.save(file_path, format="WEBP", quality=jpeg_quality, method=6)
    else:
        # Mirrors the extension fallback: unknown formats are saved as PNG.
        img.save(file_path, format="PNG", optimize=True,
                 compress_level=self.compress_level)
|
||||||
17
int_switches/__init__.py
Normal file
17
int_switches/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# Re-export the integer switch node classes for package-level discovery.
from .endless_int_switches import (
    EndlessNode_FourInputIntSwitch,
    EndlessNode_SixInputIntSwitch,
    EndlessNode_EightInputIntSwitch,
)

# Registration table consumed by ComfyUI: internal node id -> implementation class.
NODE_CLASS_MAPPINGS = {
    "Four_Input_Int_Switch": EndlessNode_FourInputIntSwitch,
    "Six_Input_Int_Switch": EndlessNode_SixInputIntSwitch,
    "Eight_Input_Int_Switch": EndlessNode_EightInputIntSwitch,
}

# Internal node id -> label shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "Four_Input_Int_Switch": "Four Input Integer Switch",
    "Six_Input_Int_Switch": "Six Input Integer Switch",
    "Eight_Input_Int_Switch": "Eight Input Integer Switch",
}
|
||||||
116
int_switches/endless_int_switches.py
Normal file
116
int_switches/endless_int_switches.py
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
class EndlessNode_FourInputIntSwitch:
    """Route one of four optional integer inputs to the single INT output.

    The 1-based ``switch`` selects the input; an unconnected (None) or
    out-of-range selection yields 0.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "switch": ("INT", {"default": 1, "min": 1, "max": 4}),
            },
            "optional": {
                f"int{i}": ("INT", {"default": 0, "max": 999999999999, "forceInput": True})
                for i in range(1, 5)
            },
        }

    RETURN_TYPES = ("INT",)
    FUNCTION = "switch_int"
    CATEGORY = "Endless 🌊✨/Integer Switches"
    OUTPUT_NODE = True

    def switch_int(self, switch, int1=None, int2=None, int3=None, int4=None):
        """Return the value at position *switch*, or 0 if unset/out of range."""
        candidates = (int1, int2, int3, int4)
        if not 1 <= switch <= 4:
            return (0,)
        chosen = candidates[switch - 1]
        return (0,) if chosen is None else (chosen,)
|
||||||
|
|
||||||
|
|
||||||
|
class EndlessNode_SixInputIntSwitch:
    """Route one of six optional integer inputs to the single INT output.

    The 1-based ``switch`` selects the input; an unconnected (None) or
    out-of-range selection yields 0.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "switch": ("INT", {"default": 1, "min": 1, "max": 6}),
            },
            "optional": {
                f"int{i}": ("INT", {"default": 0, "max": 999999999999, "forceInput": True})
                for i in range(1, 7)
            },
        }

    RETURN_TYPES = ("INT",)
    FUNCTION = "switch_int"
    CATEGORY = "Endless 🌊✨/Integer Switches"
    OUTPUT_NODE = True

    def switch_int(self, switch, int1=None, int2=None, int3=None,
                   int4=None, int5=None, int6=None):
        """Return the value at position *switch*, or 0 if unset/out of range."""
        candidates = (int1, int2, int3, int4, int5, int6)
        if not 1 <= switch <= 6:
            return (0,)
        chosen = candidates[switch - 1]
        return (0,) if chosen is None else (chosen,)
|
||||||
|
|
||||||
|
|
||||||
|
class EndlessNode_EightInputIntSwitch:
    """Route one of eight optional integer inputs to the single INT output.

    The 1-based ``switch`` selects the input; an unconnected (None) or
    out-of-range selection yields 0.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "switch": ("INT", {"default": 1, "min": 1, "max": 8}),
            },
            "optional": {
                f"int{i}": ("INT", {"default": 0, "max": 999999999999, "forceInput": True})
                for i in range(1, 9)
            },
        }

    RETURN_TYPES = ("INT",)
    FUNCTION = "switch_int"
    CATEGORY = "Endless 🌊✨/Integer Switches"
    OUTPUT_NODE = True

    def switch_int(self, switch, int1=None, int2=None, int3=None, int4=None,
                   int5=None, int6=None, int7=None, int8=None):
        """Return the value at position *switch*, or 0 if unset/out of range."""
        candidates = (int1, int2, int3, int4, int5, int6, int7, int8)
        if not 1 <= switch <= 8:
            return (0,)
        chosen = candidates[switch - 1]
        return (0,) if chosen is None else (chosen,)
|
||||||
|
|
||||||
|
|
||||||
|
# Registration table consumed by ComfyUI: internal node id -> implementation class.
NODE_CLASS_MAPPINGS = {
    "Four_Input_Int_Switch": EndlessNode_FourInputIntSwitch,
    "Six_Input_Int_Switch": EndlessNode_SixInputIntSwitch,
    "Eight_Input_Int_Switch": EndlessNode_EightInputIntSwitch,
}

# Internal node id -> label shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "Four_Input_Int_Switch": "Four Input Integer Switch",
    "Six_Input_Int_Switch": "Six Input Integer Switch",
    "Eight_Input_Int_Switch": "Eight Input Integer Switch",
}
|
||||||
17
int_switches_widget/__init__.py
Normal file
17
int_switches_widget/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# Re-export the widget-style integer switch node classes for package discovery.
from .endless_int_switches_widget import (
    EndlessNode_FourInputIntSwitch_Widget,
    EndlessNode_SixInputIntSwitch_Widget,
    EndlessNode_EightInputIntSwitch_Widget,
)

# Registration table consumed by ComfyUI: internal node id -> implementation class.
NODE_CLASS_MAPPINGS = {
    "Four_Input_Int_Switch_Widget": EndlessNode_FourInputIntSwitch_Widget,
    "Six_Input_Int_Switch_Widget": EndlessNode_SixInputIntSwitch_Widget,
    "Eight_Input_Int_Switch_Widget": EndlessNode_EightInputIntSwitch_Widget,
}

# Internal node id -> label shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "Four_Input_Int_Switch_Widget": "Four Input Integer Switch (Widget)",
    "Six_Input_Int_Switch_Widget": "Six Input Integer Switch (Widget)",
    "Eight_Input_Int_Switch_Widget": "Eight Input Integer Switch (Widget)",
}
|
||||||
BIN
int_switches_widget/__pycache__/__init__.cpython-311.pyc
Normal file
BIN
int_switches_widget/__pycache__/__init__.cpython-311.pyc
Normal file
Binary file not shown.
Binary file not shown.
98
int_switches_widget/endless_int_switches_widget.py
Normal file
98
int_switches_widget/endless_int_switches_widget.py
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
class EndlessNode_FourInputIntSwitch_Widget:
    """Four-way integer selector whose inputs are always-present widgets.

    Unlike the forceInput variant, all four values are provided, so the
    selected position always has a value; out-of-range ``switch`` yields 0.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "switch": ("INT", {"default": 1, "min": 1, "max": 4, "widget": "int"}),
            },
            "optional": {
                f"int{i}": ("INT", {"default": 0, "widget": "int"})
                for i in range(1, 5)
            },
        }

    RETURN_TYPES = ("INT",)
    FUNCTION = "switch_int"
    CATEGORY = "Endless 🌊✨/Integer Switches"
    OUTPUT_NODE = True

    def switch_int(self, switch, int1, int2, int3, int4):
        """Return the value at the 1-based *switch* position (0 if out of range)."""
        values = (int1, int2, int3, int4)
        return (values[switch - 1],) if 1 <= switch <= 4 else (0,)
|
||||||
|
|
||||||
|
|
||||||
|
class EndlessNode_SixInputIntSwitch_Widget:
    """Six-way integer selector whose inputs are always-present widgets.

    All six values are provided as widgets; out-of-range ``switch`` yields 0.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "switch": ("INT", {"default": 1, "min": 1, "max": 6, "widget": "int"}),
            },
            "optional": {
                f"int{i}": ("INT", {"default": 0, "widget": "int"})
                for i in range(1, 7)
            },
        }

    RETURN_TYPES = ("INT",)
    FUNCTION = "switch_int"
    CATEGORY = "Endless 🌊✨/Integer Switches"
    OUTPUT_NODE = True

    def switch_int(self, switch, int1, int2, int3, int4, int5, int6):
        """Return the value at the 1-based *switch* position (0 if out of range)."""
        values = (int1, int2, int3, int4, int5, int6)
        return (values[switch - 1],) if 1 <= switch <= 6 else (0,)
|
||||||
|
|
||||||
|
|
||||||
|
class EndlessNode_EightInputIntSwitch_Widget:
    """ComfyUI node: route one of eight integer widget inputs to the output.

    The `switch` value (1-8) selects which of the optional `int1`..`int8`
    inputs is forwarded. An out-of-range switch yields 0.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "switch": ("INT", {"default": 1, "min": 1, "max": 8, "widget": "int"}),
            },
            "optional": {
                "int1": ("INT", {"default": 0, "widget": "int"}),
                "int2": ("INT", {"default": 0, "widget": "int"}),
                "int3": ("INT", {"default": 0, "widget": "int"}),
                "int4": ("INT", {"default": 0, "widget": "int"}),
                "int5": ("INT", {"default": 0, "widget": "int"}),
                "int6": ("INT", {"default": 0, "widget": "int"}),
                "int7": ("INT", {"default": 0, "widget": "int"}),
                "int8": ("INT", {"default": 0, "widget": "int"}),
            }
        }

    RETURN_TYPES = ("INT",)
    FUNCTION = "switch_int"
    CATEGORY = "Endless 🌊✨/Integer Switches"
    OUTPUT_NODE = True

    def switch_int(self, switch, int1=0, int2=0, int3=0, int4=0, int5=0, int6=0, int7=0, int8=0):
        """Return a 1-tuple holding the selected integer (0 if switch invalid).

        Fix: optional inputs now have defaults; ComfyUI omits unconnected
        optional inputs, which previously raised TypeError.
        """
        ints = [int1, int2, int3, int4, int5, int6, int7, int8]
        if 1 <= switch <= 8:
            return (ints[switch - 1],)
        return (0,)
||||||
|
|
||||||
|
# ComfyUI registration tables: node key -> implementation class, and
# node key -> human-readable display name.  Built from one pair table so
# the two dicts cannot drift out of sync.
_INT_SWITCH_NODES = (
    ("Four_Input_Int_Switch_Widget", EndlessNode_FourInputIntSwitch_Widget,
     "Four Input Integer Switch (Widget)"),
    ("Six_Input_Int_Switch_Widget", EndlessNode_SixInputIntSwitch_Widget,
     "Six Input Integer Switch (Widget)"),
    ("Eight_Input_Int_Switch_Widget", EndlessNode_EightInputIntSwitch_Widget,
     "Eight Input Integer Switch (Widget)"),
)

NODE_CLASS_MAPPINGS = {key: cls for key, cls, _ in _INT_SWITCH_NODES}

NODE_DISPLAY_NAME_MAPPINGS = {key: label for key, _, label in _INT_SWITCH_NODES}
|
||||||
15
random_prompt_selectors/__init__.py
Normal file
15
random_prompt_selectors/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
from .endless_random_prompt_selectors import (
    EndlessNode_RandomPromptSelector,
    EndlessNode_RandomPromptMultiPicker,
)

# ComfyUI registration: key -> (implementation class, display name).
_NODES = {
    "Random_Prompt_Selector": (EndlessNode_RandomPromptSelector, "Random Prompt Selector"),
    "Random_Prompt_Multipicker": (EndlessNode_RandomPromptMultiPicker, "Random Multiprompt Picker"),
}

NODE_CLASS_MAPPINGS = {key: cls for key, (cls, _) in _NODES.items()}
NODE_DISPLAY_NAME_MAPPINGS = {key: label for key, (_, label) in _NODES.items()}
|
||||||
107
random_prompt_selectors/endless_random_prompt_selectors.py
Normal file
107
random_prompt_selectors/endless_random_prompt_selectors.py
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
import random
|
||||||
|
import time
|
||||||
|
|
||||||
|
class EndlessNode_RandomPromptSelector:
    """ComfyUI node: pick one line at random from a multiline prompt list."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompts": ("STRING", {
                    "multiline": True,
                    "default": "Prompt A\nPrompt B\nPrompt C"
                }),
                "seed": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 2**32 - 1
                }),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("selected_prompt",)
    FUNCTION = "pick_random_prompt"
    CATEGORY = "Endless 🌊✨/Randomizers"

    def pick_random_prompt(self, prompts, seed):
        """Return a 1-tuple with one non-empty, stripped line chosen at random.

        Returns ("",) when `prompts` contains no non-blank lines.

        Fix: uses a local random.Random(seed) instead of random.seed(seed).
        The result for a given seed is unchanged (same underlying sequence),
        but the process-wide RNG is no longer reseeded, which previously
        perturbed every other consumer of the `random` module.
        """
        rng = random.Random(seed)
        lines = [line.strip() for line in prompts.splitlines() if line.strip()]
        if not lines:
            return ("",)
        return (rng.choice(lines),)
|
||||||
|
|
||||||
|
class EndlessNode_RandomPromptMultiPicker:
    """ComfyUI node: pick several lines from a multiline prompt list and join them."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompts": ("STRING", {
                    "multiline": True,
                    "default": "Line 1\nLine 2\nLine 3\nLine 4"
                }),
                "num_to_pick": ("INT", {
                    "default": 2,
                    "min": 1,
                    "max": 100,
                }),
                "allow_duplicates": ("BOOLEAN", {
                    "default": False
                }),
                "delimiter": ("STRING", {
                    "default": "\n"
                }),
                "seed": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 2**32 - 1
                }),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("selected_prompts",)
    FUNCTION = "pick_multiple"
    CATEGORY = "Endless 🌊✨/Randomizers"

    def pick_multiple(self, prompts, num_to_pick, allow_duplicates, delimiter, seed):
        """Return a 1-tuple with the chosen lines joined by `delimiter`.

        With allow_duplicates the same line may appear multiple times; without
        it the pick count is silently capped at the number of distinct lines.
        Returns ("",) when `prompts` contains no non-blank lines.

        Fix: uses a local random.Random(seed) instead of random.seed(seed),
        so the global RNG state is left untouched (output per seed unchanged).
        """
        rng = random.Random(seed)
        lines = [line.strip() for line in prompts.splitlines() if line.strip()]
        if not lines:
            return ("",)

        if allow_duplicates:
            picks = rng.choices(lines, k=num_to_pick)
        else:
            picks = rng.sample(lines, k=min(num_to_pick, len(lines)))

        return (delimiter.join(picks),)
|
||||||
|
|
||||||
|
# Optional: Auto-seed generator node
|
||||||
|
class EndlessNode_AutoSeed:
    """ComfyUI node: emit a time-varying seed derived from the clock plus a base."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {},
            "optional": {
                "base_seed": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 2**32 - 1
                }),
            }
        }

    RETURN_TYPES = ("INT",)
    RETURN_NAMES = ("seed",)
    FUNCTION = "generate_seed"
    CATEGORY = "Endless 🌊✨/Randomizers"

    def generate_seed(self, base_seed=0):
        """Return (seed,) where seed = (base_seed + microsecond clock) mod (2**32 - 1).

        Microsecond resolution makes back-to-back calls very likely to differ.
        """
        micros = int(time.time() * 1_000_000)
        return ((base_seed + micros) % (2**32 - 1),)
|
||||||
17
randomizers/__init__.py
Normal file
17
randomizers/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# Package init for the randomizer nodes.
#
# NOTE(review): the registration keys below spell "Randomzier" (sic).  They
# are left byte-for-byte intact because saved workflows reference nodes by
# these keys; renaming them would break existing graphs.
# EndlessNode_Pandemonium is imported but its registration is disabled below.
from .endless_randomizers import (
    EndlessNode_Mayhem,
    EndlessNode_Chaos,
    EndlessNode_Pandemonium,
)

# ComfyUI registration: node key -> implementation class.
NODE_CLASS_MAPPINGS = {
    "Randomzier_Mayhem": EndlessNode_Mayhem,
    "Randomzier_Chaos": EndlessNode_Chaos,
    # "Randomzier_Pandemonium": EndlessNode_Pandemonium,
}

# ComfyUI registration: node key -> display name shown in the UI.
NODE_DISPLAY_NAME_MAPPINGS = {
    "Randomzier_Mayhem": "Mayhem Randomizer",
    "Randomzier_Chaos": "Chaos Randomizer",
    # "Randomzier_Pandemonium": "Pandemonium Randomizer",
}
|
||||||
173
randomizers/endless_randomizers.py
Normal file
173
randomizers/endless_randomizers.py
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
import random
|
||||||
|
|
||||||
|
# Safe samplers and schedulers for Flux (example set from your flux matrix)
|
||||||
|
# Fallback sampler/scheduler pools used by EndlessNode_Pandemonium when the
# user supplies empty lists.
#
# Fix: the previous lists held UI-style labels ("DDIM", "Euler a") and
# literal placeholders ("Scheduler A") that do not match the internal
# ComfyUI names this file's own Pandemonium defaults use (e.g. "euler",
# "karras"), so the fallback would have produced invalid sampler/scheduler
# strings.  These subsets mirror the node's default widget values.
SAFE_SAMPLERS = [
    "euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral",
    "lms", "dpmpp_2s_ancestral", "dpmpp_2m", "dpmpp_sde", "ddim",
]
SAFE_SCHEDULERS = [
    "normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "beta",
]
|
||||||
|
|
||||||
|
class EndlessNode_Mayhem:
    """ComfyUI node: randomize steps, cfg, image size and seed from user ranges."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "steps_min": ("INT", {"default": 20, "min": 1, "max": 150}),
                "steps_max": ("INT", {"default": 40, "min": 1, "max": 150}),
                "cfg_min": ("FLOAT", {"default": 6.0, "min": 1.0, "max": 20.0}),
                "cfg_max": ("FLOAT", {"default": 12.0, "min": 1.0, "max": 20.0}),
                "height_min": ("INT", {"default": 512, "min": 64, "max": 4096}),
                "height_max": ("INT", {"default": 768, "min": 256, "max": 4096}),
                "width_min": ("INT", {"default": 512, "min": 64, "max": 4096}),
                "width_max": ("INT", {"default": 768, "min": 256, "max": 4096}),
                "seed_min": ("INT", {"default": 0, "min": 0, "max": 2**32 - 1}),
                "seed_max": ("INT", {"default": 8675309, "min": 0, "max": 2**32 - 1}),
                "seed": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 2**32 - 1
                }),
            }
        }

    RETURN_TYPES = ("INT", "FLOAT", "INT", "INT", "INT")
    RETURN_NAMES = ("steps", "cfg_scale", "height", "width", "seed")
    FUNCTION = "randomize"
    CATEGORY = "Endless 🌊✨/Randomizers"

    def randomize(self, steps_min, steps_max, cfg_min, cfg_max, height_min, height_max, width_min, width_max, seed_min, seed_max, seed):
        """Return (steps, cfg_scale, height, width, output_seed) drawn from the ranges.

        `seed` drives the draws reproducibly; dimensions are snapped down to
        multiples of 16 with a 256 floor.

        Fix: uses a local random.Random(seed) instead of random.seed(seed);
        identical draw sequence per seed, but the global RNG state that
        other nodes rely on is no longer clobbered.
        """
        rng = random.Random(seed)

        # Snap dimension bounds down to multiples of 16, floored at 256.
        height_min = max(256, (height_min // 16) * 16)
        height_max = max(256, (height_max // 16) * 16)
        width_min = max(256, (width_min // 16) * 16)
        width_max = max(256, (width_max // 16) * 16)

        steps = rng.randint(steps_min, steps_max)
        cfg_scale = round(rng.uniform(cfg_min, cfg_max), 2)
        # Draw in units of 16 so results stay aligned to the latent grid.
        height = rng.randint(height_min // 16, height_max // 16) * 16
        width = rng.randint(width_min // 16, width_max // 16) * 16
        output_seed = rng.randint(seed_min, seed_max)
        return (steps, cfg_scale, height, width, output_seed)
|
||||||
|
|
||||||
|
class EndlessNode_Chaos:
    """ComfyUI node: like Mayhem, but randomly swaps the height/width ranges."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "steps_min": ("INT", {"default": 20, "min": 1, "max": 150}),
                "steps_max": ("INT", {"default": 40, "min": 1, "max": 150}),
                "cfg_min": ("FLOAT", {"default": 6.0, "min": 1.0, "max": 20.0}),
                "cfg_max": ("FLOAT", {"default": 12.0, "min": 1.0, "max": 20.0}),
                "height_min": ("INT", {"default": 512, "min": 64, "max": 4096}),
                "height_max": ("INT", {"default": 768, "min": 64, "max": 4096}),
                "width_min": ("INT", {"default": 512, "min": 64, "max": 4096}),
                "width_max": ("INT", {"default": 768, "min": 64, "max": 4096}),
                "seed_min": ("INT", {"default": 0, "min": 0, "max": 2**32 - 1}),
                "seed_max": ("INT", {"default": 8675309, "min": 0, "max": 2**32 - 1}),
                "seed": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 2**32 - 1
                }),
            }
        }

    RETURN_TYPES = ("INT", "FLOAT", "INT", "INT", "INT")
    RETURN_NAMES = ("steps", "cfg_scale", "height", "width", "seed")
    FUNCTION = "randomize_with_flip"
    CATEGORY = "Endless 🌊✨/Randomizers"

    def randomize_with_flip(self, steps_min, steps_max, cfg_min, cfg_max, height_min, height_max, width_min, width_max, seed_min, seed_max, seed):
        """Return (steps, cfg_scale, height, width, output_seed).

        With 50% probability the height and width RANGES are swapped (portrait
        vs landscape), then values are drawn as in Mayhem.

        Fix: uses a local random.Random(seed) instead of random.seed(seed);
        identical draw sequence per seed, no global RNG pollution.
        """
        rng = random.Random(seed)

        # Snap dimension bounds down to multiples of 16, floored at 256.
        height_min = max(256, (height_min // 16) * 16)
        height_max = max(256, (height_max // 16) * 16)
        width_min = max(256, (width_min // 16) * 16)
        width_max = max(256, (width_max // 16) * 16)

        steps = rng.randint(steps_min, steps_max)
        cfg_scale = round(rng.uniform(cfg_min, cfg_max), 2)

        # 50% chance to swap orientation by drawing from the opposite ranges.
        if rng.random() < 0.5:
            height = rng.randint(height_min // 16, height_max // 16) * 16
            width = rng.randint(width_min // 16, width_max // 16) * 16
        else:
            width = rng.randint(height_min // 16, height_max // 16) * 16
            height = rng.randint(width_min // 16, width_max // 16) * 16

        output_seed = rng.randint(seed_min, seed_max)
        return (steps, cfg_scale, height, width, output_seed)
|
||||||
|
|
||||||
|
class EndlessNode_Pandemonium:
    """ComfyUI node: randomize steps, cfg, size, seed, sampler and scheduler."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "steps_min": ("INT", {"default": 20, "min": 1, "max": 150}),
                "steps_max": ("INT", {"default": 40, "min": 1, "max": 150}),
                "cfg_min": ("FLOAT", {"default": 6.0, "min": 1.0, "max": 20.0}),
                "cfg_max": ("FLOAT", {"default": 12.0, "min": 1.0, "max": 20.0}),
                "height_min": ("INT", {"default": 512, "min": 64, "max": 4096}),
                "height_max": ("INT", {"default": 768, "min": 64, "max": 4096}),
                "width_min": ("INT", {"default": 512, "min": 64, "max": 4096}),
                "width_max": ("INT", {"default": 768, "min": 64, "max": 4096}),
                "seed_min": ("INT", {"default": 0, "min": 0, "max": 2**32 - 1}),
                "seed_max": ("INT", {"default": 8675309, "min": 0, "max": 2**32 - 1}),
                "samplers": ("STRING", {
                    "multiline": True,
                    "default": "euler\neuler_ancestral\nheun\nheunpp2\ndpm_2\ndpm_2_ancestral\nlms\ndpm_fast\ndpm_adaptive\ndpmpp_2s_ancestral\ndpmpp_sde\ndpmpp_sde_gpu\ndpmpp_2m\ndpmpp_2m_sde\ndpmpp_2m_sde_gpu\ndpmpp_3m_sde\ndpmpp_3m_sde_gpu\nddpm\nlcm\nddim\nuni_pc\nuni_pc_bh2"
                }),
                "schedulers": ("STRING", {
                    "multiline": True,
                    "default": "normal\nkarras\nexponential\nsgm_uniform\nsimple\nddim_uniform\nbeta"
                }),
                "seed": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 2**32 - 1
                }),
            }
        }

    RETURN_TYPES = ("INT", "FLOAT", "INT", "INT", "INT", "STRING", "STRING")
    RETURN_NAMES = ("steps", "cfg_scale", "height", "width", "seed", "sampler", "scheduler")
    FUNCTION = "randomize_all"
    CATEGORY = "Endless 🌊✨/Randomizers"

    def randomize_all(self, steps_min, steps_max, cfg_min, cfg_max, height_min, height_max, width_min, width_max, seed_min, seed_max, samplers, schedulers, seed):
        """Return (steps, cfg_scale, height, width, output_seed, sampler, scheduler).

        Sampler/scheduler are picked from the user-provided newline-separated
        lists; empty lists fall back to the module-level SAFE_* pools.

        Fix: uses a local random.Random(seed) instead of random.seed(seed);
        identical draw sequence per seed, no global RNG pollution.
        """
        rng = random.Random(seed)

        # Snap dimension bounds down to multiples of 16, floored at 256.
        height_min = max(256, (height_min // 16) * 16)
        height_max = max(256, (height_max // 16) * 16)
        width_min = max(256, (width_min // 16) * 16)
        width_max = max(256, (width_max // 16) * 16)

        steps = rng.randint(steps_min, steps_max)
        cfg_scale = round(rng.uniform(cfg_min, cfg_max), 2)
        height = rng.randint(height_min // 16, height_max // 16) * 16
        width = rng.randint(width_min // 16, width_max // 16) * 16
        output_seed = rng.randint(seed_min, seed_max)

        # Parse the newline-separated sampler/scheduler pools.
        sampler_list = [s.strip() for s in samplers.splitlines() if s.strip()]
        scheduler_list = [s.strip() for s in schedulers.splitlines() if s.strip()]

        # Fall back to the module-level safe pools when a list is empty.
        if not sampler_list:
            sampler_list = SAFE_SAMPLERS
        if not scheduler_list:
            scheduler_list = SAFE_SCHEDULERS

        sampler = rng.choice(sampler_list)
        scheduler = rng.choice(scheduler_list)

        return (steps, cfg_scale, height, width, output_seed, sampler, scheduler)
|
||||||
17
text_switches/__init__.py
Normal file
17
text_switches/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
from .endless_text_switches import (
    EndlessNode_FourInputTextSwitch,
    EndlessNode_SixInputTextSwitch,
    EndlessNode_EightInputTextSwitch,
)

# ComfyUI registration: key -> (implementation class, display name).
_NODES = {
    "Four_Input_Text_Switch": (EndlessNode_FourInputTextSwitch, "Four Input Text Switch"),
    "Six_Input_Text_Switch": (EndlessNode_SixInputTextSwitch, "Six Input Text Switch"),
    "Eight_Input_Text_Switch": (EndlessNode_EightInputTextSwitch, "Eight Input Text Switch"),
}

NODE_CLASS_MAPPINGS = {key: cls for key, (cls, _) in _NODES.items()}
NODE_DISPLAY_NAME_MAPPINGS = {key: label for key, (_, label) in _NODES.items()}
|
||||||
100
text_switches/endless_text_switches.py
Normal file
100
text_switches/endless_text_switches.py
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
# text_switches.py
|
||||||
|
|
||||||
|
class EndlessNode_FourInputTextSwitch:
    """ComfyUI node: route one of four optional text inputs to the output.

    The `switch` value (1-4) selects which of `text1`..`text4` is forwarded;
    an out-of-range switch yields "".
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "switch": ("INT", {"default": 1, "min": 1, "max": 4, "widget": "int"}),
            },
            "optional": {
                "text1": ("STRING", {"default": ""}),
                "text2": ("STRING", {"default": ""}),
                "text3": ("STRING", {"default": ""}),
                "text4": ("STRING", {"default": ""}),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "switch_text"
    CATEGORY = "Endless 🌊✨/Text Switches"
    OUTPUT_NODE = True

    def switch_text(self, switch, text1="", text2="", text3="", text4=""):
        """Return a 1-tuple with the selected text ("" if switch invalid).

        Fix: optional inputs now have Python-level defaults; ComfyUI omits
        unconnected optional inputs, which previously raised TypeError.
        """
        texts = [text1, text2, text3, text4]
        if 1 <= switch <= 4:
            return (texts[switch - 1],)
        return ("",)
|
||||||
|
|
||||||
|
|
||||||
|
class EndlessNode_SixInputTextSwitch:
    """ComfyUI node: route one of six optional text inputs to the output.

    The `switch` value (1-6) selects which of `text1`..`text6` is forwarded;
    an out-of-range switch yields "".
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "switch": ("INT", {"default": 1, "min": 1, "max": 6, "widget": "int"}),
            },
            "optional": {
                "text1": ("STRING", {"default": ""}),
                "text2": ("STRING", {"default": ""}),
                "text3": ("STRING", {"default": ""}),
                "text4": ("STRING", {"default": ""}),
                "text5": ("STRING", {"default": ""}),
                "text6": ("STRING", {"default": ""}),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "switch_text"
    CATEGORY = "Endless 🌊✨/Text Switches"
    OUTPUT_NODE = True

    def switch_text(self, switch, text1="", text2="", text3="", text4="", text5="", text6=""):
        """Return a 1-tuple with the selected text ("" if switch invalid).

        Fix: optional inputs now have defaults; ComfyUI omits unconnected
        optional inputs, which previously raised TypeError.
        """
        texts = [text1, text2, text3, text4, text5, text6]
        if 1 <= switch <= 6:
            return (texts[switch - 1],)
        return ("",)
|
||||||
|
|
||||||
|
|
||||||
|
class EndlessNode_EightInputTextSwitch:
    """ComfyUI node: route one of eight optional text inputs to the output.

    The `switch` value (1-8) selects which of `text1`..`text8` is forwarded;
    an out-of-range switch yields "".
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "switch": ("INT", {"default": 1, "min": 1, "max": 8, "widget": "int"}),
            },
            "optional": {
                "text1": ("STRING", {"default": ""}),
                "text2": ("STRING", {"default": ""}),
                "text3": ("STRING", {"default": ""}),
                "text4": ("STRING", {"default": ""}),
                "text5": ("STRING", {"default": ""}),
                "text6": ("STRING", {"default": ""}),
                "text7": ("STRING", {"default": ""}),
                "text8": ("STRING", {"default": ""}),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "switch_text"
    CATEGORY = "Endless 🌊✨/Text Switches"
    OUTPUT_NODE = True

    def switch_text(self, switch, text1="", text2="", text3="", text4="", text5="", text6="", text7="", text8=""):
        """Return a 1-tuple with the selected text ("" if switch invalid).

        Fix: optional inputs now have defaults; ComfyUI omits unconnected
        optional inputs, which previously raised TypeError.
        """
        texts = [text1, text2, text3, text4, text5, text6, text7, text8]
        if 1 <= switch <= 8:
            return (texts[switch - 1],)
        return ("",)
|
||||||
|
|
||||||
|
|
||||||
|
# ComfyUI registration tables, built from one pair table so the class map
# and the display-name map stay in lockstep.
_TEXT_SWITCH_NODES = (
    ("Four_Input_Text_Switch", EndlessNode_FourInputTextSwitch, "Four Input Text Switch"),
    ("Six_Input_Text_Switch", EndlessNode_SixInputTextSwitch, "Six Input Text Switch"),
    ("Eight_Input_Text_Switch", EndlessNode_EightInputTextSwitch, "Eight Input Text Switch"),
)

NODE_CLASS_MAPPINGS = {key: cls for key, cls, _ in _TEXT_SWITCH_NODES}

NODE_DISPLAY_NAME_MAPPINGS = {key: label for key, _, label in _TEXT_SWITCH_NODES}
|
||||||
603
workflows/MultiPromptFlux.json
Normal file
603
workflows/MultiPromptFlux.json
Normal file
@@ -0,0 +1,603 @@
|
|||||||
|
{
|
||||||
|
"id": "64c85f2c-2f42-43db-a373-c30c74c02d1b",
|
||||||
|
"revision": 0,
|
||||||
|
"last_node_id": 18,
|
||||||
|
"last_link_id": 35,
|
||||||
|
"nodes": [
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"type": "CLIPTextEncode",
|
||||||
|
"pos": [
|
||||||
|
-1400,
|
||||||
|
-130
|
||||||
|
],
|
||||||
|
"size": [
|
||||||
|
400,
|
||||||
|
130
|
||||||
|
],
|
||||||
|
"flags": {},
|
||||||
|
"order": 1,
|
||||||
|
"mode": 0,
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"name": "clip",
|
||||||
|
"type": "CLIP",
|
||||||
|
"link": 1
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "CONDITIONING",
|
||||||
|
"type": "CONDITIONING",
|
||||||
|
"links": [
|
||||||
|
34
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"cnr_id": "comfy-core",
|
||||||
|
"ver": "0.3.39",
|
||||||
|
"Node name for S&R": "CLIPTextEncode"
|
||||||
|
},
|
||||||
|
"widgets_values": [
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"type": "PreviewImage",
|
||||||
|
"pos": [
|
||||||
|
240,
|
||||||
|
-610
|
||||||
|
],
|
||||||
|
"size": [
|
||||||
|
1312,
|
||||||
|
1352
|
||||||
|
],
|
||||||
|
"flags": {},
|
||||||
|
"order": 7,
|
||||||
|
"mode": 0,
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"name": "images",
|
||||||
|
"type": "IMAGE",
|
||||||
|
"link": 2
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"outputs": [],
|
||||||
|
"properties": {
|
||||||
|
"cnr_id": "comfy-core",
|
||||||
|
"ver": "0.3.39",
|
||||||
|
"Node name for S&R": "PreviewImage"
|
||||||
|
},
|
||||||
|
"widgets_values": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"type": "CheckpointLoaderSimple",
|
||||||
|
"pos": [
|
||||||
|
-1800,
|
||||||
|
-600
|
||||||
|
],
|
||||||
|
"size": [
|
||||||
|
270,
|
||||||
|
98
|
||||||
|
],
|
||||||
|
"flags": {},
|
||||||
|
"order": 0,
|
||||||
|
"mode": 0,
|
||||||
|
"inputs": [],
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "MODEL",
|
||||||
|
"type": "MODEL",
|
||||||
|
"links": [
|
||||||
|
32
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "CLIP",
|
||||||
|
"type": "CLIP",
|
||||||
|
"links": [
|
||||||
|
1,
|
||||||
|
22
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "VAE",
|
||||||
|
"type": "VAE",
|
||||||
|
"links": [
|
||||||
|
33
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"cnr_id": "comfy-core",
|
||||||
|
"ver": "0.3.39",
|
||||||
|
"Node name for S&R": "CheckpointLoaderSimple"
|
||||||
|
},
|
||||||
|
"widgets_values": [
|
||||||
|
"Flux\\flux1-dev-fp8.large.safetensors"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 6,
|
||||||
|
"type": "EmptyLatentImage",
|
||||||
|
"pos": [
|
||||||
|
-870,
|
||||||
|
-250
|
||||||
|
],
|
||||||
|
"size": [
|
||||||
|
210,
|
||||||
|
106
|
||||||
|
],
|
||||||
|
"flags": {},
|
||||||
|
"order": 3,
|
||||||
|
"mode": 0,
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"name": "batch_size",
|
||||||
|
"type": "INT",
|
||||||
|
"widget": {
|
||||||
|
"name": "batch_size"
|
||||||
|
},
|
||||||
|
"link": 25
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "LATENT",
|
||||||
|
"type": "LATENT",
|
||||||
|
"links": [
|
||||||
|
15
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"cnr_id": "comfy-core",
|
||||||
|
"ver": "0.3.39",
|
||||||
|
"Node name for S&R": "EmptyLatentImage"
|
||||||
|
},
|
||||||
|
"widgets_values": [
|
||||||
|
1024,
|
||||||
|
1024,
|
||||||
|
1
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 8,
|
||||||
|
"type": "VAEDecode",
|
||||||
|
"pos": [
|
||||||
|
-80,
|
||||||
|
-310
|
||||||
|
],
|
||||||
|
"size": [
|
||||||
|
140,
|
||||||
|
46
|
||||||
|
],
|
||||||
|
"flags": {},
|
||||||
|
"order": 6,
|
||||||
|
"mode": 0,
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"name": "samples",
|
||||||
|
"type": "LATENT",
|
||||||
|
"link": 6
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "vae",
|
||||||
|
"type": "VAE",
|
||||||
|
"link": 33
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "IMAGE",
|
||||||
|
"type": "IMAGE",
|
||||||
|
"links": [
|
||||||
|
2,
|
||||||
|
28
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"cnr_id": "comfy-core",
|
||||||
|
"ver": "0.3.39",
|
||||||
|
"Node name for S&R": "VAEDecode"
|
||||||
|
},
|
||||||
|
"widgets_values": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 12,
|
||||||
|
"type": "KSampler",
|
||||||
|
"pos": [
|
||||||
|
-480,
|
||||||
|
-310
|
||||||
|
],
|
||||||
|
"size": [
|
||||||
|
270,
|
||||||
|
262
|
||||||
|
],
|
||||||
|
"flags": {},
|
||||||
|
"order": 5,
|
||||||
|
"mode": 0,
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"name": "model",
|
||||||
|
"type": "MODEL",
|
||||||
|
"link": 32
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "positive",
|
||||||
|
"type": "CONDITIONING",
|
||||||
|
"link": 30
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "negative",
|
||||||
|
"type": "CONDITIONING",
|
||||||
|
"link": 34
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "latent_image",
|
||||||
|
"type": "LATENT",
|
||||||
|
"link": 15
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "LATENT",
|
||||||
|
"type": "LATENT",
|
||||||
|
"links": [
|
||||||
|
6
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"cnr_id": "comfy-core",
|
||||||
|
"ver": "0.3.39",
|
||||||
|
"Node name for S&R": "KSampler"
|
||||||
|
},
|
||||||
|
"widgets_values": [
|
||||||
|
8675312,
|
||||||
|
"increment",
|
||||||
|
25,
|
||||||
|
1,
|
||||||
|
"euler",
|
||||||
|
"beta",
|
||||||
|
1
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 15,
|
||||||
|
"type": "FluxBatchPrompts",
|
||||||
|
"pos": [
|
||||||
|
-1400,
|
||||||
|
-440
|
||||||
|
],
|
||||||
|
"size": [
|
||||||
|
400,
|
||||||
|
200
|
||||||
|
],
|
||||||
|
"flags": {},
|
||||||
|
"order": 2,
|
||||||
|
"mode": 0,
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"name": "clip",
|
||||||
|
"type": "CLIP",
|
||||||
|
"link": 22
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "CONDITIONING",
|
||||||
|
"type": "CONDITIONING",
|
||||||
|
"links": [
|
||||||
|
30
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "PROMPT_LIST",
|
||||||
|
"type": "STRING",
|
||||||
|
"links": [
|
||||||
|
31
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "PROMPT_COUNT",
|
||||||
|
"type": "INT",
|
||||||
|
"links": [
|
||||||
|
25,
|
||||||
|
35
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"Node name for S&R": "FluxBatchPrompts"
|
||||||
|
},
|
||||||
|
"widgets_values": [
|
||||||
|
"beautiful landscape\nmountain sunset\nocean waves\nfield of sunflowers",
|
||||||
|
3.5,
|
||||||
|
true,
|
||||||
|
0
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 16,
|
||||||
|
"type": "Image_saver",
|
||||||
|
"pos": [
|
||||||
|
250,
|
||||||
|
810
|
||||||
|
],
|
||||||
|
"size": [
|
||||||
|
1310,
|
||||||
|
440
|
||||||
|
],
|
||||||
|
"flags": {},
|
||||||
|
"order": 8,
|
||||||
|
"mode": 0,
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"name": "images",
|
||||||
|
"type": "IMAGE",
|
||||||
|
"link": 28
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "prompt_list",
|
||||||
|
"type": "STRING",
|
||||||
|
"link": 31
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "saved_paths",
|
||||||
|
"type": "STRING",
|
||||||
|
"links": null
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"properties": {
|
||||||
|
"Node name for S&R": "Image_saver"
|
||||||
|
},
|
||||||
|
"widgets_values": [
|
||||||
|
true,
|
||||||
|
"%Y-%m-%d_%H-%M-%S",
|
||||||
|
"PNG",
|
||||||
|
95,
|
||||||
|
"_",
|
||||||
|
8,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
2,
|
||||||
|
false,
|
||||||
|
true,
|
||||||
|
"",
|
||||||
|
"Batch",
|
||||||
|
"",
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 18,
|
||||||
|
"type": "PreviewAny",
|
||||||
|
"pos": [
|
||||||
|
-850,
|
||||||
|
-400
|
||||||
|
],
|
||||||
|
"size": [
|
||||||
|
140,
|
||||||
|
76
|
||||||
|
],
|
||||||
|
"flags": {},
|
||||||
|
"order": 4,
|
||||||
|
"mode": 0,
|
||||||
|
"inputs": [
|
||||||
|
{
|
||||||
|
"name": "source",
|
||||||
|
"type": "*",
|
||||||
|
"link": 35
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"outputs": [],
|
||||||
|
"properties": {
|
||||||
|
"cnr_id": "comfy-core",
|
||||||
|
"ver": "0.3.41",
|
||||||
|
"Node name for S&R": "PreviewAny"
|
||||||
|
},
|
||||||
|
"widgets_values": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"links": [
|
||||||
|
[
|
||||||
|
1,
|
||||||
|
5,
|
||||||
|
1,
|
||||||
|
2,
|
||||||
|
0,
|
||||||
|
"CLIP"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
2,
|
||||||
|
8,
|
||||||
|
0,
|
||||||
|
3,
|
||||||
|
0,
|
||||||
|
"IMAGE"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
6,
|
||||||
|
12,
|
||||||
|
0,
|
||||||
|
8,
|
||||||
|
0,
|
||||||
|
"LATENT"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
15,
|
||||||
|
6,
|
||||||
|
0,
|
||||||
|
12,
|
||||||
|
3,
|
||||||
|
"LATENT"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
22,
|
||||||
|
5,
|
||||||
|
1,
|
||||||
|
15,
|
||||||
|
0,
|
||||||
|
"CLIP"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
25,
|
||||||
|
15,
|
||||||
|
2,
|
||||||
|
6,
|
||||||
|
0,
|
||||||
|
"INT"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
28,
|
||||||
|
8,
|
||||||
|
0,
|
||||||
|
16,
|
||||||
|
0,
|
||||||
|
"IMAGE"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
30,
|
||||||
|
15,
|
||||||
|
0,
|
||||||
|
12,
|
||||||
|
1,
|
||||||
|
"CONDITIONING"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
31,
|
||||||
|
15,
|
||||||
|
1,
|
||||||
|
16,
|
||||||
|
1,
|
||||||
|
"STRING"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
32,
|
||||||
|
5,
|
||||||
|
0,
|
||||||
|
12,
|
||||||
|
0,
|
||||||
|
"MODEL"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
33,
|
||||||
|
5,
|
||||||
|
2,
|
||||||
|
8,
|
||||||
|
1,
|
||||||
|
"VAE"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
34,
|
||||||
|
2,
|
||||||
|
0,
|
||||||
|
12,
|
||||||
|
2,
|
||||||
|
"CONDITIONING"
|
||||||
|
],
|
||||||
|
[
|
||||||
|
35,
|
||||||
|
15,
|
||||||
|
2,
|
||||||
|
18,
|
||||||
|
0,
|
||||||
|
"*"
|
||||||
|
]
|
||||||
|
],
|
||||||
|
"groups": [],
|
||||||
|
"config": {},
|
||||||
|
"extra": {
|
||||||
|
"ds": {
|
||||||
|
"scale": 0.9090909090909091,
|
||||||
|
"offset": [
|
||||||
|
1888.0173001315407,
|
||||||
|
721.0654164895749
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"frontendVersion": "1.20.7",
|
||||||
|
"reroutes": [
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"pos": [
|
||||||
|
-790,
|
||||||
|
-490
|
||||||
|
],
|
||||||
|
"linkIds": [
|
||||||
|
30
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"pos": [
|
||||||
|
-920,
|
||||||
|
-30
|
||||||
|
],
|
||||||
|
"linkIds": [
|
||||||
|
31
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 4,
|
||||||
|
"pos": [
|
||||||
|
-580,
|
||||||
|
-590
|
||||||
|
],
|
||||||
|
"linkIds": [
|
||||||
|
32
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"pos": [
|
||||||
|
-130,
|
||||||
|
-550
|
||||||
|
],
|
||||||
|
"linkIds": [
|
||||||
|
33
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 6,
|
||||||
|
"pos": [
|
||||||
|
-790,
|
||||||
|
-120
|
||||||
|
],
|
||||||
|
"linkIds": [
|
||||||
|
34
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"linkExtensions": [
|
||||||
|
{
|
||||||
|
"id": 30,
|
||||||
|
"parentId": 2
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 31,
|
||||||
|
"parentId": 3
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 32,
|
||||||
|
"parentId": 4
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 33,
|
||||||
|
"parentId": 5
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 34,
|
||||||
|
"parentId": 6
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"version": 0.4
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user