From df0c9d20c214c1a3888804014b2d6d8e3868dff7 Mon Sep 17 00:00:00 2001 From: tusharbhutt Date: Sat, 21 Jun 2025 18:51:00 -0600 Subject: [PATCH] Add files via upload Uploading files for Endless Nodes V1.0 --- batchers/__init__.py | 32 + batchers/endless_batchers.py | 443 +++++++++++++ image_analysis/__init__.py | 14 + image_analysis/endless_image_analysis.py | 131 ++++ image_analysis/requirements.txt | 8 + image_saver/__init__.py | 8 + image_saver/endless_image_saver.py | 594 +++++++++++++++++ int_switches/__init__.py | 17 + int_switches/endless_int_switches.py | 116 ++++ int_switches_widget/__init__.py | 17 + .../__pycache__/__init__.cpython-311.pyc | Bin 0 -> 761 bytes ...ndless_int_switches_widget.cpython-311.pyc | Bin 0 -> 4116 bytes .../endless_int_switches_widget.py | 98 +++ random_prompt_selectors/__init__.py | 15 + .../endless_random_prompt_selectors.py | 107 ++++ randomizers/__init__.py | 17 + randomizers/endless_randomizers.py | 173 +++++ text_switches/__init__.py | 17 + text_switches/endless_text_switches.py | 100 +++ workflows/MultiPromptFlux.json | 603 ++++++++++++++++++ 20 files changed, 2510 insertions(+) create mode 100644 batchers/__init__.py create mode 100644 batchers/endless_batchers.py create mode 100644 image_analysis/__init__.py create mode 100644 image_analysis/endless_image_analysis.py create mode 100644 image_analysis/requirements.txt create mode 100644 image_saver/__init__.py create mode 100644 image_saver/endless_image_saver.py create mode 100644 int_switches/__init__.py create mode 100644 int_switches/endless_int_switches.py create mode 100644 int_switches_widget/__init__.py create mode 100644 int_switches_widget/__pycache__/__init__.cpython-311.pyc create mode 100644 int_switches_widget/__pycache__/endless_int_switches_widget.cpython-311.pyc create mode 100644 int_switches_widget/endless_int_switches_widget.py create mode 100644 random_prompt_selectors/__init__.py create mode 100644 
class EndlessNode_SimpleBatchPrompts:
    """
    Takes multiple prompts (one per line) and creates batched conditioning tensors.

    Automatically detects the number of prompts and creates the appropriate batch
    size. Handles batch size mismatches by cycling through prompts if needed.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "prompts": ("STRING", {"multiline": True, "default": "beautiful landscape\nmountain sunset\nocean waves\nfield of sunflowers"}),
            "clip": ("CLIP", ),
            "print_output": ("BOOLEAN", {"default": True}),
            "max_batch_size": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}),
        }}

    RETURN_TYPES = ("CONDITIONING", "STRING", "INT")
    RETURN_NAMES = ("CONDITIONING", "PROMPT_LIST", "PROMPT_COUNT")
    FUNCTION = "batch_encode"
    CATEGORY = "Endless 🌊✨/BatchProcessing"

    @staticmethod
    def _adjust_batch(prompt_lines, max_batch_size, print_output):
        """Truncate or cycle prompt_lines to match max_batch_size (0 = leave as-is).

        Returns the adjusted list; prompt_lines must be non-empty.
        """
        if max_batch_size > 0 and max_batch_size < len(prompt_lines):
            prompt_lines = prompt_lines[:max_batch_size]
            if print_output:
                print(f"Limited to first {max_batch_size} prompts due to max_batch_size setting")
        elif max_batch_size > len(prompt_lines) and max_batch_size > 0:
            # Repeat the original prompts until the requested batch size is reached.
            original_count = len(prompt_lines)
            while len(prompt_lines) < max_batch_size:
                prompt_lines.extend(prompt_lines[:min(original_count, max_batch_size - len(prompt_lines))])
            if print_output:
                print(f"Cycling through {original_count} prompts to fill batch size of {max_batch_size}")
        return prompt_lines

    @staticmethod
    def _encode_one(clip, prompt, index):
        """Encode one prompt via CLIP; fall back to the empty prompt on failure.

        Returns (cond, pooled); raises ValueError only when the fallback also fails.
        """
        try:
            tokens = clip.tokenize(prompt)
            return clip.encode_from_tokens(tokens, return_pooled=True)
        except Exception as e:
            print(f"Error encoding prompt {index+1} '{prompt}': {e}")
            try:
                tokens = clip.tokenize("")
                cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
                print(f" Using empty fallback for prompt {index+1}")
                return cond, pooled
            except Exception as fallback_error:
                raise ValueError(f"Failed to encode prompt {index+1} and fallback failed: {fallback_error}")

    def batch_encode(self, prompts, clip, print_output, max_batch_size=0):
        """Split `prompts` on newlines, encode each line, and batch the tensors.

        Returns (conditioning, '|'-joined prompt list, prompt count). The count
        reflects the prompts as entered, before any truncation/cycling —
        matching the original behavior.
        """
        prompt_lines = [line.strip() for line in prompts.split('\n') if line.strip()]
        prompt_count = len(prompt_lines)

        if not prompt_lines:
            raise ValueError("No valid prompts found. Please enter at least one prompt.")

        prompt_lines = self._adjust_batch(prompt_lines, max_batch_size, print_output)

        if print_output:
            print(f"Processing {len(prompt_lines)} prompts in batch:")
            for i, prompt in enumerate(prompt_lines):
                print(f" {i+1}: {prompt}")

        cond_tensors = []
        pooled_tensors = []
        for i, prompt in enumerate(prompt_lines):
            cond, pooled = self._encode_one(clip, prompt, i)
            cond_tensors.append(cond)
            pooled_tensors.append(pooled)

        try:
            # Stack the conditioning tensors along the batch dimension.
            batched_cond = torch.cat(cond_tensors, dim=0)
            batched_pooled = torch.cat(pooled_tensors, dim=0)

            if print_output:
                print(f"Created batched conditioning: {batched_cond.shape}")
                print(f"Created batched pooled: {batched_pooled.shape}")

            conditioning = [[batched_cond, {"pooled_output": batched_pooled}]]
        except Exception as e:
            print(f"Error creating batched conditioning: {e}")
            print("Falling back to list format...")
            # Per-prompt conditioning entries if batching fails (e.g. shape mismatch).
            conditioning = [[c, {"pooled_output": p}] for c, p in zip(cond_tensors, pooled_tensors)]

        prompt_list_str = "|".join(prompt_lines)
        return (conditioning, prompt_list_str, prompt_count)
"step": 0.1}), + "print_output": ("BOOLEAN", {"default": True}), + "max_batch_size": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}), + }} + + RETURN_TYPES = ("CONDITIONING", "STRING", "INT") + RETURN_NAMES = ("CONDITIONING", "PROMPT_LIST", "PROMPT_COUNT") + FUNCTION = "batch_encode_flux" + CATEGORY = "Endless 🌊✨/BatchProcessing" + + def batch_encode_flux(self, prompts, clip, guidance, print_output, max_batch_size=0): + # Split prompts by lines and clean them + prompt_lines = [line.strip() for line in prompts.split('\n') if line.strip()] + prompt_count = len(prompt_lines) + + if not prompt_lines: + raise ValueError("No valid prompts found. Please enter at least one prompt.") + + # Handle batch size logic + if max_batch_size > 0 and max_batch_size < len(prompt_lines): + prompt_lines = prompt_lines[:max_batch_size] + if print_output: + print(f"Limited to first {max_batch_size} prompts due to max_batch_size setting") + elif max_batch_size > len(prompt_lines) and max_batch_size > 0: + original_count = len(prompt_lines) + while len(prompt_lines) < max_batch_size: + prompt_lines.extend(prompt_lines[:min(original_count, max_batch_size - len(prompt_lines))]) + if print_output: + print(f"Cycling through {original_count} prompts to fill batch size of {max_batch_size}") + + if print_output: + print(f"Processing {len(prompt_lines)} FLUX prompts in batch:") + for i, prompt in enumerate(prompt_lines): + print(f" {i+1}: {prompt}") + + # Encode each prompt with FLUX-specific conditioning + cond_tensors = [] + pooled_tensors = [] + + for i, prompt in enumerate(prompt_lines): + try: + tokens = clip.tokenize(prompt) + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + cond_tensors.append(cond) + pooled_tensors.append(pooled) + except Exception as e: + print(f"Error encoding FLUX prompt {i+1} '{prompt}': {e}") + # Use a fallback empty prompt + try: + tokens = clip.tokenize("") + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + 
cond_tensors.append(cond) + pooled_tensors.append(pooled) + print(f" Using empty fallback for FLUX prompt {i+1}") + except Exception as fallback_error: + raise ValueError(f"Failed to encode FLUX prompt {i+1} and fallback failed: {fallback_error}") + + # Batch the conditioning tensors properly for FLUX + try: + # Stack the conditioning tensors along batch dimension + batched_cond = torch.cat(cond_tensors, dim=0) + batched_pooled = torch.cat(pooled_tensors, dim=0) + + if print_output: + print(f"Created FLUX batched conditioning: {batched_cond.shape}") + print(f"Created FLUX batched pooled: {batched_pooled.shape}") + + # FLUX-specific conditioning with guidance + conditioning = [[batched_cond, { + "pooled_output": batched_pooled, + "guidance": guidance, + "guidance_scale": guidance # Some FLUX implementations use this key + }]] + + except Exception as e: + print(f"Error creating FLUX batched conditioning: {e}") + print("Falling back to list format...") + # Fallback to list format if batching fails + conditioning = [] + for i in range(len(cond_tensors)): + flux_conditioning = [cond_tensors[i], { + "pooled_output": pooled_tensors[i], + "guidance": guidance, + "guidance_scale": guidance + }] + conditioning.append(flux_conditioning) + + prompt_list_str = "|".join(prompt_lines) + return (conditioning, prompt_list_str, prompt_count) + + +class EndlessNode_SDXLBatchPrompts: + """ + Specialized batch prompt encoder for SDXL models + Handles dual text encoders and SDXL-specific conditioning requirements + """ + @classmethod + def INPUT_TYPES(s): + return {"required": { + "prompts": ("STRING", {"multiline": True, "default": "beautiful landscape\nmountain sunset\nocean waves"}), + "clip": ("CLIP", ), + "print_output": ("BOOLEAN", {"default": True}), + "max_batch_size": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}), + }} + + RETURN_TYPES = ("CONDITIONING", "STRING", "INT") + RETURN_NAMES = ("CONDITIONING", "PROMPT_LIST", "PROMPT_COUNT") + FUNCTION = "batch_encode_sdxl" 
+ CATEGORY = "Endless 🌊✨/BatchProcessing" + + def batch_encode_sdxl(self, prompts, clip, print_output, max_batch_size=0): + # Split prompts by lines and clean them + prompt_lines = [line.strip() for line in prompts.split('\n') if line.strip()] + prompt_count = len(prompt_lines) + + if not prompt_lines: + raise ValueError("No valid prompts found. Please enter at least one prompt.") + + # Handle batch size logic + if max_batch_size > 0 and max_batch_size < len(prompt_lines): + prompt_lines = prompt_lines[:max_batch_size] + if print_output: + print(f"Limited to first {max_batch_size} prompts due to max_batch_size setting") + elif max_batch_size > len(prompt_lines) and max_batch_size > 0: + original_count = len(prompt_lines) + while len(prompt_lines) < max_batch_size: + prompt_lines.extend(prompt_lines[:min(original_count, max_batch_size - len(prompt_lines))]) + if print_output: + print(f"Cycling through {original_count} prompts to fill batch size of {max_batch_size}") + + if print_output: + print(f"Processing {len(prompt_lines)} SDXL prompts in batch:") + for i, prompt in enumerate(prompt_lines): + print(f" {i+1}: {prompt}") + + # Encode each prompt with SDXL-specific conditioning + cond_tensors = [] + pooled_tensors = [] + + for i, prompt in enumerate(prompt_lines): + try: + tokens = clip.tokenize(prompt) + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + cond_tensors.append(cond) + pooled_tensors.append(pooled) + except Exception as e: + print(f"Error encoding SDXL prompt {i+1} '{prompt}': {e}") + # Use a fallback empty prompt + try: + tokens = clip.tokenize("") + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + cond_tensors.append(cond) + pooled_tensors.append(pooled) + print(f" Using empty fallback for SDXL prompt {i+1}") + except Exception as fallback_error: + raise ValueError(f"Failed to encode SDXL prompt {i+1} and fallback failed: {fallback_error}") + + # Batch the conditioning tensors properly for SDXL + try: + # Stack 
the conditioning tensors along batch dimension + batched_cond = torch.cat(cond_tensors, dim=0) + batched_pooled = torch.cat(pooled_tensors, dim=0) + + if print_output: + print(f"Created SDXL batched conditioning: {batched_cond.shape}") + print(f"Created SDXL batched pooled: {batched_pooled.shape}") + + # SDXL-specific conditioning - simplified without size parameters + conditioning = [[batched_cond, {"pooled_output": batched_pooled}]] + + except Exception as e: + print(f"Error creating SDXL batched conditioning: {e}") + print("Falling back to list format...") + # Fallback to list format if batching fails + conditioning = [] + for i in range(len(cond_tensors)): + sdxl_conditioning = [cond_tensors[i], {"pooled_output": pooled_tensors[i]}] + conditioning.append(sdxl_conditioning) + + prompt_list_str = "|".join(prompt_lines) + return (conditioning, prompt_list_str, prompt_count) + + +class EndlessNode_BatchNegativePrompts: + """ + Handles batch negative prompts - simplified version without unnecessary parameters + """ + @classmethod + def INPUT_TYPES(s): + return {"required": { + "negative_prompts": ("STRING", {"multiline": True, "default": "blurry, low quality\nartifacts, distorted\nnoise, bad anatomy"}), + "clip": ("CLIP", ), + "print_output": ("BOOLEAN", {"default": True}), + "max_batch_size": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}), + }} + + RETURN_TYPES = ("CONDITIONING", "STRING") + RETURN_NAMES = ("NEGATIVE_CONDITIONING", "NEGATIVE_PROMPT_LIST") + FUNCTION = "batch_encode_negative" + CATEGORY = "Endless 🌊✨/BatchProcessing" + + def batch_encode_negative(self, negative_prompts, clip, print_output, max_batch_size=0): + # Split prompts by lines and clean them + prompt_lines = [line.strip() for line in negative_prompts.split('\n') if line.strip()] + + if not prompt_lines: + # Use empty negative prompt if none provided + prompt_lines = [""] + + # Handle batch size logic + if max_batch_size > 0 and max_batch_size < len(prompt_lines): + prompt_lines = 
prompt_lines[:max_batch_size] + if print_output: + print(f"Limited to first {max_batch_size} negative prompts due to max_batch_size setting") + elif max_batch_size > len(prompt_lines) and max_batch_size > 0: + original_count = len(prompt_lines) + while len(prompt_lines) < max_batch_size: + prompt_lines.extend(prompt_lines[:min(original_count, max_batch_size - len(prompt_lines))]) + if print_output: + print(f"Cycling through {original_count} negative prompts to fill batch size of {max_batch_size}") + + if print_output: + print(f"Processing {len(prompt_lines)} negative prompts in batch:") + for i, prompt in enumerate(prompt_lines): + print(f" {i+1}: {prompt if prompt else '(empty)'}") + + # Encode each negative prompt + cond_tensors = [] + pooled_tensors = [] + + for i, prompt in enumerate(prompt_lines): + try: + tokens = clip.tokenize(prompt) + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + cond_tensors.append(cond) + pooled_tensors.append(pooled) + except Exception as e: + print(f"Error encoding negative prompt {i+1} '{prompt}': {e}") + # Use fallback empty prompt + try: + tokens = clip.tokenize("") + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + cond_tensors.append(cond) + pooled_tensors.append(pooled) + print(f" Using empty fallback for negative prompt {i+1}") + except Exception as fallback_error: + raise ValueError(f"Failed to encode negative prompt {i+1} and fallback failed: {fallback_error}") + + # Batch the conditioning tensors - simplified without model-specific parameters + try: + # Stack the conditioning tensors along batch dimension + batched_cond = torch.cat(cond_tensors, dim=0) + batched_pooled = torch.cat(pooled_tensors, dim=0) + + if print_output: + print(f"Created negative batched conditioning: {batched_cond.shape}") + print(f"Created negative batched pooled: {batched_pooled.shape}") + + # Simple conditioning format that works with all model types + conditioning = [[batched_cond, {"pooled_output": 
class EndlessNode_PromptCounter:
    """
    Utility node to count prompts from input text and display a preview.
    The preview is shown in the console output and returned as a string output.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompts": ("STRING", {"multiline": True, "forceInput": True}),
                "print_to_console": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("INT", "STRING")
    RETURN_NAMES = ("count", "preview")
    FUNCTION = "count_prompts"
    CATEGORY = "Endless 🌊✨/BatchProcessing"

    def count_prompts(self, prompts, print_to_console):
        """Count non-empty prompt lines; preview shows at most the first five."""
        entries = [row.strip() for row in prompts.split('\n') if row.strip()]
        total = len(entries)

        plural = 's' if total != 1 else ''
        pieces = [f"Found {total} prompt{plural}:\n"]
        pieces.extend(f"{pos+1}. {entry}\n" for pos, entry in enumerate(entries[:5]))
        if total > 5:
            pieces.append(f"... and {total - 5} more")
        preview = "".join(pieces)

        if print_to_console:
            print(f"\n=== Prompt Counter ===")
            print(preview)
            print("======================\n")

        return (total, preview)
os.path.join(os.path.expanduser("~"), ".cache", "clip") + +# Helper to download/load CLIP model from OpenAI +def load_clip_model(): + import clip # requires `clip` package from OpenAI + model, preprocess = clip.load(CLIP_MODEL_NAME, device="cpu", download_root=CLIP_DOWNLOAD_PATH) + return model.eval(), preprocess + +# Image Complexity via Edge Density +def compute_edge_density(image: Image.Image) -> float: + grayscale = image.convert("L") + edges = grayscale.filter(ImageFilter.FIND_EDGES) + edge_array = np.asarray(edges, dtype=np.uint8) + edge_density = np.mean(edge_array > 20) # percentage of edge pixels + return round(edge_density * 10, 3) # scale 0-10 + +# Image Novelty via distance from reference CLIP embeddings +class ClipImageEmbedder: + def __init__(self): + self.model, self.preprocess = load_clip_model() + + def get_embedding(self, image: Image.Image) -> torch.Tensor: + image_input = self.preprocess(image).unsqueeze(0) + with torch.no_grad(): + embedding = self.model.encode_image(image_input).float() + return F.normalize(embedding, dim=-1) + +# You could preload this from reference images +REFERENCE_EMBEDDINGS = [] + +class EndlessNode_ImageNoveltyScorer: + def __init__(self): + self.embedder = ClipImageEmbedder() + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "reference_images": ("IMAGE", {"default": None, "optional": True}), + } + } + + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("novelty_score",) + FUNCTION = "score_novelty" + CATEGORY = "Endless 🌊✨/Image Scoring" + + def score_novelty(self, image, reference_images=None): + img = self._to_pil(image) + img_emb = self.embedder.get_embedding(img) + + references = REFERENCE_EMBEDDINGS + if reference_images is not None: + references = [self.embedder.get_embedding(self._to_pil(ref)) for ref in reference_images] + + if not references: + return (0.0,) + + sims = [F.cosine_similarity(img_emb, ref_emb).item() for ref_emb in references] + avg_sim = sum(sims) / len(sims) 
+ novelty = round((1.0 - avg_sim) * 10, 3) # higher = more novel + return (novelty,) + + def _to_pil(self, img): + if isinstance(img, torch.Tensor): + img = img.squeeze().detach().cpu().numpy() + if isinstance(img, np.ndarray): + if img.max() <= 1.0: + img = (img * 255).astype(np.uint8) + else: + img = img.astype(np.uint8) + if img.ndim == 3: + return Image.fromarray(img) + elif img.ndim == 2: + return Image.fromarray(img, mode='L') + elif isinstance(img, Image.Image): + return img + else: + raise ValueError(f"Unsupported image type: {type(img)}") + + +class EndlessNode_ImageComplexityScorer: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",) + } + } + + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("complexity_score",) + FUNCTION = "score_complexity" + CATEGORY = "Endless 🌊✨/Image Scoring" + + def score_complexity(self, image): + img = self._to_pil(image) + complexity = compute_edge_density(img) + return (complexity,) + + def _to_pil(self, img): + if isinstance(img, torch.Tensor): + img = img.squeeze().detach().cpu().numpy() + if isinstance(img, np.ndarray): + if img.max() <= 1.0: + img = (img * 255).astype(np.uint8) + else: + img = img.astype(np.uint8) + if img.ndim == 3: + return Image.fromarray(img) + elif img.ndim == 2: + return Image.fromarray(img, mode='L') + elif isinstance(img, Image.Image): + return img + else: + raise ValueError(f"Unsupported image type: {type(img)}") + diff --git a/image_analysis/requirements.txt b/image_analysis/requirements.txt new file mode 100644 index 0000000..34e99a0 --- /dev/null +++ b/image_analysis/requirements.txt @@ -0,0 +1,8 @@ +torch>=1.13.1 +torchvision>=0.14.1 +Pillow>=9.0.0 +numpy>=1.21.0 +ftfy +regex +tqdm +clip @ git+https://github.com/openai/CLIP.git \ No newline at end of file diff --git a/image_saver/__init__.py b/image_saver/__init__.py new file mode 100644 index 0000000..cce545d --- /dev/null +++ b/image_saver/__init__.py @@ -0,0 +1,8 @@ +from 
.endless_image_saver import EndlessNode_Imagesaver + +NODE_CLASS_MAPPINGS = { + "Image_saver": EndlessNode_Imagesaver, +} +NODE_DISPLAY_NAME_MAPPINGS = { + "Image_saver": "Image Saver", +} \ No newline at end of file diff --git a/image_saver/endless_image_saver.py b/image_saver/endless_image_saver.py new file mode 100644 index 0000000..94b0a81 --- /dev/null +++ b/image_saver/endless_image_saver.py @@ -0,0 +1,594 @@ +import os +import json +import re +from datetime import datetime +from PIL import Image, PngImagePlugin +import numpy as np +import torch +import folder_paths +from PIL.PngImagePlugin import PngInfo +import platform + +class EndlessNode_Imagesaver: + """ + Enhanced batch image saver with comprehensive metadata support + Saves batched images with individual prompt names in filenames + Automatically handles multiple images from batch processing + Enhanced with workflow embedding, JSON export, and robust filename handling + """ + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.compress_level = 4 + # OS-specific filename length limits + self.max_filename_length = self._get_max_filename_length() + + def _get_max_filename_length(self): + """Get maximum filename length based on OS""" + system = platform.system().lower() + if system == 'windows': + return 255 # NTFS limit + elif system in ['linux', 'darwin']: # Linux and macOS + return 255 # ext4/APFS limit + else: + return 200 # Conservative fallback + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "prompt_list": ("STRING", {"forceInput": True}), + "include_timestamp": ("BOOLEAN", {"default": True}), + "timestamp_format": ("STRING", {"default": "%Y-%m-%d_%H-%M-%S", "description": "Use Python strftime format.\nExample: %Y-%m-%d %H-%M-%S\nSee: strftime.org for full options."}), + "image_format": (["PNG", "JPEG", "WEBP"], {"default": "PNG"}), + "jpeg_quality": ("INT", {"default": 95, "min": 1, "max": 100, "step": 
1}), + "delimiter": ("STRING", {"default": "_"}), + "prompt_words_limit": ("INT", {"default": 8, "min": 1, "max": 16, "step": 1}), + "embed_workflow": ("BOOLEAN", {"default": True}), + "save_json_metadata": ("BOOLEAN", {"default": False}), + # ITEM #2: Enable/disable number padding + "enable_filename_numbering": ("BOOLEAN", {"default": True}), + # ITEM #1: Filename Number Padding Control + "filename_number_padding": ("INT", {"default": 2, "min": 1, "max": 9, "step": 1}), + "filename_number_start": ("BOOLEAN", {"default": False}), + # ITEM #3: Conditional PNG Metadata Embedding + "embed_png_metadata": ("BOOLEAN", {"default": True}), + }, + "optional": + {"output_path": ("STRING", {"default": ""}), + "filename_prefix": ("STRING", {"default": "Batch"}), + "negative_prompt_list": ("STRING", {"default": ""}), + "json_folder": ("STRING", {"default": ""}), + }, + "hidden": { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO" + } + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("saved_paths",) + FUNCTION = "save_batch_images" + OUTPUT_NODE = True + CATEGORY = "Endless 🌊✨/IO" + + def encode_emoji(self, obj): + """Properly encode emojis and special characters""" + if isinstance(obj, str): + return obj.encode('utf-8', 'surrogatepass').decode('utf-8') + return obj + + def clean_filename(self, text, max_words=8, delimiter="_"): + """Clean text for use in filenames with word limit and emoji support""" + # Limit to specified number of words + words = text.split()[:max_words] + text = ' '.join(words) + + # Handle emojis by encoding them properly + text = self.encode_emoji(text) + + # Replace illegal characters with delimiter, then clean up spaces + illegal_chars = r'[<>:"/\\|?*]' + clean_text = re.sub(illegal_chars, delimiter, text) + clean_text = re.sub(r'\s+', delimiter, clean_text) # Replace spaces with delimiter + clean_text = re.sub(r'[^\w\-_.{}]'.format(re.escape(delimiter)), '', clean_text) # Keep only safe chars + + return clean_text + + def 
format_timestamp(self, dt, format_string, delimiter='_'): + try: + formatted = dt.strftime(format_string) + # Replace colons first + formatted = formatted.replace(':', '-') + # Then replace all whitespace with the user's delimiter + if delimiter: + formatted = re.sub(r'\s+', delimiter, formatted) + return formatted + except Exception as e: + print(f"Invalid timestamp format: {e}") + return dt.strftime("%Y-%m-%d_%H-%M-%S") + + def validate_and_process_path(self, path, delimiter="_"): + if not path or path.strip() == "": + return path + + now = datetime.now() + + # Normalize path separators + path = path.replace("/", os.sep).replace("\\", os.sep) + + # Handle UNC or drive prefix + unc_prefix = "" + parts = path.split(os.sep) + + if path.startswith("\\\\"): # UNC path + if len(parts) >= 4: + unc_prefix = os.sep.join(parts[:4]) # \\server\share + parts = parts[4:] + else: + raise ValueError(f"Invalid UNC path: {path}") + elif re.match(r"^[A-Za-z]:$", parts[0]): # Drive letter + unc_prefix = parts[0] + parts = parts[1:] + + # Process the remaining subfolders + processed_parts = [] + for part in parts: + if not part: + continue + if "%" in part: + # Format date placeholders + formatted = self.format_timestamp(now, part, delimiter) + else: + # Sanitize folder names + formatted = re.sub(r'[<>:"/\\|?*]', delimiter, part) + processed_parts.append(formatted) + + # Reconstruct full path + full_path = os.path.join(unc_prefix, *processed_parts) + return full_path + + def ensure_filename_length(self, full_path, base_name, extension): + """Ensure the full filename doesn't exceed OS limits""" + directory = os.path.dirname(full_path) + + # Calculate available space for filename + dir_length = len(directory) + 1 # +1 for path separator + available_length = self.max_filename_length - len(extension) + max_base_length = available_length - dir_length + + if len(base_name) > max_base_length: + # Truncate base name to fit + base_name = base_name[:max_base_length-3] + "..." 
# -3 for ellipsis + + return os.path.join(directory, base_name + extension) + + def get_unique_filename(self, file_path): + """Generate unique filename by adding incremental numbers if file exists""" + if not os.path.exists(file_path): + return file_path + + directory = os.path.dirname(file_path) + filename = os.path.basename(file_path) + name, ext = os.path.splitext(filename) + + counter = 1 + while True: + new_name = f"{name}_{counter:03d}{ext}" + new_path = os.path.join(directory, new_name) + + # Check length constraints + if len(new_name) > self.max_filename_length: + # Truncate original name to make room for counter + available = self.max_filename_length - len(f"_{counter:03d}{ext}") + truncated_name = name[:available-3] + "..." + new_name = f"{truncated_name}_{counter:03d}{ext}" + new_path = os.path.join(directory, new_name) + + if not os.path.exists(new_path): + return new_path + counter += 1 + + def save_json_metadata(self, json_path, prompt_text, negative_text, + batch_index, creation_time, prompt=None, extra_pnginfo=None): + """Save JSON metadata file""" + metadata = { + "prompt": prompt_text, + "negative_prompt": negative_text, + "batch_index": batch_index, + "creation_time": creation_time, + "workflow_prompt": prompt, + "extra_pnginfo": extra_pnginfo + } + + try: + with open(json_path, 'w', encoding='utf-8', newline='\n') as f: + json.dump(metadata, f, indent=2, default=self.encode_emoji, ensure_ascii=False) + return True + except Exception as e: + print(f"Failed to save JSON metadata: {e}") + return False + + def generate_numbered_filename(self, filename_prefix, delimiter, counter, + filename_number_padding, filename_number_start, + enable_filename_numbering, date_str, clean_prompt, ext): + """Generate filename with configurable number positioning and padding""" + # ITEM #3: Build filename parts in the correct order based on settings + filename_parts = [] + + # Always add timestamp first if provided + if date_str: + filename_parts.append(date_str) + + 
# Add number after timestamp if number_start is True AND numbering is enabled + if enable_filename_numbering and filename_number_start: + counter_str = f"{counter:0{filename_number_padding}}" + filename_parts.append(counter_str) + + # Add filename prefix if provided + if filename_prefix: + filename_parts.append(filename_prefix) + + # Add cleaned prompt + filename_parts.append(clean_prompt) + + # Add number at the end if number_start is False AND numbering is enabled + if enable_filename_numbering and not filename_number_start: + counter_str = f"{counter:0{filename_number_padding}}" + filename_parts.append(counter_str) + + # Join all parts with delimiter + filename = delimiter.join(filename_parts) + ext + + return filename + + def save_batch_images(self, images, prompt_list, include_timestamp=True, + timestamp_format="%Y-%m-%d_%H-%M-%S", image_format="PNG", + jpeg_quality=95, delimiter="_", + prompt_words_limit=8, embed_workflow=True, save_json_metadata=False, + enable_filename_numbering=True, filename_number_padding=2, + filename_number_start=False, embed_png_metadata=True, + output_path="", filename_prefix="batch", + negative_prompt_list="", json_folder="", prompt=None, extra_pnginfo=None): + + # Debug: Print tensor information + print(f"DEBUG: Images tensor shape: {images.shape}") + print(f"DEBUG: Images tensor type: {type(images)}") + + # Process output path with date/time validation (always process regardless of timestamp toggle) + processed_output_path = self.validate_and_process_path(output_path, delimiter) + + # Set output directory + if processed_output_path.strip() != "": + if not os.path.isabs(processed_output_path): + output_dir = os.path.join(self.output_dir, processed_output_path) + else: + output_dir = processed_output_path + else: + output_dir = self.output_dir + + # Create directory if it doesn't exist + try: + os.makedirs(output_dir, exist_ok=True) + except Exception as e: + raise ValueError(f"Could not create output directory {output_dir}: {e}") + 
+ # Set up JSON directory + if save_json_metadata: + if json_folder.strip(): + processed_json_folder = self.validate_and_process_path(json_folder, delimiter) + if not os.path.isabs(processed_json_folder): + json_dir = os.path.join(self.output_dir, processed_json_folder) + else: + json_dir = processed_json_folder + else: + json_dir = output_dir + + try: + os.makedirs(json_dir, exist_ok=True) + except Exception as e: + print(f"Warning: Could not create JSON directory {json_dir}: {e}") + json_dir = output_dir + + # Generate datetime string if timestamp is enabled + now = datetime.now() + if include_timestamp: + date_str = self.format_timestamp(now, timestamp_format, delimiter) + else: + date_str = None + + # Parse individual prompts from the prompt list + individual_prompts = prompt_list.split('|') + individual_negatives = negative_prompt_list.split('|') if negative_prompt_list else [] + + # Set file extension + if image_format == "PNG": + ext = ".png" + elif image_format == "JPEG": + ext = ".jpg" + elif image_format == "WEBP": + ext = ".webp" + else: + ext = ".png" + + saved_paths = [] + + # Handle different tensor formats + if isinstance(images, torch.Tensor): + # Convert to numpy for easier handling + images_np = images.cpu().numpy() + print(f"DEBUG: Converted to numpy shape: {images_np.shape}") + + # Check if we have a batch dimension + if len(images_np.shape) == 4: # Batch format: [B, H, W, C] or [B, C, H, W] + batch_size = images_np.shape[0] + print(f"DEBUG: Found batch of {batch_size} images") + + for i in range(batch_size): + try: + # Extract single image from batch + img_array = images_np[i] + + # Validate and process the image array + if len(img_array.shape) != 3: + raise ValueError(f"Expected 3D tensor for image {i+1}, got shape {img_array.shape}") + + # Convert to 0-255 range if needed + if img_array.max() <= 1.0: + img_array = img_array * 255.0 + + img_array = np.clip(img_array, 0, 255).astype(np.uint8) + + # Handle different channel orders (HWC vs CHW) + 
if img_array.shape[0] == 3 or img_array.shape[0] == 4: # CHW format + img_array = np.transpose(img_array, (1, 2, 0)) # Convert to HWC + + img = Image.fromarray(img_array) + + # Get the corresponding prompt for this image + if i < len(individual_prompts): + prompt_text = individual_prompts[i].strip() + else: + # Cycle through prompts if we have more images than prompts + prompt_text = individual_prompts[i % len(individual_prompts)].strip() + print(f"Note: Cycling prompt for image {i+1} (using prompt {(i % len(individual_prompts)) + 1})") + + # Get corresponding negative prompt + negative_text = "" + if individual_negatives: + if i < len(individual_negatives): + negative_text = individual_negatives[i].strip() + else: + negative_text = individual_negatives[i % len(individual_negatives)].strip() + + # Clean the prompt for filename use + clean_prompt = self.clean_filename(prompt_text, prompt_words_limit, delimiter) + + # Generate filename using the new method + filename = self.generate_numbered_filename( + filename_prefix, delimiter, i+1, + filename_number_padding, filename_number_start, + enable_filename_numbering, date_str, clean_prompt, ext + ) + + # Create full file path and ensure length constraints + base_filename = os.path.splitext(filename)[0] + temp_path = os.path.join(output_dir, filename) + file_path = self.ensure_filename_length(temp_path, base_filename, ext) + + # Ensure unique filename + file_path = self.get_unique_filename(file_path) + + # Create JSON path if needed + if save_json_metadata: + json_base = os.path.splitext(os.path.basename(file_path))[0] + json_path = os.path.join(json_dir, json_base + ".json") + json_path = self.get_unique_filename(json_path) + + # Save image based on format + if image_format == "PNG": + # ITEM #3: Conditional PNG metadata embedding + if embed_png_metadata: + # Prepare PNG metadata + metadata = PngImagePlugin.PngInfo() + metadata.add_text("prompt", prompt_text) + metadata.add_text("negative_prompt", negative_text) + 
metadata.add_text("batch_index", str(i+1)) + metadata.add_text("creation_time", now.isoformat()) + + # Add workflow data if requested + if embed_workflow: + if prompt is not None: + metadata.add_text("workflow", json.dumps(prompt, default=self.encode_emoji)) + if extra_pnginfo is not None: + for key, value in extra_pnginfo.items(): + metadata.add_text(key, json.dumps(value, default=self.encode_emoji)) + + img.save(file_path, format="PNG", optimize=True, + compress_level=self.compress_level, pnginfo=metadata) + else: + # ITEM #3: Save clean PNG without metadata + img.save(file_path, format="PNG", optimize=True, + compress_level=self.compress_level) + + elif image_format == "JPEG": + # Convert RGBA to RGB for JPEG + if img.mode == 'RGBA': + background = Image.new('RGB', img.size, (255, 255, 255)) + background.paste(img, mask=img.split()[-1]) + img = background + img.save(file_path, format="JPEG", quality=jpeg_quality, optimize=True) + + elif image_format == "WEBP": + img.save(file_path, format="WEBP", quality=jpeg_quality, method=6) + + # Save JSON metadata if requested + if save_json_metadata: + self.save_json_metadata(json_path, prompt_text, negative_text, + i+1, now.isoformat(), prompt, extra_pnginfo) + + saved_paths.append(file_path) + print(f"Saved: {os.path.basename(file_path)}") + print(f" Prompt: {prompt_text}") + if negative_text: + print(f" Negative: {negative_text}") + if save_json_metadata: + print(f" JSON: {os.path.basename(json_path)}") + + except Exception as e: + error_msg = f"Failed to save image {i+1}: {e}" + print(error_msg) + # Continue with other images rather than failing completely + saved_paths.append(f"ERROR: {error_msg}") + + elif len(images_np.shape) == 3: # Single image format: [H, W, C] + print("DEBUG: Single image detected, processing as batch of 1") + # Process as single image + img_array = images_np + + # Convert to 0-255 range if needed + if img_array.max() <= 1.0: + img_array = img_array * 255.0 + + img_array = np.clip(img_array, 0, 
255).astype(np.uint8) + img = Image.fromarray(img_array) + + prompt_text = individual_prompts[0].strip() if individual_prompts else "no_prompt" + negative_text = individual_negatives[0].strip() if individual_negatives else "" + clean_prompt = self.clean_filename(prompt_text, prompt_words_limit, delimiter) + + # Generate filename using the new method + filename = self.generate_numbered_filename( + filename_prefix, delimiter, 1, + filename_number_padding, filename_number_start, + enable_filename_numbering, date_str, clean_prompt, ext + ) + + base_filename = os.path.splitext(filename)[0] + temp_path = os.path.join(output_dir, filename) + file_path = self.ensure_filename_length(temp_path, base_filename, ext) + file_path = self.get_unique_filename(file_path) + + if save_json_metadata: + json_base = os.path.splitext(os.path.basename(file_path))[0] + json_path = os.path.join(json_dir, json_base + ".json") + json_path = self.get_unique_filename(json_path) + + if image_format == "PNG": + # ITEM #3: Conditional PNG metadata embedding + if embed_png_metadata: + metadata = PngImagePlugin.PngInfo() + metadata.add_text("prompt", prompt_text) + metadata.add_text("negative_prompt", negative_text) + metadata.add_text("batch_index", "1") + metadata.add_text("creation_time", now.isoformat()) + + if embed_workflow: + if prompt is not None: + metadata.add_text("workflow", json.dumps(prompt, default=self.encode_emoji)) + if extra_pnginfo is not None: + for key, value in extra_pnginfo.items(): + metadata.add_text(key, json.dumps(value, default=self.encode_emoji)) + + img.save(file_path, format="PNG", optimize=True, + compress_level=self.compress_level, pnginfo=metadata) + else: + # ITEM #3: Save clean PNG without metadata + img.save(file_path, format="PNG", optimize=True, + compress_level=self.compress_level) + elif image_format == "JPEG": + if img.mode == 'RGBA': + background = Image.new('RGB', img.size, (255, 255, 255)) + background.paste(img, mask=img.split()[-1]) + img = background + 
img.save(file_path, format="JPEG", quality=jpeg_quality, optimize=True) + elif image_format == "WEBP": + img.save(file_path, format="WEBP", quality=jpeg_quality, method=6) + + if save_json_metadata: + self.save_json_metadata(json_path, prompt_text, negative_text, + 1, now.isoformat(), prompt, extra_pnginfo) + + saved_paths.append(file_path) + print(f"Saved: {os.path.basename(file_path)}") + print(f" Prompt: {prompt_text}") + + else: + raise ValueError(f"Unexpected image tensor shape: {images_np.shape}") + + else: + # Handle case where images might be a list + print(f"DEBUG: Images is not a tensor, type: {type(images)}") + for i, image in enumerate(images): + try: + if isinstance(image, torch.Tensor): + img_array = image.cpu().numpy() + else: + img_array = np.array(image) + + # Process similar to above... + if img_array.max() <= 1.0: + img_array = img_array * 255.0 + + img_array = np.clip(img_array, 0, 255).astype(np.uint8) + + if len(img_array.shape) == 3 and (img_array.shape[0] == 3 or img_array.shape[0] == 4): + img_array = np.transpose(img_array, (1, 2, 0)) + + img = Image.fromarray(img_array) + + prompt_text = individual_prompts[i % len(individual_prompts)].strip() if individual_prompts else "no_prompt" + negative_text = individual_negatives[i % len(individual_negatives)].strip() if individual_negatives else "" + clean_prompt = self.clean_filename(prompt_text, prompt_words_limit, delimiter) + + # Generate filename using the new method + filename = self.generate_numbered_filename( + filename_prefix, delimiter, i+1, + filename_number_padding, filename_number_start, + enable_filename_numbering, date_str, clean_prompt, ext + ) + + base_filename = os.path.splitext(filename)[0] + temp_path = os.path.join(output_dir, filename) + file_path = self.ensure_filename_length(temp_path, base_filename, ext) + file_path = self.get_unique_filename(file_path) + + if save_json_metadata: + json_base = os.path.splitext(os.path.basename(file_path))[0] + json_path = 
os.path.join(json_dir, json_base + ".json") + json_path = self.get_unique_filename(json_path) + + # ITEM #3: Apply conditional PNG metadata for all image formats logic + if image_format == "PNG" and embed_png_metadata: + metadata = PngImagePlugin.PngInfo() + metadata.add_text("prompt", prompt_text) + metadata.add_text("negative_prompt", negative_text) + metadata.add_text("batch_index", str(i+1)) + metadata.add_text("creation_time", now.isoformat()) + + if embed_workflow: + if prompt is not None: + metadata.add_text("workflow", json.dumps(prompt, default=self.encode_emoji)) + if extra_pnginfo is not None: + for key, value in extra_pnginfo.items(): + metadata.add_text(key, json.dumps(value, default=self.encode_emoji)) + + img.save(file_path, format="PNG", optimize=True, + compress_level=self.compress_level, pnginfo=metadata) + else: + img.save(file_path, format=image_format.upper()) + + if save_json_metadata: + self.save_json_metadata(json_path, prompt_text, negative_text, + i+1, now.isoformat(), prompt, extra_pnginfo) + + saved_paths.append(file_path) + print(f"Saved: {os.path.basename(file_path)}") + + except Exception as e: + error_msg = f"Failed to save image {i+1}: {e}" + print(error_msg) + saved_paths.append(f"ERROR: {error_msg}") + + # Return all saved paths joined with newlines + return ("\n".join(saved_paths),) \ No newline at end of file diff --git a/int_switches/__init__.py b/int_switches/__init__.py new file mode 100644 index 0000000..2ac9566 --- /dev/null +++ b/int_switches/__init__.py @@ -0,0 +1,17 @@ +from .endless_int_switches import ( + EndlessNode_FourInputIntSwitch, + EndlessNode_SixInputIntSwitch, + EndlessNode_EightInputIntSwitch, +) + +NODE_CLASS_MAPPINGS = { + "Four_Input_Int_Switch": EndlessNode_FourInputIntSwitch, + "Six_Input_Int_Switch": EndlessNode_SixInputIntSwitch, + "Eight_Input_Int_Switch": EndlessNode_EightInputIntSwitch, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "Four_Input_Int_Switch": "Four Input Integer Switch", + 
"Six_Input_Int_Switch": "Six Input Integer Switch", + "Eight_Input_Int_Switch": "Eight Input Integer Switch", +} diff --git a/int_switches/endless_int_switches.py b/int_switches/endless_int_switches.py new file mode 100644 index 0000000..37b8086 --- /dev/null +++ b/int_switches/endless_int_switches.py @@ -0,0 +1,116 @@ +class EndlessNode_FourInputIntSwitch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "switch": ("INT", {"default": 1, "min": 1, "max": 4}), + }, + "optional": { + "int1": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int2": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int3": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int4": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + } + } + + RETURN_TYPES = ("INT",) + FUNCTION = "switch_int" + CATEGORY = "Endless 🌊✨/Integer Switches" + OUTPUT_NODE = True + + def switch_int(self, switch, int1=None, int2=None, int3=None, int4=None): + ints = [int1, int2, int3, int4] + + # Check if the selected switch position has a connected input + if 1 <= switch <= 4: + selected_value = ints[switch - 1] + if selected_value is not None: + return (selected_value,) + + # If no valid input is connected at the switch position, return 0 + return (0,) + + +class EndlessNode_SixInputIntSwitch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "switch": ("INT", {"default": 1, "min": 1, "max": 6}), + }, + "optional": { + "int1": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int2": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int3": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int4": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int5": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int6": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + } + } + + RETURN_TYPES = ("INT",) + 
FUNCTION = "switch_int" + CATEGORY = "Endless 🌊✨/Integer Switches" + OUTPUT_NODE = True + + def switch_int(self, switch, int1=None, int2=None, int3=None, int4=None, int5=None, int6=None): + ints = [int1, int2, int3, int4, int5, int6] + + # Check if the selected switch position has a connected input + if 1 <= switch <= 6: + selected_value = ints[switch - 1] + if selected_value is not None: + return (selected_value,) + + # If no valid input is connected at the switch position, return 0 + return (0,) + + +class EndlessNode_EightInputIntSwitch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "switch": ("INT", {"default": 1, "min": 1, "max": 8}), + }, + "optional": { + "int1": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int2": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int3": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int4": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int5": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int6": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int7": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + "int8": ("INT", {"default": 0, "max": 999999999999, "forceInput": True}), + } + } + + RETURN_TYPES = ("INT",) + FUNCTION = "switch_int" + CATEGORY = "Endless 🌊✨/Integer Switches" + OUTPUT_NODE = True + + def switch_int(self, switch, int1=None, int2=None, int3=None, int4=None, int5=None, int6=None, int7=None, int8=None): + ints = [int1, int2, int3, int4, int5, int6, int7, int8] + + # Check if the selected switch position has a connected input + if 1 <= switch <= 8: + selected_value = ints[switch - 1] + if selected_value is not None: + return (selected_value,) + + # If no valid input is connected at the switch position, return 0 + return (0,) + + +NODE_CLASS_MAPPINGS = { + "Four_Input_Int_Switch": EndlessNode_FourInputIntSwitch, + "Six_Input_Int_Switch": 
EndlessNode_SixInputIntSwitch, + "Eight_Input_Int_Switch": EndlessNode_EightInputIntSwitch, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "Four_Input_Int_Switch": "Four Input Integer Switch", + "Six_Input_Int_Switch": "Six Input Integer Switch", + "Eight_Input_Int_Switch": "Eight Input Integer Switch", +} \ No newline at end of file diff --git a/int_switches_widget/__init__.py b/int_switches_widget/__init__.py new file mode 100644 index 0000000..6c7f6e1 --- /dev/null +++ b/int_switches_widget/__init__.py @@ -0,0 +1,17 @@ +from .endless_int_switches_widget import ( + EndlessNode_FourInputIntSwitch_Widget, + EndlessNode_SixInputIntSwitch_Widget, + EndlessNode_EightInputIntSwitch_Widget, +) + +NODE_CLASS_MAPPINGS = { + "Four_Input_Int_Switch_Widget": EndlessNode_FourInputIntSwitch_Widget, + "Six_Input_Int_Switch_Widget": EndlessNode_SixInputIntSwitch_Widget, + "Eight_Input_Int_Switch_Widget": EndlessNode_EightInputIntSwitch_Widget, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "Four_Input_Int_Switch_Widget": "Four Input Integer Switch (Widget)", + "Six_Input_Int_Switch_Widget": "Six Input Integer Switch (Widget)", + "Eight_Input_Int_Switch_Widget": "Eight Input Integer Switch (Widget)", +} diff --git a/int_switches_widget/__pycache__/__init__.cpython-311.pyc b/int_switches_widget/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6ca6cd5e56e46bf9a916de93fe7b49357054906 GIT binary patch literal 761 zcmb7CO-sW-5Zz5u6`^|YAoxYKR_w)tM-d^kRiZXlf`}!sXm{H#hMi&)WE!RpG5R2#rw#+oi;cq%5E$-`y6$;YV^g75<6t zttHC2R2fjR77KDi_1xz0GR591=>L(DPhEabGK&+@A{+LPaJYp?d&UI0PP-PU(BmK( zRgeLQ=FF3Fg_MqU%OX|1Rx`{4D_Tf5%Z61u)-Q;uS1X^~BYJ}}G`K45*m|eq@W!R& zz9(!;US2t@%$v7fK6vt;jyk4&z97ssOfye=p)`tedw-73|5|bK&6Jn=TTuUq0vreo!cGz7D zh>Vm&4yZyZJ%vh<%84f8&|WHW?6F6ys#I%_?I|}$p%+elZ`Mwn4`Y`Sb?p7`?7W?? 
zH*bGC`$JosLLhzn#PMN|XfLsaoTBuJyEe4K9|6M&cYLab6QBw?tk* z*-N~OrMO7>xPuCDiHdP2b;M;VEs?m3I-ynB>JQ~;Rx^x9o@(mNd_fQA<_l&xXGZU5 z%+##d$%2NS;B6Yxk#WWAs&=Hka_!jf76-S1H;V2PH z*0pyd8QYbUR)zFX7V{B5jB%vX0?$_ zI$bcJATgYuOFx_nCsGB&%+IMgaGH^*dQe3d8VTr0wR_bJ)gD@vkOdaliEaS}2{G!oTmc^6}o);H#r0 z{|)>f`{dtVSKQL^LluG|$n8pn$S!3Qx&#O65}Yi>o-3%z`G$WS6YPEzb%tGoGWR^*hFA7Dm=$H{7Nt7#cDgFWf6$?RKqP3hi$8#j- zDN03-atrXx9!WO3_PcC=S`mjl8CWJ4g*;iWI^f-cf|VsWhO?OFV^ z>>UC*(zSTIB8raA3fU`mci2e^j;-|m=y#@mC8L=K&Ebc1!2xYjkoN+=MzD0PIIB?9 z9ncqh%vdz9`&W*-_mNxq$QS@qCFz^<{e{yP<-OJ$pSdE_OzCBY)Jh2gA_# z2IK(Q(~MOWuZ(O$5fo&T;!^C$IROK+gSQuQu@qvUme7f8AtR94Mh@V_IU_xin_@?9 z0Xt`*Y2A;)pySf}FyDwd`Z=@?p_~FSaD}Vz@%!uWbGDVB9o(kRAU1Xfq>etT;wmk9 zFZ^c$>8GLH-Dvm-%TO?;_Ctdk(EzfA1}yNs0~)ZC146?WFerit z0pn=|8204=lfi@zXTgB)#+`sE%0<_V{77jE8Knitpa<(9<8B!;SFqw$6owmovu)vK z2XJ<5NgF>3b;!95Z5ek#b|L4dkyo(sxp6SqxrkFixrDv%vm