This commit is contained in:
justumen
2025-05-01 18:55:01 +02:00
parent e4ab8a25be
commit 0555362521
7 changed files with 276 additions and 6 deletions

View File

@@ -1,6 +1,6 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v0.80 🔗
# 🔗 Comfyui : Bjornulf_custom_nodes v1.1.0 🔗
A list of 163 custom nodes for Comfyui : Display, manipulate, create and edit text, images, videos, loras, generate characters and more.
A list of 167 custom nodes for Comfyui : Display, manipulate, create and edit text, images, videos, loras, generate characters and more.
You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech.
⚠️ Warning : Very active development. Work in progress. 🏗

View File

@@ -51,7 +51,7 @@ from .loop_write_text import LoopWriteText
from .load_images_from_folder import LoadImagesFromSelectedFolder
from .select_image_from_list import SelectImageFromList
from .random_model_selector import RandomModelSelector
from .if_else import IfElse
from .if_else import IfElse, MatchTextToInput
from .image_details import ImageDetails
from .video_details import VideoDetails
from .combine_images import CombineImages
@@ -127,6 +127,10 @@ from .style_selector import StyleSelector
from .split_image import SplitImageGrid, ReassembleImageGrid
from .API_openai import APIGenerateGPT4o
from .masks_nodes import LargestMaskOnly
from .openai_nodes import OpenAIVisionNode
from .loop_random_seed import LoopRandomSeed
# from .video_text_generator import VideoTextGenerator
# from .run_workflow_from_api import ExecuteWorkflowNode, ApiDynamicTextInputs
# from .remote_nodes import RemoteVAEDecoderNodeTiled, RemoteVAEDecoderNode, LoadFromBase64, SaveTensors, LoadTensor
@@ -135,6 +139,10 @@ from .API_openai import APIGenerateGPT4o
#RemoteTextEncodingWithCLIPs
NODE_CLASS_MAPPINGS = {
"Bjornulf_MatchTextToInput": MatchTextToInput,
"Bjornulf_LargestMaskOnly": LargestMaskOnly,
"Bjornulf_OpenAIVisionNode": OpenAIVisionNode,
"Bjornulf_LoopRandomSeed": LoopRandomSeed,
# "Bjornulf_PurgeCLIPNode": PurgeCLIPNode,
# "Bjornulf_RemoteTextEncodingWithCLIPs": RemoteTextEncodingWithCLIPs,
@@ -316,6 +324,10 @@ NODE_CLASS_MAPPINGS = {
}
NODE_DISPLAY_NAME_MAPPINGS = {
"Bjornulf_MatchTextToInput": "🔛📝 Match 10 Text to Input",
"Bjornulf_LargestMaskOnly": "🖼🔪 Largest Mask Only",
"Bjornulf_OpenAIVisionNode": "🔮 OpenAI Vision Node",
"Bjornulf_LoopRandomSeed": "♻🎲 Loop Random Seed",
# "Bjornulf_RemoteTextEncodingWithCLIPs": "[BETA] 🔮 Remote Text Encoding with CLIPs",
# "Bjornulf_ConditionalSwitch": "ConditionalSwitch",
# "Bjornulf_PurgeCLIPNode": "🧹📎 Purge CLIP",

View File

@@ -104,4 +104,79 @@ class IfElse:
@classmethod
def IS_CHANGED(cls, input, send_if_true, compare_with, input_type, send_if_false=None):
    # NaN never compares equal to itself, so the cached-state check never
    # matches — presumably this forces ComfyUI to re-execute the node on
    # every run (TODO confirm against ComfyUI's caching contract).
    return float("NaN")
    # NOTE(review): the duplicated return below is unreachable and looks like
    # a diff-rendering artifact (old + new line both shown); only the first
    # return ever executes.
    return float("NaN")
import re
class MatchTextToInput:
    """Route one of up to 10 inputs based on which text pattern matches.

    Each non-empty ``text_N`` is compared against ``input_text``; the first
    one that matches selects the corresponding ``input_N`` as the output.
    With ``use_regex`` enabled, ``*`` acts as a wildcard matching any run of
    characters; every other character is matched literally. When nothing
    matches, ``input_1`` is returned as the fallback.
    """

    @classmethod
    def INPUT_TYPES(cls):
        inputs = {
            "required": {
                "input_text": ("STRING", {"forceInput": True}),
            },
            "optional": {
                "input_1": (Everything("*"), {"forceInput": True}),
                "input_2": (Everything("*"), {"forceInput": True}),
                "input_3": (Everything("*"), {"forceInput": True}),
                "input_4": (Everything("*"), {"forceInput": True}),
                "input_5": (Everything("*"), {"forceInput": True}),
                "input_6": (Everything("*"), {"forceInput": True}),
                "input_7": (Everything("*"), {"forceInput": True}),
                "input_8": (Everything("*"), {"forceInput": True}),
                "input_9": (Everything("*"), {"forceInput": True}),
                "input_10": (Everything("*"), {"forceInput": True}),
                "text_1": ("STRING", {"default": ""}),
                "text_2": ("STRING", {"default": ""}),
                "text_3": ("STRING", {"default": ""}),
                "text_4": ("STRING", {"default": ""}),
                "text_5": ("STRING", {"default": ""}),
                "text_6": ("STRING", {"default": ""}),
                "text_7": ("STRING", {"default": ""}),
                "text_8": ("STRING", {"default": ""}),
                "text_9": ("STRING", {"default": ""}),
                "text_10": ("STRING", {"default": ""}),
                "use_regex": ("BOOLEAN", {"default": True}),
            }
        }
        return inputs

    RETURN_TYPES = (Everything("*"),)
    FUNCTION = "match_text"
    CATEGORY = "text"

    def match_text(self, input_text, input_1=None, input_2=None, input_3=None, input_4=None, input_5=None,
                   input_6=None, input_7=None, input_8=None, input_9=None, input_10=None,
                   text_1="", text_2="", text_3="", text_4="", text_5="",
                   text_6="", text_7="", text_8="", text_9="", text_10="",
                   use_regex=True):
        """Return a 1-tuple with the input whose pattern matches input_text.

        Patterns are tried in order text_1..text_10; empty patterns are
        skipped. Falls back to (input_1,) when no pattern matches.
        """
        inputs = [input_1, input_2, input_3, input_4, input_5, input_6, input_7, input_8, input_9, input_10]
        texts = [text_1, text_2, text_3, text_4, text_5, text_6, text_7, text_8, text_9, text_10]
        for i, text in enumerate(texts):
            if text == "":  # Skip empty patterns
                continue
            if use_regex:
                # Wildcard matching. Bug fix: escape regex metacharacters
                # first so patterns like "a+b" or "(x)" match literally
                # (previously only '*' was translated, leaving other
                # metacharacters active and sometimes producing invalid
                # regexes), then turn the escaped '*' into '.*'.
                pattern = re.escape(text).replace(r"\*", ".*")
                # fullmatch anchors at both ends — equivalent to ^pattern$.
                # The escaped pattern is always valid, so no re.error can
                # occur here.
                if re.fullmatch(pattern, input_text):
                    return (inputs[i],)
            else:
                # Exact string comparison.
                if input_text == text:
                    return (inputs[i],)
        # If no match found, return input_1
        return (input_1,)

20
loop_random_seed.py Normal file
View File

@@ -0,0 +1,20 @@
class LoopRandomSeed:
    """Emit a reproducible list of random seeds derived from one master seed.

    Given the same ``generator_seed``, the node always produces the same
    sequence of ``num_seeds`` values, each in the unsigned 32-bit range.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "num_seeds": ("INT", {"default": 10, "min": 1, "max": 1000, "step": 1}),
                "generator_seed": ("INT", {"default": 0, "min": 0, "max": 1000000, "step": 1}),
            }
        }

    RETURN_TYPES = ("INT",)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "create_random_seeds"
    CATEGORY = "Bjornulf"

    def create_random_seeds(self, num_seeds, generator_seed):
        """Return a 1-tuple holding the list of generated seed integers."""
        import random
        # Dedicated Random instance: reproducible output that never disturbs
        # the interpreter-wide global random state.
        generator = random.Random(generator_seed)
        produced = []
        for _ in range(num_seeds):
            # 4294967295 == 2**32 - 1, the conventional upper bound for seeds.
            produced.append(generator.randint(0, 4294967295))
        return (produced,)

52
masks_nodes.py Normal file
View File

@@ -0,0 +1,52 @@
import numpy as np
import scipy.ndimage as ndi
import torch
class LargestMaskOnly:
    """Keep only the largest connected component of a mask.

    Thresholds the mask at 0.5, labels connected regions with SciPy, and
    returns a float32 mask containing just the biggest region. Masks with
    zero or one component are returned unchanged.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("MASK",)
    FUNCTION = "process"
    CATEGORY = "mask"

    def process(self, mask):
        """Return a 1-tuple with the largest-component-only mask tensor."""
        mask_np = mask.cpu().numpy()
        # Threshold to binary: values above 0.5 count as foreground.
        binary_mask = (mask_np > 0.5).astype(np.uint8)
        # scipy's label avoids an OpenCV dependency.
        # NOTE(review): if mask arrives batched as (B, H, W), labeling runs in
        # 3D and can merge components across batch items — confirm the
        # expected mask rank against callers.
        labeled_array, num_features = ndi.label(binary_mask)
        if num_features <= 1:
            # Zero or one component: nothing to filter out.
            return (mask,)
        # Component sizes by label; index 0 is the background — skip it.
        # (Bug fix: the old `if len(sizes) > 1` guard was dead code, since
        # num_features > 1 guarantees at least three bincount entries.)
        sizes = np.bincount(labeled_array.ravel())[1:]
        # +1 because label 0 (background) was sliced off above.
        largest_label = np.argmax(sizes) + 1
        largest_mask = (labeled_array == largest_label).astype(np.float32)
        # Bug fix: torch.from_numpy always produces a CPU tensor; move the
        # result back to the input's device so downstream nodes see a
        # consistent device. (Debug prints removed.)
        result = torch.from_numpy(largest_mask).to(mask.device)
        return (result,)

111
openai_nodes.py Normal file
View File

@@ -0,0 +1,111 @@
import os
import base64
import io
import json
import numpy as np
import torch
from PIL import Image
from openai import OpenAI
class OpenAIVisionNode:
    """
    ComfyUI node for OpenAI's Vision API processing.

    Converts an incoming image tensor to a base64-encoded PNG and sends it,
    together with a text prompt, to OpenAI's Responses API. Returns the
    model's text output (or an error message string) as a single STRING.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                # Default prompt asks the model for a compact, semicolon-
                # separated character description.
                "prompt": ("STRING", {"multiline": True, "default": "Output one line, exactly 7 lowercase fields, separated by semicolons, no spaces:\nsex;race;age;hair_length;body;eye_wear;head_wear\n\nField values:\nsex: male/female\nrace: pale/caucasian/hispanic/black/asian\nage: adult/child\nhair_length: none/long\nbody: average/skinny/fat/obese/muscular\neye_wear: none/glasses\nhead_wear: none/hat/cap\nExamples:\nfemale;black;unknown;long;fat;none;none\nmale;hispanic;unknown;none;muscular;glasses;cap\nmale;asian;unknown;none;kid;none;hat"}),
                # Display labels include pricing; mapped to real model IDs in
                # analyze_image.
                "model": (["GPT-4.1 ($2.00/$8.00 per 1M tokens)",
                           "GPT-4.1 mini ($0.40/$1.60 per 1M tokens)",
                           "GPT-4.1 nano ($0.10/$0.40 per 1M tokens)"],),
                "api_key": ("STRING", {"default": "", "multiline": False})
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("analysis",)
    FUNCTION = "analyze_image"
    CATEGORY = "Bjornulf"

    def analyze_image(self, image, prompt, model, api_key=""):
        """Process the image with OpenAI's Vision API.

        Returns a 1-tuple containing either the model's response text or a
        human-readable error message (missing API key, request failure).
        Never raises: all exceptions are caught and returned as text.
        """
        # Get API key from environment if not provided
        if not api_key:
            api_key = os.environ.get('OPENAI_API_KEY')
            if not api_key:
                return ("No OpenAI API key provided. Please enter your API key or set the OPENAI_API_KEY environment variable.",)
        # Map selected model to actual model identifier
        model_mapping = {
            "GPT-4.1 ($2.00/$8.00 per 1M tokens)": "gpt-4.1",
            "GPT-4.1 mini ($0.40/$1.60 per 1M tokens)": "gpt-4.1-mini",
            "GPT-4.1 nano ($0.10/$0.40 per 1M tokens)": "gpt-4.1-nano"
        }
        model_id = model_mapping[model]
        try:
            # ComfyUI images are in BCHW format with float values [0,1]
            # NOTE(review): ComfyUI commonly uses BHWC; the channel-first
            # handling below covers both via the permute check — confirm
            # against actual callers.
            # Extract the first image if we have a batch
            if len(image.shape) == 4:
                image_tensor = image[0]  # Get the first image from the batch
            else:
                image_tensor = image
            # Convert from BCHW/CHW to HWC format for PIL
            if image_tensor.shape[0] in [1, 3, 4]:  # If first dimension is channels
                image_tensor = image_tensor.permute(1, 2, 0)
            # Convert to numpy and scale to 0-255 range
            np_image = image_tensor.cpu().numpy()
            if np_image.max() <= 1.0:
                np_image = (np_image * 255).astype(np.uint8)
            # Handle different channel configurations
            if np_image.shape[2] == 1:  # Grayscale
                np_image = np.repeat(np_image, 3, axis=2)
            elif np_image.shape[2] == 4:  # RGBA
                np_image = np_image[:, :, :3]  # Remove alpha channel
            # Create PIL image from numpy array
            pil_image = Image.fromarray(np_image)
            # Encode the PIL image to base64
            image_bytes = io.BytesIO()
            pil_image.save(image_bytes, format='PNG')
            image_bytes.seek(0)
            base64_image = base64.b64encode(image_bytes.getvalue()).decode('utf-8')
            # Create OpenAI client
            client = OpenAI(api_key=api_key)
            # Create completion with the Vision API
            # Uses the Responses API: text and image are sent as separate
            # content parts; the image goes inline as a data URL.
            response = client.responses.create(
                model=model_id,
                input=[
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "input_text",
                                "text": prompt,
                            },
                            {
                                "type": "input_image",
                                "image_url": f"data:image/png;base64,{base64_image}",
                            },
                        ],
                    }
                ]
            )
            analysis = response.output_text.strip()
            return (analysis,)
        except Exception as e:
            # Broad catch is deliberate: the node must return a string rather
            # than crash the workflow; the traceback is included for debugging.
            import traceback
            error_details = traceback.format_exc()
            return (f"Error processing image: {str(e)}\n\nDetails: {error_details}",)

View File

@@ -1,7 +1,7 @@
[project]
name = "bjornulf_custom_nodes"
description = "163 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech, etc..."
version = "0.80"
description = "167 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech, etc..."
version = "1.1.0"
license = {file = "LICENSE"}
[project.urls]