This commit is contained in:
justumen
2025-05-28 17:55:44 +02:00
parent 2a502cf161
commit 412bc279a8
6 changed files with 404 additions and 60 deletions

View File

@@ -1,4 +1,4 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v1.1.5 🔗
# 🔗 Comfyui : Bjornulf_custom_nodes v1.1.6 🔗
A list of 168 custom nodes for ComfyUI: display, manipulate, create and edit text, images, videos, loras, generate characters and more.
You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows, and even work with external AI tools such as Ollama or Text To Speech.

View File

@@ -127,8 +127,9 @@ from .style_selector import StyleSelector
from .split_image import SplitImageGrid, ReassembleImageGrid
from .API_openai import APIGenerateGPT4o
from .masks_nodes import LargestMaskOnly, BoundingRectangleMask
from .masks_nodes import LargestMaskOnly, BoundingRectangleMask, BoundingRectangleMaskBlur
from .openai_nodes import OpenAIVisionNode
# MultiOpenAIVisionNode
from .loop_random_seed import LoopRandomSeed
# from .video_text_generator import VideoTextGenerator
@@ -136,16 +137,25 @@ from .loop_random_seed import LoopRandomSeed
# from .remote_nodes import RemoteVAEDecoderNodeTiled, RemoteVAEDecoderNode, LoadFromBase64, SaveTensors, LoadTensor
# from .fix_face import FixFace, FaceSettings
from .image_cut_and_shift import HorizontalCutAndShift
from .load_image_from_path import LoadImageWithTransparencyFromPath
# from .kofi_nodes import CivitAILoraSelectorWanVideo, CivitAILoraSelectorHunyuan
# from .json_prompt_extractor import JSONImagePromptExtractor
#RemoteTextEncodingWithCLIPs
NODE_CLASS_MAPPINGS = {
# "Bjornulf_LoraSelectorHunyuan": CivitAILoraSelectorHunyuan,
# "Bjornulf_LoraSelectorWanVideo": CivitAILoraSelectorWanVideo,
# "Bjornulf_JSONImagePromptExtractor": JSONImagePromptExtractor,
"Bjornulf_MatchTextToInput": MatchTextToInput,
"Bjornulf_LargestMaskOnly": LargestMaskOnly,
"Bjornulf_BoundingRectangleMask": BoundingRectangleMask,
"Bjornulf_BoundingRectangleMaskBlur": BoundingRectangleMaskBlur,
"Bjornulf_OpenAIVisionNode": OpenAIVisionNode,
# "Bjornulf_MultiOpenAIVisionNode": MultiOpenAIVisionNode,
"Bjornulf_LoopRandomSeed": LoopRandomSeed,
"Bjornulf_HorizontalCutAndShift": HorizontalCutAndShift,
"Bjornulf_LoadImageWithTransparencyFromPath": LoadImageWithTransparencyFromPath,
# "Bjornulf_PurgeCLIPNode": PurgeCLIPNode,
# "Bjornulf_RemoteTextEncodingWithCLIPs": RemoteTextEncodingWithCLIPs,
@@ -327,11 +337,17 @@ NODE_CLASS_MAPPINGS = {
}
NODE_DISPLAY_NAME_MAPPINGS = {
#"Bjornulf_LoraSelectorHunyuan": "☕ Lora Selector Hunyuan",
#"Bjornulf_LoraSelectorWanVideo": "☕ Lora Selector WanVideo",
#"Bjornulf_JSONImagePromptExtractor": "JSONImagePromptExtractor",
"Bjornulf_MatchTextToInput": "🔛📝 Match 10 Text to Input",
"Bjornulf_LargestMaskOnly": "👺🔪 Largest Mask Only",
"Bjornulf_BoundingRectangleMask": "👺➜▢ Convert mask to rectangle",
"Bjornulf_BoundingRectangleMaskBlur": "👺➜▢ Convert mask to rectangle (with Blur)",
"Bjornulf_OpenAIVisionNode": "🔮 OpenAI Vision Node",
#"Bjornulf_MultiOpenAIVisionNode": "🔮 OpenAI Vision Node (⚠️ Multiple images accepted as input ⚠️)",
"Bjornulf_LoopRandomSeed": "♻🎲 Loop Random Seed",
"Bjornulf_LoadImageWithTransparencyFromPath": "📥🖼 Load Image with Transparency From Path",
# "Bjornulf_RemoteTextEncodingWithCLIPs": "[BETA] 🔮 Remote Text Encoding with CLIPs",
# "Bjornulf_ConditionalSwitch": "ConditionalSwitch",
# "Bjornulf_PurgeCLIPNode": "🧹📎 Purge CLIP",

View File

@@ -9,24 +9,24 @@ class CombineBackgroundOverlay:
"required": {
"background": ("IMAGE",),
"overlay": ("IMAGE",),
"mask": ("MASK",),
"horizontal_position": ("FLOAT", {"default": 50, "min": -50, "max": 150, "step": 0.1}),
"vertical_position": ("FLOAT", {"default": 50, "min": -50, "max": 150, "step": 0.1}),
},
"optional": {
"mask": ("MASK",),
},
}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "combine_background_overlay"
CATEGORY = "Bjornulf"
def combine_background_overlay(self, background, overlay, mask, horizontal_position, vertical_position):
def combine_background_overlay(self, background, overlay, horizontal_position, vertical_position, mask=None):
results = []
# Use the first background image for all overlays
# Process the first background image
bg = background[0].cpu().numpy()
bg = np.clip(bg * 255, 0, 255).astype(np.uint8)
# Check if background has alpha channel (4 channels)
if bg.shape[2] == 4:
bg_img = Image.fromarray(bg, 'RGBA')
bg_has_alpha = True
@@ -34,88 +34,74 @@ class CombineBackgroundOverlay:
bg_img = Image.fromarray(bg, 'RGB')
bg_has_alpha = False
# Process each overlay image with the same background
# Process each overlay
for i in range(overlay.shape[0]):
# Get overlay and corresponding mask
ov = overlay[i].cpu().numpy()
ov = np.clip(ov * 255, 0, 255).astype(np.uint8)
# Use corresponding mask or repeat last mask if fewer masks
mask_idx = min(i, mask.shape[0] - 1)
m = mask[mask_idx].cpu().numpy()
m = np.clip(m * 255, 0, 255).astype(np.uint8)
# Ensure overlay has correct shape (height, width, 3)
if len(ov.shape) == 2:
ov = np.stack([ov, ov, ov], axis=2)
elif ov.shape[2] != 3:
ov = ov[:, :, :3]
# Create PIL Image for overlay
ov_img = Image.fromarray(ov, 'RGB')
# Ensure mask has correct shape and create alpha channel
if len(m.shape) == 2:
alpha = Image.fromarray(m, 'L')
# Check if overlay has an alpha channel
if ov.shape[2] == 4:
ov_img = Image.fromarray(ov, 'RGBA')
else:
# If mask has multiple channels, use the first one
alpha = Image.fromarray(m[:, :, 0] if len(m.shape) > 2 else m, 'L')
ov_img = Image.fromarray(ov, 'RGB')
# Resize alpha to match overlay if needed
if alpha.size != ov_img.size:
alpha = alpha.resize(ov_img.size, Image.LANCZOS)
# Apply mask if provided
if mask is not None:
mask_idx = min(i, mask.shape[0] - 1)
m = mask[mask_idx].cpu().numpy()
m = np.clip(m * 255, 0, 255).astype(np.uint8)
mask_img = Image.fromarray(m, 'L')
# Combine RGB overlay with alpha mask
ov_img.putalpha(alpha)
# Resize mask to match overlay if needed
if mask_img.size != ov_img.size:
mask_img = mask_img.resize(ov_img.size, Image.LANCZOS)
# Calculate positions
if ov_img.mode == 'RGBA':
# Combine overlays alpha with mask
ov_alpha = np.array(ov_img.split()[3], dtype=np.float32) / 255.0
mask_alpha = np.array(mask_img, dtype=np.float32) / 255.0
effective_alpha = (ov_alpha * mask_alpha * 255).astype(np.uint8)
ov_img.putalpha(Image.fromarray(effective_alpha, 'L'))
else:
# Use mask as alpha for RGB overlay
ov_img.putalpha(mask_img)
else:
if ov_img.mode == 'RGB':
# Add fully opaque alpha for RGB overlay
ov_img.putalpha(Image.new('L', ov_img.size, 255))
# For RGBA, keep the existing alpha
# Calculate paste position
x = int((horizontal_position / 100) * bg_img.width - (horizontal_position / 100) * ov_img.width)
y = int((vertical_position / 100) * bg_img.height - (vertical_position / 100) * ov_img.height)
# Start with a fresh copy of the background for each overlay
# Prepare the result image
if bg_has_alpha:
result = bg_img.copy()
else:
# Convert to RGBA for compositing
result = Image.new('RGBA', bg_img.size, (0, 0, 0, 0))
result.paste(bg_img, (0, 0))
# Paste the overlay with alpha blending
# Paste overlay with alpha blending
if x + ov_img.width > 0 and y + ov_img.height > 0 and x < result.width and y < result.height:
# Create a temporary image for positioning
temp = Image.new('RGBA', result.size, (0, 0, 0, 0))
temp.paste(ov_img, (x, y), ov_img)
# Composite the overlay onto the result
result = Image.alpha_composite(result.convert('RGBA'), temp)
# Convert back to numpy array and then to torch tensor
# Convert result back to tensor
result_np = np.array(result)
# Determine output format based on background
if bg_has_alpha:
# Keep RGBA format if background had alpha
if result_np.shape[2] == 4:
result_tensor = torch.from_numpy(result_np).float() / 255.0
else:
# Add alpha channel if somehow lost
alpha_channel = np.ones((result_np.shape[0], result_np.shape[1], 1), dtype=np.uint8) * 255
result_np = np.concatenate([result_np, alpha_channel], axis=2)
result_tensor = torch.from_numpy(result_np).float() / 255.0
result_tensor = torch.from_numpy(result_np).float() / 255.0
else:
# Convert RGBA to RGB if background was RGB
# Convert RGBA to RGB, blending with white only where needed
if result_np.shape[2] == 4:
# Alpha blend with white background
alpha = result_np[:, :, 3:4] / 255.0
rgb = result_np[:, :, :3]
white_bg = np.ones_like(rgb) * 255
result_np = (rgb * alpha + white_bg * (1 - alpha)).astype(np.uint8)
result_tensor = torch.from_numpy(result_np).float() / 255.0
results.append(result_tensor)
# Stack all results into a single tensor
final_result = torch.stack(results)
return (final_result,)

66
load_image_from_path.py Normal file
View File

@@ -0,0 +1,66 @@
import torch
import numpy as np
from PIL import Image, ImageOps, ImageSequence
import node_helpers
class LoadImageWithTransparencyFromPath:
    """Load an image from a filesystem path, preserving its alpha channel.

    Outputs the image as an RGBA tensor batch, the inverted alpha channel as a
    MASK batch (ComfyUI convention: 1.0 = transparent), and the path string.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image_path": ("STRING", {"default": "", "multiline": False}),
            },
        }

    RETURN_TYPES = ("IMAGE", "MASK", "STRING")
    RETURN_NAMES = ("image", "mask", "image_path")
    FUNCTION = "load_image_alpha"
    CATEGORY = "Bjornulf"

    def load_image_alpha(self, image_path):
        """Open *image_path* and return (image batch, mask batch, path).

        Raises ValueError when the path is empty or None.
        """
        if not image_path:
            raise ValueError("image_path cannot be None or empty")

        img = node_helpers.pillow(Image.open, image_path)

        images = []
        masks = []
        first_size = None  # (w, h) of the first frame; later frames must match
        excluded_formats = ['MPO']

        # Walk every frame (animated formats can yield several).
        for frame in ImageSequence.Iterator(img):
            frame = node_helpers.pillow(ImageOps.exif_transpose, frame)

            if frame.mode == 'I':
                # 32-bit integer images: rescale into the usual 0-255 range.
                frame = frame.point(lambda px: px * (1 / 255))

            rgba = frame.convert("RGBA")

            if first_size is None:
                first_size = rgba.size
            elif rgba.size != first_size:
                # Skip frames whose dimensions differ from the first frame.
                continue

            arr = np.array(rgba).astype(np.float32) / 255.0
            images.append(torch.from_numpy(arr)[None,])

            if 'A' in frame.getbands():
                alpha = np.array(frame.getchannel('A')).astype(np.float32) / 255.0
                # Invert: ComfyUI masks are 1.0 where the image is transparent.
                mask = 1. - torch.from_numpy(alpha)
            else:
                # No alpha channel: fall back to an empty mask (core LoadImage behavior).
                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
            masks.append(mask.unsqueeze(0))

        # Multi-frame images are concatenated along the batch dimension.
        if len(images) > 1 and img.format not in excluded_formats:
            output_image = torch.cat(images, dim=0)
            output_mask = torch.cat(masks, dim=0)
        else:
            output_image = images[0]
            output_mask = masks[0]

        return (output_image, output_mask, image_path)

View File

@@ -219,3 +219,279 @@ class BoundingRectangleMask:
raise ValueError("Mask must be 2D (H, W) or 3D (N, H, W)")
return (torch.from_numpy(result),)
class BoundingRectangleMaskBlur:
    """Replace a mask with its (optionally expanded) bounding rectangle and
    apply an independent Gaussian-style fade on each side.

    ``up``/``down``/``left``/``right`` grow (or, when negative, shrink) the
    rectangle; ``blur_*`` control the fade width of each side; when
    ``tapered_corners`` is on, a smooth diagonal blend is added where two
    faded sides meet.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
                "up": ("INT", {"default": 0, "min": -10000, "max": 10000}),
                "down": ("INT", {"default": 0, "min": -10000, "max": 10000}),
                "right": ("INT", {"default": 0, "min": -10000, "max": 10000}),
                "left": ("INT", {"default": 0, "min": -10000, "max": 10000}),
                "blur_up": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}),
                "blur_down": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}),
                "blur_left": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}),
                "blur_right": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}),
                "tapered_corners": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("MASK",)
    FUNCTION = "process"
    CATEGORY = "Bjornulf"

    def _get_bounding_box(self, mask_np):
        """Return (min_row, max_row, min_col, max_col) of pixels > 0.5, or None
        when the mask has no active pixels."""
        active = mask_np > 0.5
        if not np.any(active):
            return None
        rows = np.where(np.any(active, axis=1))[0]
        cols = np.where(np.any(active, axis=0))[0]
        return rows[0], rows[-1], cols[0], cols[-1]

    def _expand_bounding_box(self, bbox, up, down, left, right, shape):
        """Expand *bbox* by the given per-side amounts, clamped to the image.

        Returns None when negative expansions collapse the box entirely.
        """
        min_row, max_row, min_col, max_col = bbox
        H, W = shape
        min_row_adj = max(0, min_row - up)
        max_row_adj = min(H - 1, max_row + down)
        min_col_adj = max(0, min_col - left)
        max_col_adj = min(W - 1, max_col + right)
        if min_row_adj > max_row_adj or min_col_adj > max_col_adj:
            return None
        return min_row_adj, max_row_adj, min_col_adj, max_col_adj

    def _create_directional_blur_mask(self, mask, direction, blur_amount):
        """Return a mask that fades away from one edge of *mask*.

        The fade is a Gaussian falloff exp(-d^2 / (2*blur^2)) of the distance d
        from the relevant edge, restricted to the columns (for 'up'/'down') or
        rows (for 'left'/'right') that the mask actually occupies.
        Fully vectorized: strengths are computed once per distance and the
        occupied-row/column projection is hoisted out instead of being
        recomputed for every scanline.
        """
        if blur_amount <= 0:
            return np.zeros_like(mask)

        H, W = mask.shape
        mask_binary = mask > 0.5
        if not np.any(mask_binary):
            return np.zeros_like(mask)

        result = np.zeros_like(mask)
        denom = 2 * blur_amount ** 2

        if direction == 'up':
            top_edge = np.where(np.any(mask_binary, axis=1))[0][0]
            # Columns containing mask pixels at or below the top edge.
            active_cols = np.any(mask_binary[top_edge:, :], axis=0)
            dist = (top_edge - np.arange(top_edge + 1)).astype(np.float64)
            strength = np.exp(-(dist ** 2) / denom)
            result[:top_edge + 1, :] = strength[:, None] * active_cols[None, :]
        elif direction == 'down':
            bottom_edge = np.where(np.any(mask_binary, axis=1))[0][-1]
            active_cols = np.any(mask_binary[:bottom_edge + 1, :], axis=0)
            dist = (np.arange(bottom_edge, H) - bottom_edge).astype(np.float64)
            strength = np.exp(-(dist ** 2) / denom)
            result[bottom_edge:, :] = strength[:, None] * active_cols[None, :]
        elif direction == 'left':
            left_edge = np.where(np.any(mask_binary, axis=0))[0][0]
            active_rows = np.any(mask_binary[:, left_edge:], axis=1)
            dist = (left_edge - np.arange(left_edge + 1)).astype(np.float64)
            strength = np.exp(-(dist ** 2) / denom)
            result[:, :left_edge + 1] = active_rows[:, None] * strength[None, :]
        elif direction == 'right':
            right_edge = np.where(np.any(mask_binary, axis=0))[0][-1]
            active_rows = np.any(mask_binary[:, :right_edge + 1], axis=1)
            dist = (np.arange(right_edge, W) - right_edge).astype(np.float64)
            strength = np.exp(-(dist ** 2) / denom)
            result[:, right_edge:] = active_rows[:, None] * strength[None, :]

        return result

    def _create_corner_blend(self, mask, blur_up, blur_down, blur_left, blur_right):
        """Smooth corner blending for diagonal blur combinations.

        For each corner quadrant where BOTH adjacent sides have a positive
        blur amount, the two per-axis Gaussian strengths are multiplied to
        give a smooth diagonal transition.
        """
        H, W = mask.shape
        result = np.zeros_like(mask)

        mask_binary = mask > 0.5
        if not np.any(mask_binary):
            return result

        rows_with_mask = np.any(mask_binary, axis=1)
        cols_with_mask = np.any(mask_binary, axis=0)
        top_edge = np.where(rows_with_mask)[0][0]
        bottom_edge = np.where(rows_with_mask)[0][-1]
        left_edge = np.where(cols_with_mask)[0][0]
        right_edge = np.where(cols_with_mask)[0][-1]

        y_coords, x_coords = np.mgrid[0:H, 0:W]

        def _corner(dist_a, blur_a, dist_b, blur_b, quadrant):
            # Multiplicative combination of the two per-axis falloffs,
            # restricted to the corner quadrant.
            strength = (np.exp(-(dist_a ** 2) / (2 * blur_a ** 2))
                        * np.exp(-(dist_b ** 2) / (2 * blur_b ** 2)))
            return strength * quadrant

        if blur_up > 0 and blur_left > 0:  # top-left
            result = np.maximum(result, _corner(
                np.maximum(0, top_edge - y_coords), blur_up,
                np.maximum(0, left_edge - x_coords), blur_left,
                (y_coords <= top_edge) & (x_coords <= left_edge)))
        if blur_up > 0 and blur_right > 0:  # top-right
            result = np.maximum(result, _corner(
                np.maximum(0, top_edge - y_coords), blur_up,
                np.maximum(0, x_coords - right_edge), blur_right,
                (y_coords <= top_edge) & (x_coords >= right_edge)))
        if blur_down > 0 and blur_left > 0:  # bottom-left
            result = np.maximum(result, _corner(
                np.maximum(0, y_coords - bottom_edge), blur_down,
                np.maximum(0, left_edge - x_coords), blur_left,
                (y_coords >= bottom_edge) & (x_coords <= left_edge)))
        if blur_down > 0 and blur_right > 0:  # bottom-right
            result = np.maximum(result, _corner(
                np.maximum(0, y_coords - bottom_edge), blur_down,
                np.maximum(0, x_coords - right_edge), blur_right,
                (y_coords >= bottom_edge) & (x_coords >= right_edge)))

        return result

    def _apply_directional_blur(self, mask, blur_up, blur_down, blur_left, blur_right, tapered_corners):
        """Union *mask* with the four directional fades and, when enabled,
        the smooth corner blends."""
        result = mask.copy()
        for direction, amount in (('up', blur_up), ('down', blur_down),
                                  ('left', blur_left), ('right', blur_right)):
            if amount > 0:
                result = np.maximum(
                    result, self._create_directional_blur_mask(mask, direction, amount))
        if tapered_corners:
            result = np.maximum(
                result, self._create_corner_blend(mask, blur_up, blur_down, blur_left, blur_right))
        return result

    def process_single(self, mask_np, up, down, right, left, blur_up, blur_down, blur_left, blur_right, tapered_corners):
        """Process one 2-D mask: bounding box -> expand -> rectangle -> fades."""
        bbox = self._get_bounding_box(mask_np)
        if bbox is None:
            return np.zeros_like(mask_np, dtype=np.float32)

        expanded = self._expand_bounding_box(bbox, up, down, left, right, mask_np.shape)
        if expanded is None:
            return np.zeros_like(mask_np, dtype=np.float32)

        min_row_adj, max_row_adj, min_col_adj, max_col_adj = expanded
        new_mask = np.zeros_like(mask_np, dtype=np.float32)
        new_mask[min_row_adj:max_row_adj + 1, min_col_adj:max_col_adj + 1] = 1.0

        new_mask = self._apply_directional_blur(
            new_mask, blur_up, blur_down, blur_left, blur_right, tapered_corners)
        # Corner blending promotes to float64 (float32 max float64); pin the
        # result to float32 so downstream MASK consumers get a uniform dtype.
        return new_mask.astype(np.float32, copy=False)

    def process(self, mask, up, down, right, left, blur_up, blur_down, blur_left, blur_right, tapered_corners):
        """Entry point; accepts (H, W) or (N, H, W) masks, returns a
        1-tuple with an (N, H, W) float32 MASK tensor."""
        mask_np = mask.cpu().numpy()
        if mask_np.ndim == 2:
            result = self.process_single(
                mask_np, up, down, right, left,
                blur_up, blur_down, blur_left, blur_right, tapered_corners)[None, ...]
        elif mask_np.ndim == 3:
            result = np.stack([
                self.process_single(
                    m, up, down, right, left,
                    blur_up, blur_down, blur_left, blur_right, tapered_corners)
                for m in mask_np
            ], axis=0)
        else:
            raise ValueError("Mask must be 2D (H, W) or 3D (N, H, W)")
        return (torch.from_numpy(result),)

View File

@@ -1,7 +1,7 @@
[project]
name = "bjornulf_custom_nodes"
description = "169 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech, etc..."
version = "1.1.5"
version = "1.1.6"
license = {file = "LICENSE"}
[project.urls]