This commit is contained in:
justumen
2025-05-22 21:27:35 +02:00
parent fd66298414
commit 2a502cf161
3 changed files with 77 additions and 32 deletions

View File

@@ -1,4 +1,4 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v1.1.4 🔗
# 🔗 Comfyui : Bjornulf_custom_nodes v1.1.5 🔗
A list of 168 custom nodes for ComfyUI: display, manipulate, create and edit text, images, videos, LoRAs, generate characters and more.
You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows, and even work with external AI tools like Ollama or Text-to-Speech.

View File

@@ -20,53 +20,98 @@ class CombineBackgroundOverlay:
CATEGORY = "Bjornulf"
def combine_background_overlay(self, background, overlay, mask, horizontal_position, vertical_position):
# Convert background from torch tensor to numpy array
bg = background[0].numpy()
bg = (bg * 255).astype(np.uint8)
bg_img = Image.fromarray(bg, 'RGB')
results = []
for ov, m in zip(overlay, mask):
# Convert overlay from torch tensor to numpy array
ov = ov.numpy()
ov = (ov * 255).astype(np.uint8)
# Use the first background image for all overlays
bg = background[0].cpu().numpy()
bg = np.clip(bg * 255, 0, 255).astype(np.uint8)
# Check if background has alpha channel (4 channels)
if bg.shape[2] == 4:
bg_img = Image.fromarray(bg, 'RGBA')
bg_has_alpha = True
else:
bg_img = Image.fromarray(bg, 'RGB')
bg_has_alpha = False
# Convert mask from torch tensor to numpy array
m = m.numpy()
m = (m * 255).astype(np.uint8)
# Process each overlay image with the same background
for i in range(overlay.shape[0]):
# Get overlay and corresponding mask
ov = overlay[i].cpu().numpy()
ov = np.clip(ov * 255, 0, 255).astype(np.uint8)
# Use corresponding mask or repeat last mask if fewer masks
mask_idx = min(i, mask.shape[0] - 1)
m = mask[mask_idx].cpu().numpy()
m = np.clip(m * 255, 0, 255).astype(np.uint8)
# Ensure overlay has correct shape (height, width, 3)
if len(ov.shape) == 2:
ov = np.stack([ov, ov, ov], axis=2)
elif ov.shape[2] != 3:
ov = ov[:, :, :3]
# Create PIL Image for overlay
ov_img = Image.fromarray(ov, 'RGB')
# Create alpha channel from mask
alpha = Image.fromarray(m, 'L')
# Ensure mask has correct shape and create alpha channel
if len(m.shape) == 2:
alpha = Image.fromarray(m, 'L')
else:
# If mask has multiple channels, use the first one
alpha = Image.fromarray(m[:, :, 0] if len(m.shape) > 2 else m, 'L')
# Resize alpha to match overlay if needed
if alpha.size != ov_img.size:
alpha = alpha.resize(ov_img.size, Image.LANCZOS)
# Combine RGB overlay with alpha mask
ov_img.putalpha(alpha)
# Calculate horizontal position
x = int((horizontal_position / 100) * (bg_img.width - ov_img.width))
# Calculate positions
x = int((horizontal_position / 100) * bg_img.width - (horizontal_position / 100) * ov_img.width)
y = int((vertical_position / 100) * bg_img.height - (vertical_position / 100) * ov_img.height)
# Calculate vertical position
y = int((vertical_position / 100) * (bg_img.height - ov_img.height))
# Start with a fresh copy of the background for each overlay
if bg_has_alpha:
result = bg_img.copy()
else:
# Convert to RGBA for compositing
result = Image.new('RGBA', bg_img.size, (0, 0, 0, 0))
result.paste(bg_img, (0, 0))
# Create a new image for this overlay
result = Image.new('RGBA', bg_img.size, (0, 0, 0, 0))
# Paste the background
result.paste(bg_img, (0, 0))
# Paste the overlay in the calculated position
result.paste(ov_img, (x, y), ov_img)
# Paste the overlay with alpha blending
if x + ov_img.width > 0 and y + ov_img.height > 0 and x < result.width and y < result.height:
# Create a temporary image for positioning
temp = Image.new('RGBA', result.size, (0, 0, 0, 0))
temp.paste(ov_img, (x, y), ov_img)
# Composite the overlay onto the result
result = Image.alpha_composite(result.convert('RGBA'), temp)
# Convert back to numpy array and then to torch tensor
result_np = np.array(result)
# Convert RGBA to RGB
result_np = result_np[:,:,:3]
result_tensor = torch.from_numpy(result_np).float() / 255.0
# Determine output format based on background
if bg_has_alpha:
# Keep RGBA format if background had alpha
if result_np.shape[2] == 4:
result_tensor = torch.from_numpy(result_np).float() / 255.0
else:
# Add alpha channel if somehow lost
alpha_channel = np.ones((result_np.shape[0], result_np.shape[1], 1), dtype=np.uint8) * 255
result_np = np.concatenate([result_np, alpha_channel], axis=2)
result_tensor = torch.from_numpy(result_np).float() / 255.0
else:
# Convert RGBA to RGB if background was RGB
if result_np.shape[2] == 4:
# Alpha blend with white background
alpha = result_np[:, :, 3:4] / 255.0
rgb = result_np[:, :, :3]
white_bg = np.ones_like(rgb) * 255
result_np = (rgb * alpha + white_bg * (1 - alpha)).astype(np.uint8)
result_tensor = torch.from_numpy(result_np).float() / 255.0
results.append(result_tensor)

View File

@@ -1,7 +1,7 @@
[project]
name = "bjornulf_custom_nodes"
description = "169 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech, etc..."
version = "1.1.4"
version = "1.1.5"
license = {file = "LICENSE"}
[project.urls]