This commit is contained in:
justumen
2025-06-08 22:50:18 +02:00
parent 412bc279a8
commit 3480d8a4ce
5 changed files with 160 additions and 4 deletions

View File

@@ -1,6 +1,6 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v1.1.6 🔗 # 🔗 Comfyui : Bjornulf_custom_nodes v1.1.7 🔗
A list of 168 custom nodes for Comfyui : Display, manipulate, create and edit text, images, videos, loras, generate characters and more. A list of 170 custom nodes for Comfyui : Display, manipulate, create and edit text, images, videos, loras, generate characters and more.
You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech. You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech.
⚠️ Warning : Very active development. Work in progress. 🏗 ⚠️ Warning : Very active development. Work in progress. 🏗

View File

@@ -141,9 +141,13 @@ from .load_image_from_path import LoadImageWithTransparencyFromPath
# from .kofi_nodes import CivitAILoraSelectorWanVideo, CivitAILoraSelectorHunyuan # from .kofi_nodes import CivitAILoraSelectorWanVideo, CivitAILoraSelectorHunyuan
# from .json_prompt_extractor import JSONImagePromptExtractor # from .json_prompt_extractor import JSONImagePromptExtractor
from .upscaler_transparency import ImageUpscaleWithModelTransparency
from .load_base64_transparency import loadImageBase64Transparency
#RemoteTextEncodingWithCLIPs #RemoteTextEncodingWithCLIPs
NODE_CLASS_MAPPINGS = { NODE_CLASS_MAPPINGS = {
"Bjornulf_ImageUpscaleWithModelTransparency": ImageUpscaleWithModelTransparency,
"Bjornulf_loadImageBase64Transparency": loadImageBase64Transparency,
# "Bjornulf_LoraSelectorHunyuan": CivitAILoraSelectorHunyuan, # "Bjornulf_LoraSelectorHunyuan": CivitAILoraSelectorHunyuan,
# "Bjornulf_LoraSelectorWanVideo": CivitAILoraSelectorWanVideo, # "Bjornulf_LoraSelectorWanVideo": CivitAILoraSelectorWanVideo,
# "Bjornulf_JSONImagePromptExtractor": JSONImagePromptExtractor, # "Bjornulf_JSONImagePromptExtractor": JSONImagePromptExtractor,
@@ -337,6 +341,8 @@ NODE_CLASS_MAPPINGS = {
} }
NODE_DISPLAY_NAME_MAPPINGS = { NODE_DISPLAY_NAME_MAPPINGS = {
"Bjornulf_loadImageBase64Transparency": "📥🖼 Load Image Base64 (Transparency)",
"Bjornulf_ImageUpscaleWithModelTransparency": "🖼 Upscale Image with Transparency (with model)",
#"Bjornulf_LoraSelectorHunyuan": "☕ Lora Selector Hunyuan", #"Bjornulf_LoraSelectorHunyuan": "☕ Lora Selector Hunyuan",
#"Bjornulf_LoraSelectorWanVideo": "☕ Lora Selector WanVideo", #"Bjornulf_LoraSelectorWanVideo": "☕ Lora Selector WanVideo",
#"Bjornulf_JSONImagePromptExtractor": "JSONImagePromptExtractor", #"Bjornulf_JSONImagePromptExtractor": "JSONImagePromptExtractor",

View File

@@ -0,0 +1,67 @@
import base64
import cv2
import numpy as np
import torch
class loadImageBase64Transparency:
    """Load an image from a base64-encoded string, preserving transparency.

    Decodes the payload with OpenCV and returns a ComfyUI ``IMAGE`` tensor
    (RGBA when the source carries an alpha channel, RGB otherwise) plus a
    ``MASK`` tensor taken from the alpha channel — or a fully opaque mask
    when no alpha channel exists.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "base64_data": ("STRING", {"default": ""}),
            },
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    OUTPUT_NODE = True
    FUNCTION = "load_image"
    CATEGORY = "EasyUse/Image/LoadImage"

    def convert_color(self, image):
        """Convert OpenCV BGR(A) channel order to RGB(A); pass grayscale through."""
        if len(image.shape) > 2 and image.shape[2] >= 4:
            # Handle BGRA to RGBA conversion
            return cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
        elif len(image.shape) > 2 and image.shape[2] == 3:
            # Handle BGR to RGB conversion
            return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Grayscale image: nothing to convert
        return image

    def load_image(self, base64_data, prompt=None, extra_pnginfo=None):
        """Decode *base64_data* into batched ``(image, mask)`` tensors.

        Raises:
            ValueError: if the payload is not valid base64, or decodes to
                bytes that OpenCV cannot interpret as an image.
        """
        try:
            # binascii.Error (raised on bad padding) is a ValueError subclass
            nparr = np.frombuffer(base64.b64decode(base64_data), np.uint8)
        except ValueError as err:
            raise ValueError("base64_data is not valid base64") from err
        result = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)
        if result is None:
            # cv2.imdecode signals failure by returning None, not by raising
            raise ValueError("base64_data does not contain a decodable image")
        # Split channels to detect an alpha plane
        channels = cv2.split(result)
        has_alpha = len(channels) > 3
        if has_alpha:
            # Alpha channel becomes the mask, normalized to [0, 1]
            mask = torch.from_numpy(channels[3].astype(np.float32) / 255.0)
        else:
            # No alpha: fully opaque mask matching the image's H x W
            mask = torch.ones(channels[0].shape, dtype=torch.float32, device="cpu")
        # BGR(A) -> RGB(A), then normalize pixel values to [0, 1]
        result = self.convert_color(result).astype(np.float32) / 255.0
        # Add the batch dimension; all channels (RGB or RGBA) are kept as-is,
        # so no branch on has_alpha is needed here.
        new_images = torch.from_numpy(result)[None,]
        mask = mask.unsqueeze(0)
        return (new_images, mask)

View File

@@ -1,7 +1,7 @@
[project] [project]
name = "bjornulf_custom_nodes" name = "bjornulf_custom_nodes"
description = "169 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech, etc..." description = "170 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech, etc..."
version = "1.1.6" version = "1.1.7"
license = {file = "LICENSE"} license = {file = "LICENSE"}
[project.urls] [project.urls]

83
upscaler_transparency.py Normal file
View File

@@ -0,0 +1,83 @@
import comfy
from comfy import model_management
import torch
import comfy.utils
class ImageUpscaleWithModelTransparency:
    """Upscale an image with an upscale model while preserving transparency.

    The RGB channels are run through ``upscale_model`` using tiled inference
    (tile size is halved on out-of-memory and retried); a 4th (alpha)
    channel, when present, is upscaled separately with bilinear
    interpolation and concatenated back onto the result.
    """

    @classmethod
    def INPUT_TYPES(s):
        # upscale_model: a loaded ComfyUI UPSCALE_MODEL wrapper;
        # image: channels-last IMAGE tensor (4 channels => has alpha).
        return {"required": { "upscale_model": ("UPSCALE_MODEL",),
                              "image": ("IMAGE",),
                              }}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"
    CATEGORY = "image/upscaling"

    def upscale(self, upscale_model, image):
        """Return a 1-tuple with the upscaled image tensor.

        Args:
            upscale_model: model wrapper exposing ``.model``, ``.scale``,
                ``.to()`` and ``__call__`` on channels-first batches
                (ComfyUI UPSCALE_MODEL contract — confirm against loader).
            image: channels-last image tensor; the last dim being 4 is
                taken to mean RGBA.

        Raises:
            model_management.OOM_EXCEPTION: if even 128-px tiles do not fit.
        """
        device = model_management.get_torch_device()
        # Check if image has alpha channel (4 channels)
        has_alpha = image.shape[-1] == 4
        if has_alpha:
            # Split RGB and alpha channels
            rgb_image = image[..., :3]
            alpha_channel = image[..., 3:4]
        else:
            rgb_image = image
            alpha_channel = None
        # Estimate VRAM needed so free_memory can evict enough beforehand.
        # Based on RGB channels only — the model never sees the alpha plane.
        memory_required = model_management.module_size(upscale_model.model)
        memory_required += (512 * 512 * 3) * rgb_image.element_size() * max(upscale_model.scale, 1.0) * 384.0
        memory_required += rgb_image.nelement() * rgb_image.element_size()
        # Add memory for alpha channel processing if present
        if has_alpha:
            memory_required += alpha_channel.nelement() * alpha_channel.element_size() * max(upscale_model.scale, 1.0) ** 2
        model_management.free_memory(memory_required, device)
        upscale_model.to(device)
        # Upscale RGB channels tile-by-tile; channels-last -> channels-first
        in_img = rgb_image.movedim(-1,-3).to(device)
        tile = 512
        overlap = 32
        oom = True
        while oom:
            try:
                steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap)
                pbar = comfy.utils.ProgressBar(steps)
                upscaled_rgb = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar)
                oom = False
            except model_management.OOM_EXCEPTION as e:
                # Halve the tile size and retry; below 128 px, give up.
                tile //= 2
                if tile < 128:
                    raise e
        # Release VRAM now that inference is done
        upscale_model.to("cpu")
        upscaled_rgb = torch.clamp(upscaled_rgb.movedim(-3,-1), min=0, max=1.0)
        # Handle alpha channel if present
        if has_alpha:
            # Upscale alpha with bilinear interpolation to the exact H x W
            # the model produced for RGB (shape[1]/shape[2] of the
            # channels-last result), so the channels line up for concat.
            alpha_upscaled = torch.nn.functional.interpolate(
                alpha_channel.movedim(-1,-3).to(device),
                size=(upscaled_rgb.shape[1], upscaled_rgb.shape[2]),
                mode='bilinear',
                align_corners=False
            ).movedim(-3,-1).to(upscaled_rgb.device)
            # Clamp alpha values to [0, 1]
            alpha_upscaled = torch.clamp(alpha_upscaled, min=0, max=1.0)
            # Combine RGB and alpha channels back into RGBA
            result = torch.cat([upscaled_rgb, alpha_upscaled], dim=-1)
        else:
            result = upscaled_rgb
        return (result,)