mirror of
https://github.com/justUmen/Bjornulf_custom_nodes.git
synced 2026-03-21 12:42:11 -03:00
v0.8, greenscreen, slash for combine text..
This commit is contained in:
13
README.md
13
README.md
@@ -1,4 +1,4 @@
|
||||
# 🔗 Comfyui : Bjornulf_custom_nodes v0.7 🔗
|
||||
# 🔗 Comfyui : Bjornulf_custom_nodes v0.8 🔗
|
||||
|
||||
# Dependencies
|
||||
|
||||
@@ -14,6 +14,9 @@
|
||||
- **v0.5**: New node : Image to grayscale (black & white) - Convert an image to grayscale.
|
||||
- **v0.6**: New node : Combine images (Background + Overlay) - Combine two images into a single image.
|
||||
- **v0.7**: Replace Save API node with Save Bjornulf Lobechat node. (For my custom lobe-chat)
|
||||
- **v0.8**: Combine images : add an option to put image top, bottom or center.
|
||||
- **v0.8**: Combine texts : add option for slashes /
|
||||
- **v0.8**: Add a basic node to transform a greenscreen into transparency.
|
||||
|
||||
# 📝 Nodes descriptions
|
||||
|
||||
@@ -167,4 +170,12 @@ But you can sometimes also want a black and white image...
|
||||
|
||||
**Description:**
|
||||
Combine two images into a single image : a background and one (or several) transparent overlay. (allow for video frames.)
|
||||
Update 0.8 : Also has an option to put the image at the top, bottom or center.
|
||||
❗ Warning : For now, `background` is a static image. (I will allow video there later too.)
|
||||
|
||||
## 25 - 🟩➜▢ Green Screen to Transparency
|
||||

|
||||
|
||||
**Description:**
|
||||
Transform greenscreen into transparency.
|
||||
Needs a clean greenscreen, of course. (The threshold can be adjusted, but this is a very basic node.)
|
||||
@@ -29,6 +29,7 @@ from .remove_transparency import RemoveTransparency
|
||||
from .image_to_grayscale import GrayscaleTransform
|
||||
from .combine_background_overlay import CombineBackgroundOverlay
|
||||
from .save_bjornulf_lobechat import SaveBjornulfLobeChat
|
||||
from .green_to_transparency import GreenScreenToTransparency
|
||||
# from .check_black_image import CheckBlackImage
|
||||
# from .clear_vram import ClearVRAM
|
||||
|
||||
@@ -37,6 +38,7 @@ from .save_bjornulf_lobechat import SaveBjornulfLobeChat
|
||||
NODE_CLASS_MAPPINGS = {
|
||||
# "Bjornulf_CustomStringType": CustomStringType,
|
||||
"Bjornulf_ollamaLoader": ollamaLoader,
|
||||
"Bjornulf_GreenScreenToTransparency": GreenScreenToTransparency,
|
||||
# "Bjornulf_CheckBlackImage": CheckBlackImage,
|
||||
# "Bjornulf_ClearVRAM": ClearVRAM,
|
||||
"Bjornulf_SaveBjornulfLobeChat": SaveBjornulfLobeChat,
|
||||
@@ -74,6 +76,7 @@ NODE_CLASS_MAPPINGS = {
|
||||
NODE_DISPLAY_NAME_MAPPINGS = {
|
||||
# "Bjornulf_CustomStringType": "!!! CUSTOM STRING TYPE !!!",
|
||||
"Bjornulf_ollamaLoader": "🦙 Ollama (Description)",
|
||||
"Bjornulf_GreenScreenToTransparency": "🟩➜▢ Green Screen to Transparency",
|
||||
# "Bjornulf_CheckBlackImage": "🔲 Check Black Image (Empty mask)",
|
||||
"Bjornulf_SaveBjornulfLobeChat": "🖼💬 Save image for Bjornulf LobeChat",
|
||||
# "Bjornulf_ClearVRAM": "🧹 Clear VRAM",
|
||||
@@ -81,8 +84,9 @@ NODE_DISPLAY_NAME_MAPPINGS = {
|
||||
"Bjornulf_ShowInt": "👁 Show (Int)",
|
||||
"Bjornulf_ShowFloat": "👁 Show (Float)",
|
||||
"Bjornulf_CombineBackgroundOverlay": "🖼+🖼 Combine images (Background+Overlay alpha)",
|
||||
"Bjornulf_GrayscaleTransform": "🔲 Image to grayscale (black & white)",
|
||||
"Bjornulf_RemoveTransparency": "🔲 Remove image Transparency (alpha)",
|
||||
"Bjornulf_GrayscaleTransform": "🖼➜🔲 Image to grayscale (black & white)",
|
||||
"Bjornulf_RemoveTransparency": "▢➜⬛ Remove image Transparency (alpha)",
|
||||
# "🔲➜⬛ Transparency to color",
|
||||
"Bjornulf_ResizeImage": "📏 Resize Image",
|
||||
"Bjornulf_SaveImagePath": "🖼 Save Image (exact path, exact name) ⚠️💣",
|
||||
"Bjornulf_SaveImageToFolder": "🖼📁 Save Image to a folder",
|
||||
|
||||
@@ -9,6 +9,7 @@ class CombineBackgroundOverlay:
|
||||
"required": {
|
||||
"background": ("IMAGE",),
|
||||
"overlay_alpha": ("IMAGE",),
|
||||
"position": (["middle", "top", "bottom"],),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -16,7 +17,7 @@ class CombineBackgroundOverlay:
|
||||
FUNCTION = "combine_background_overlay"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def combine_background_overlay(self, background, overlay_alpha):
|
||||
def combine_background_overlay(self, background, overlay_alpha, position):
|
||||
# Convert background from torch tensor to numpy array
|
||||
bg = background[0].numpy()
|
||||
bg = (bg * 255).astype(np.uint8)
|
||||
@@ -36,9 +37,14 @@ class CombineBackgroundOverlay:
|
||||
ov_img = Image.fromarray(ov, 'RGB')
|
||||
ov_img = ov_img.convert('RGBA')
|
||||
|
||||
# Calculate position to center the overlay
|
||||
# Calculate position based on user selection
|
||||
x = (bg_img.width - ov_img.width) // 2
|
||||
y = (bg_img.height - ov_img.height) // 2
|
||||
if position == "middle":
|
||||
y = (bg_img.height - ov_img.height) // 2
|
||||
elif position == "top":
|
||||
y = 0
|
||||
else: # bottom
|
||||
y = bg_img.height - ov_img.height
|
||||
|
||||
# Create a new image for this overlay
|
||||
result = Image.new('RGBA', bg_img.size, (0, 0, 0, 0))
|
||||
@@ -46,7 +52,7 @@ class CombineBackgroundOverlay:
|
||||
# Paste the background
|
||||
result.paste(bg_img, (0, 0))
|
||||
|
||||
# Paste the overlay in the center
|
||||
# Paste the overlay in the selected position
|
||||
result.paste(ov_img, (x, y), ov_img)
|
||||
|
||||
# Convert back to numpy array and then to torch tensor
|
||||
|
||||
@@ -4,7 +4,7 @@ class CombineTexts:
|
||||
return {
|
||||
"required": {
|
||||
"number_of_inputs": ("INT", {"default": 2, "min": 2, "max": 10, "step": 1}),
|
||||
"delimiter": (["newline", "comma", "space"], {"default": "newline"}),
|
||||
"delimiter": (["newline", "comma", "space", "slash"], {"default": "newline"}),
|
||||
"text_1": ("STRING", {"forceInput": True}),
|
||||
"text_2": ("STRING", {"forceInput": True}),
|
||||
},
|
||||
@@ -42,5 +42,7 @@ class CombineTexts:
|
||||
return ","
|
||||
elif delimiter == "space":
|
||||
return " "
|
||||
elif delimiter == "slash":
|
||||
return "/"
|
||||
else:
|
||||
return "\n"
|
||||
76
green_to_transparency.py
Normal file
76
green_to_transparency.py
Normal file
@@ -0,0 +1,76 @@
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
import torch
|
||||
import torchvision.transforms as transforms
|
||||
|
||||
class GreenScreenToTransparency:
    """ComfyUI node that keys a green screen out to transparency.

    A pixel is considered "green screen" when its green channel exceeds
    BOTH the red and blue channels by more than ``threshold`` (expressed
    as a fraction of 255). Such pixels get alpha = 0; all other pixels
    keep full opacity. Output is an RGBA image tensor.
    """

    @classmethod
    def INPUT_TYPES(cls):
        # ComfyUI input declaration: one image plus a keying threshold.
        # The hidden inputs are standard ComfyUI metadata plumbing.
        return {
            "required": {
                "image": ("IMAGE", {}),
                "threshold": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01}),
            },
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
        }

    FUNCTION = "remove_green_screen"
    RETURN_TYPES = ("IMAGE",)
    OUTPUT_NODE = True
    CATEGORY = "Bjornulf"

    def remove_green_screen(self, image, threshold=0.1, prompt=None, extra_pnginfo=None):
        """Turn green-screen areas of ``image`` transparent.

        Args:
            image: torch tensor, float values in [0, 1]; normally ComfyUI's
                [batch, height, width, channels] layout (an unbatched
                [H, W, C] tensor is also accepted).
            threshold: how much greener than red/blue a pixel must be,
                as a fraction of 255, before it is keyed out.
            prompt: hidden ComfyUI prompt metadata (unused here).
            extra_pnginfo: hidden ComfyUI PNG-metadata dict; tagged with
                ``green_screen_removed`` when present.

        Returns:
            A 1-tuple holding the RGBA image tensor.
        """
        # Work on CPU as a numpy array.
        image_np = image.cpu().numpy()

        if image_np.ndim == 4:
            # Batched input: key each frame independently, then re-stack.
            processed_batch = np.stack(
                [self._process_single_image(frame, threshold) for frame in image_np]
            )
            processed_tensor = torch.from_numpy(processed_batch)
        else:
            # Single unbatched [H, W, C] image. (The original code also
            # checked `image.dim() == 4` here, which can never be true in
            # this branch — dead check removed; behavior is unchanged.)
            processed_tensor = torch.from_numpy(
                self._process_single_image(image_np, threshold)
            )

        # Record in the PNG metadata that the green screen was removed.
        if extra_pnginfo is not None:
            extra_pnginfo["green_screen_removed"] = True

        # BUGFIX: return a 1-tuple to match RETURN_TYPES = ("IMAGE",).
        # The original returned (tensor, prompt, extra_pnginfo), a 3-tuple
        # contradicting the single declared output.
        return (processed_tensor,)

    def _process_single_image(self, img, threshold):
        """Key out green pixels of one [H, W, C] float image.

        Returns an [H, W, 4] float32 RGBA array in [0, 1]. Implemented in
        pure numpy; the previous PIL round-trip (fromarray -> convert ->
        np.array) produced identical data.
        """
        # Work in 0-255 uint8 space, matching the original PIL-based code.
        data = (img * 255).astype(np.uint8)

        # Ensure an alpha channel exists (RGB -> RGBA with alpha = 255,
        # exactly what PIL's convert("RGBA") did).
        if data.shape[-1] == 3:
            alpha = np.full(data.shape[:2] + (1,), 255, dtype=np.uint8)
            data = np.concatenate([data, alpha], axis=-1)

        r, g, b, a = data[:, :, 0], data[:, :, 1], data[:, :, 2], data[:, :, 3]
        # Green dominates both red and blue by more than threshold*255.
        # (uint8 + float promotes to float64, so no overflow here.)
        mask = (g > r + threshold * 255) & (g > b + threshold * 255)

        # Zero the alpha channel for keyed pixels; keep it elsewhere.
        data[:, :, 3] = np.where(mask, 0, a)

        # Back to float32 in [0, 1] for ComfyUI.
        return data.astype(np.float32) / 255.0
|
||||
@@ -15,7 +15,7 @@ class ResizeImage:
|
||||
}
|
||||
|
||||
FUNCTION = "resize_image"
|
||||
RETURN_TYPES = ("IMAGE", "PROMPT", "EXTRA_PNGINFO")
|
||||
RETURN_TYPES = ("IMAGE",)
|
||||
OUTPUT_NODE = True
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
|
||||
BIN
screenshots/greeenscreen_to_transparency.png
Normal file
BIN
screenshots/greeenscreen_to_transparency.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 215 KiB |
@@ -251,9 +251,9 @@
|
||||
"t5xxl_fp16.safetensors",
|
||||
"clip_l.safetensors",
|
||||
"flux",
|
||||
"flux1-schnell.sft",
|
||||
"flux1-schnell.safetensors",
|
||||
"fp8_e5m2",
|
||||
"ae.sft"
|
||||
"ae.safetensors"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
Reference in New Issue
Block a user