mirror of
https://github.com/justUmen/Bjornulf_custom_nodes.git
synced 2026-03-21 20:52:11 -03:00
fix pingpong, new node combine image background + overlay
This commit is contained in:
16
README.md
16
README.md
@@ -1,4 +1,4 @@
|
||||
# 🔗 Comfyui : Bjornulf_custom_nodes v0.5 🔗
|
||||
# 🔗 Comfyui : Bjornulf_custom_nodes v0.6 🔗
|
||||
|
||||
# Dependencies
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
- **v0.4**: Support transparency option with webm format, options encoders. As well as input for audio stream.
|
||||
- **v0.5**: New node : Remove image transparency (alpha) - Fill alpha channel with solid color.
|
||||
- **v0.5**: New node : Image to grayscale (black & white) - Convert an image to grayscale.
|
||||
- **v0.6**: New node : Combine images (Background + Overlay) - Combine two images into a single image.
|
||||
|
||||
# 📝 Nodes descriptions
|
||||
|
||||
@@ -144,7 +145,7 @@ Combine a sequence of images into a video file.
|
||||
Temporary images are stored in the folder `ComfyUI/temp_images_imgs2video/` as well as the wav audio file.
|
||||
|
||||
## 22 - 🔲 Remove image Transparency (alpha)
|
||||

|
||||

|
||||
|
||||
**Description:**
|
||||
Remove transparency from an image by filling the alpha channel with a solid color. (black, white or greenscreen)
|
||||
@@ -152,9 +153,16 @@ Of course it takes in an image with transparency, like from rembg nodes.
|
||||
Necessary for some nodes that don't support transparency.
|
||||
|
||||
## 23 - 🔲 Image to grayscale (black & white)
|
||||

|
||||

|
||||
|
||||
**Description:**
|
||||
Convert an image to grayscale (black & white)
|
||||
Example: I sometimes use it with IPAdapter to disable color influence.
|
||||
But you can sometimes also want a black and white image...
|
||||
But you can sometimes also want a black and white image...
|
||||
|
||||
## 24 - 🖼+🖼 Combine images (Background + Overlay)
|
||||

|
||||
|
||||
**Description:**
|
||||
Combine two images into a single image: a background and one (or several) transparent overlays. (Allows for video frames.)
|
||||
❗ Warning : For now, `background` is a static image. (I will allow video there later too.)
|
||||
@@ -27,6 +27,7 @@ from .resize_image import ResizeImage
|
||||
from .loop_my_combos_samplers_schedulers import LoopCombosSamplersSchedulers
|
||||
from .remove_transparency import RemoveTransparency
|
||||
from .image_to_grayscale import GrayscaleTransform
|
||||
from .combine_background_overlay import CombineBackgroundOverlay
|
||||
|
||||
# from .CUSTOM_STRING import CustomStringType
|
||||
|
||||
@@ -36,6 +37,7 @@ NODE_CLASS_MAPPINGS = {
|
||||
"Bjornulf_WriteText": WriteText,
|
||||
"Bjornulf_RemoveTransparency": RemoveTransparency,
|
||||
"Bjornulf_GrayscaleTransform": GrayscaleTransform,
|
||||
"Bjornulf_CombineBackgroundOverlay": CombineBackgroundOverlay,
|
||||
# "Bjornulf_WriteImageEnvironment": WriteImageEnvironment,
|
||||
# "Bjornulf_WriteImageCharacters": WriteImageCharacters,
|
||||
# "Bjornulf_WriteImageCharacter": WriteImageCharacter,
|
||||
@@ -69,6 +71,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
|
||||
"Bjornulf_ShowText": "👁 Show (Text)",
|
||||
"Bjornulf_ShowInt": "👁 Show (Int)",
|
||||
"Bjornulf_ShowFloat": "👁 Show (Float)",
|
||||
"Bjornulf_CombineBackgroundOverlay": "🖼+🖼 Combine images (Background+Overlay alpha)",
|
||||
"Bjornulf_GrayscaleTransform": "🔲 Image to grayscale (black & white)",
|
||||
"Bjornulf_RemoveTransparency": "🔲 Remove image Transparency (alpha)",
|
||||
"Bjornulf_ResizeImage": "📏 Resize Image",
|
||||
|
||||
66
combine_background_overlay.py
Normal file
66
combine_background_overlay.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import torch
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
class CombineBackgroundOverlay:
    """ComfyUI node: composite one or more (possibly transparent) overlay
    frames onto the center of a single static background image.

    Produces one combined RGB frame per overlay frame, so a batch of
    overlay frames (e.g. video frames) can be placed over one background.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "background": ("IMAGE",),     # static background; only the first frame of the batch is used
                "overlay_alpha": ("IMAGE",),  # batch of RGB or RGBA overlay frames
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "combine_background_overlay"
    CATEGORY = "Bjornulf"

    def combine_background_overlay(self, background, overlay_alpha):
        """Composite each overlay frame onto the background, centered.

        background: float tensor batch (B, H, W, C) in [0, 1]; only
            frame 0 is used.  An alpha channel, if present, is dropped.
        overlay_alpha: float tensor batch (N, h, w, C) in [0, 1]; RGBA
            frames are alpha-composited, RGB frames cover fully.

        Returns a 1-tuple with an (N, H, W, 3) float tensor in [0, 1].
        """
        # Bug fix: .detach().cpu() so CUDA / autograd tensors don't make
        # .numpy() raise (original called .numpy() directly).
        bg = background[0].detach().cpu().numpy()
        bg = (bg * 255).astype(np.uint8)
        # Bug fix: the original assumed an RGB background and crashed on
        # RGBA input -- drop any alpha channel before building the image.
        if bg.shape[2] == 4:
            bg = bg[:, :, :3]
        bg_img = Image.fromarray(bg, 'RGB')

        results = []
        for overlay in overlay_alpha:
            ov = overlay.detach().cpu().numpy()
            ov = (ov * 255).astype(np.uint8)

            # Keep the alpha channel when present so paste() can mask with it;
            # plain RGB overlays get an opaque alpha from convert().
            if ov.shape[2] == 4:
                ov_img = Image.fromarray(ov, 'RGBA')
            else:
                ov_img = Image.fromarray(ov, 'RGB').convert('RGBA')

            # Position so the overlay is centered on the background.
            x = (bg_img.width - ov_img.width) // 2
            y = (bg_img.height - ov_img.height) // 2

            # Start from the background, then alpha-paste the overlay on top.
            result = Image.new('RGBA', bg_img.size, (0, 0, 0, 0))
            result.paste(bg_img, (0, 0))
            result.paste(ov_img, (x, y), ov_img)

            result_np = np.array(result)
            # Output frames are RGB -- strip the working alpha channel.
            if result_np.shape[2] == 4:
                result_np = result_np[:, :, :3]

            results.append(torch.from_numpy(result_np).float() / 255.0)

        # One output frame per overlay frame, stacked into a batch.
        return (torch.stack(results),)
|
||||
BIN
screenshots/combine_background_overlay.png
Normal file
BIN
screenshots/combine_background_overlay.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 631 KiB |
@@ -1,4 +1,8 @@
|
||||
import torch
|
||||
import os
|
||||
import shutil
|
||||
from PIL import Image
|
||||
import numpy as np
|
||||
|
||||
class VideoPingPong:
|
||||
@classmethod
|
||||
@@ -14,10 +18,55 @@ class VideoPingPong:
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def pingpong_images(self, images):
    """Build a ping-pong (forward-then-backward) sequence of frames.

    images: torch tensor batch (N, H, W, C) of frames, or any plain
        sequence supporting slicing and ``+`` (e.g. a list).

    Returns a 1-tuple with the frames followed by the reversed frames;
    the first reversed frame is skipped so the turnaround frame is not
    duplicated: indices [0..N-1] + [N-2..0].

    NOTE(review): the original body contained a large unreachable
    disk-based implementation after the return statement (leftover from
    a previous version); that dead code has been removed -- reachable
    behavior is unchanged.
    """
    if isinstance(images, torch.Tensor):
        reversed_images = torch.flip(images, [0])
        # [1:] drops the duplicated frame at the turnaround point.
        combined_images = torch.cat((images, reversed_images[1:]), dim=0)
    else:
        reversed_images = images[::-1]
        combined_images = images + reversed_images[1:]
    return (combined_images,)
|
||||
Reference in New Issue
Block a user