diff --git a/README.md b/README.md index 08c4d07..7db3011 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# πŸ”— Comfyui : Bjornulf_custom_nodes v0.7 πŸ”— +# πŸ”— Comfyui : Bjornulf_custom_nodes v0.8 πŸ”— # Dependencies @@ -14,6 +14,9 @@ - **v0.5**: New node : Image to grayscale (black & white) - Convert an image to grayscale. - **v0.6**: New node : Combine images (Background + Overlay) - Combine two images into a single image. - **v0.7**: Replace Save API node with Save Bjornulf Lobechat node. (For my custom lobe-chat) +- **v0.8**: Combine images : add an option to put image top, bottom or middle. +- **v0.8**: Combine texts : add option for slashes / +- **v0.8**: Add basic node to transform greenscreen into transparency. # πŸ“ Nodes descriptions @@ -167,4 +170,12 @@ But you can sometimes also want a black and white image... **Description:** Combine two images into a single image : a background and one (or several) transparent overlay. (allow for video frames.) -❗ Warning : For now, `background` is a static image. (I will allow video there later too.) \ No newline at end of file +Update 0.8 : Also have an option to put image top, bottom or middle. +❗ Warning : For now, `background` is a static image. (I will allow video there later too.) + +## 25 - πŸŸ©βžœβ–’ Green Screen to Transparency +![Green Screen to Transparency](screenshots/greeenscreen_to_transparency.png) + +**Description:** +Transform greenscreen into transparency. +Need clean greenscreen ofc. (Can adjust threshold but very basic node.) 
\ No newline at end of file diff --git a/__init__.py b/__init__.py index 53bf379..584faa2 100644 --- a/__init__.py +++ b/__init__.py @@ -29,6 +29,7 @@ from .remove_transparency import RemoveTransparency from .image_to_grayscale import GrayscaleTransform from .combine_background_overlay import CombineBackgroundOverlay from .save_bjornulf_lobechat import SaveBjornulfLobeChat +from .green_to_transparency import GreenScreenToTransparency # from .check_black_image import CheckBlackImage # from .clear_vram import ClearVRAM @@ -37,6 +38,7 @@ from .save_bjornulf_lobechat import SaveBjornulfLobeChat NODE_CLASS_MAPPINGS = { # "Bjornulf_CustomStringType": CustomStringType, "Bjornulf_ollamaLoader": ollamaLoader, + "Bjornulf_GreenScreenToTransparency": GreenScreenToTransparency, # "Bjornulf_CheckBlackImage": CheckBlackImage, # "Bjornulf_ClearVRAM": ClearVRAM, "Bjornulf_SaveBjornulfLobeChat": SaveBjornulfLobeChat, @@ -74,6 +76,7 @@ NODE_CLASS_MAPPINGS = { NODE_DISPLAY_NAME_MAPPINGS = { # "Bjornulf_CustomStringType": "!!! 
CUSTOM STRING TYPE !!!", "Bjornulf_ollamaLoader": "πŸ¦™ Ollama (Description)", + "Bjornulf_GreenScreenToTransparency": "πŸŸ©βžœβ–’ Green Screen to Transparency", # "Bjornulf_CheckBlackImage": "πŸ”² Check Black Image (Empty mask)", "Bjornulf_SaveBjornulfLobeChat": "πŸ–ΌπŸ’¬ Save image for Bjornulf LobeChat", # "Bjornulf_ClearVRAM": "🧹 Clear VRAM", @@ -81,8 +84,9 @@ NODE_DISPLAY_NAME_MAPPINGS = { "Bjornulf_ShowInt": "πŸ‘ Show (Int)", "Bjornulf_ShowFloat": "πŸ‘ Show (Float)", "Bjornulf_CombineBackgroundOverlay": "πŸ–Ό+πŸ–Ό Combine images (Background+Overlay alpha)", - "Bjornulf_GrayscaleTransform": "πŸ”² Image to grayscale (black & white)", - "Bjornulf_RemoveTransparency": "πŸ”² Remove image Transparency (alpha)", + "Bjornulf_GrayscaleTransform": "πŸ–ΌβžœπŸ”² Image to grayscale (black & white)", + "Bjornulf_RemoveTransparency": "β–’βžœβ¬› Remove image Transparency (alpha)", + # "πŸ”²βžœβ¬› Transparency to color", "Bjornulf_ResizeImage": "πŸ“ Resize Image", "Bjornulf_SaveImagePath": "πŸ–Ό Save Image (exact path, exact name) βš οΈπŸ’£", "Bjornulf_SaveImageToFolder": "πŸ–ΌπŸ“ Save Image to a folder", diff --git a/combine_background_overlay.py b/combine_background_overlay.py index 9677457..f0bdd6c 100644 --- a/combine_background_overlay.py +++ b/combine_background_overlay.py @@ -9,6 +9,7 @@ class CombineBackgroundOverlay: "required": { "background": ("IMAGE",), "overlay_alpha": ("IMAGE",), + "position": (["middle", "top", "bottom"],), }, } @@ -16,7 +17,7 @@ class CombineBackgroundOverlay: FUNCTION = "combine_background_overlay" CATEGORY = "Bjornulf" - def combine_background_overlay(self, background, overlay_alpha): + def combine_background_overlay(self, background, overlay_alpha, position): # Convert background from torch tensor to numpy array bg = background[0].numpy() bg = (bg * 255).astype(np.uint8) @@ -36,9 +37,14 @@ class CombineBackgroundOverlay: ov_img = Image.fromarray(ov, 'RGB') ov_img = ov_img.convert('RGBA') - # Calculate position to center the overlay + # 
Calculate position based on user selection x = (bg_img.width - ov_img.width) // 2 - y = (bg_img.height - ov_img.height) // 2 + if position == "middle": + y = (bg_img.height - ov_img.height) // 2 + elif position == "top": + y = 0 + else: # bottom + y = bg_img.height - ov_img.height # Create a new image for this overlay result = Image.new('RGBA', bg_img.size, (0, 0, 0, 0)) @@ -46,7 +52,7 @@ class CombineBackgroundOverlay: # Paste the background result.paste(bg_img, (0, 0)) - # Paste the overlay in the center + # Paste the overlay in the selected position result.paste(ov_img, (x, y), ov_img) # Convert back to numpy array and then to torch tensor diff --git a/combine_texts.py b/combine_texts.py index a00f3e5..4d2767a 100644 --- a/combine_texts.py +++ b/combine_texts.py @@ -4,7 +4,7 @@ class CombineTexts: return { "required": { "number_of_inputs": ("INT", {"default": 2, "min": 2, "max": 10, "step": 1}), - "delimiter": (["newline", "comma", "space"], {"default": "newline"}), + "delimiter": (["newline", "comma", "space", "slash"], {"default": "newline"}), "text_1": ("STRING", {"forceInput": True}), "text_2": ("STRING", {"forceInput": True}), }, @@ -42,5 +42,7 @@ class CombineTexts: return "," elif delimiter == "space": return " " + elif delimiter == "slash": + return "/" else: return "\n" \ No newline at end of file diff --git a/green_to_transparency.py b/green_to_transparency.py new file mode 100644 index 0000000..d2da046 --- /dev/null +++ b/green_to_transparency.py @@ -0,0 +1,76 @@ +import numpy as np +from PIL import Image +import torch +import torchvision.transforms as transforms + +class GreenScreenToTransparency: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE", {}), + "threshold": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + FUNCTION = "remove_green_screen" + RETURN_TYPES = ("IMAGE",) + OUTPUT_NODE = True + CATEGORY = 
"Bjornulf" + + def remove_green_screen(self, image, threshold=0.1, prompt=None, extra_pnginfo=None): + # Ensure the input image is on CPU and convert to numpy array + image_np = image.cpu().numpy() + + # Check if the image is in the format [batch, height, width, channel] + if image_np.ndim == 4: + # If so, we'll process each image in the batch + processed_images = [] + for img in image_np: + processed_img = self._process_single_image(img, threshold) + processed_images.append(processed_img) + + # Stack the processed images back into a batch + processed_batch = np.stack(processed_images) + # Convert to torch tensor + processed_tensor = torch.from_numpy(processed_batch) + else: + # If it's a single image, process it directly + processed_np = self._process_single_image(image_np, threshold) + # Add batch dimension if it was originally present + if image.dim() == 4: + processed_np = np.expand_dims(processed_np, axis=0) + # Convert to torch tensor + processed_tensor = torch.from_numpy(processed_np) + + # Update metadata if needed + if extra_pnginfo is not None: + extra_pnginfo["green_screen_removed"] = True + + return (processed_tensor, prompt, extra_pnginfo) + + def _process_single_image(self, img, threshold): + # Convert to PIL Image + pil_img = Image.fromarray((img * 255).astype(np.uint8)) + + # Convert the image to RGBA mode + pil_img = pil_img.convert("RGBA") + + # Get image data as numpy array + data = np.array(pil_img) + + # Create a mask for green pixels + r, g, b, a = data[:,:,0], data[:,:,1], data[:,:,2], data[:,:,3] + mask = (g > r + threshold * 255) & (g > b + threshold * 255) + + # Set alpha channel to 0 for green pixels + data[:,:,3] = np.where(mask, 0, a) + + # Create a new image with the updated data + result = Image.fromarray(data) + + # Convert back to numpy and normalize + processed_np = np.array(result).astype(np.float32) / 255.0 + + return processed_np \ No newline at end of file diff --git a/resize_image.py b/resize_image.py index b2e42bb..432f2ae 
100644 --- a/resize_image.py +++ b/resize_image.py @@ -15,7 +15,7 @@ class ResizeImage: } FUNCTION = "resize_image" - RETURN_TYPES = ("IMAGE", "PROMPT", "EXTRA_PNGINFO") + RETURN_TYPES = ("IMAGE",) OUTPUT_NODE = True CATEGORY = "Bjornulf" diff --git a/screenshots/greeenscreen_to_transparency.png b/screenshots/greeenscreen_to_transparency.png new file mode 100644 index 0000000..26110a6 Binary files /dev/null and b/screenshots/greeenscreen_to_transparency.png differ diff --git a/workflows/FLUX_troll_test.json b/workflows/FLUX_troll_test.json index 8223eb3..b769262 100644 --- a/workflows/FLUX_troll_test.json +++ b/workflows/FLUX_troll_test.json @@ -251,9 +251,9 @@ "t5xxl_fp16.safetensors", "clip_l.safetensors", "flux", - "flux1-schnell.sft", + "flux1-schnell.safetensors", "fp8_e5m2", - "ae.sft" + "ae.safetensors" ] } ], @@ -733,4 +733,4 @@ } }, "version": 0.4 -} \ No newline at end of file +}