diff --git a/README.md b/README.md index d232f98..f5900ce 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# ๐Ÿ”— Comfyui : Bjornulf_custom_nodes v1.1.3 ๐Ÿ”— +# ๐Ÿ”— Comfyui : Bjornulf_custom_nodes v1.1.4 ๐Ÿ”— A list of 168 custom nodes for Comfyui : Display, manipulate, create and edit text, images, videos, loras, generate characters and more. You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech. diff --git a/__init__.py b/__init__.py index 065b51e..0b793ae 100644 --- a/__init__.py +++ b/__init__.py @@ -135,6 +135,7 @@ from .loop_random_seed import LoopRandomSeed # from .run_workflow_from_api import ExecuteWorkflowNode, ApiDynamicTextInputs # from .remote_nodes import RemoteVAEDecoderNodeTiled, RemoteVAEDecoderNode, LoadFromBase64, SaveTensors, LoadTensor # from .fix_face import FixFace, FaceSettings +from .image_cut_and_shift import HorizontalCutAndShift #RemoteTextEncodingWithCLIPs @@ -144,6 +145,7 @@ NODE_CLASS_MAPPINGS = { "Bjornulf_BoundingRectangleMask": BoundingRectangleMask, "Bjornulf_OpenAIVisionNode": OpenAIVisionNode, "Bjornulf_LoopRandomSeed": LoopRandomSeed, + "Bjornulf_HorizontalCutAndShift": HorizontalCutAndShift, # "Bjornulf_PurgeCLIPNode": PurgeCLIPNode, # "Bjornulf_RemoteTextEncodingWithCLIPs": RemoteTextEncodingWithCLIPs, @@ -333,6 +335,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { # "Bjornulf_RemoteTextEncodingWithCLIPs": "[BETA] ๐Ÿ”ฎ Remote Text Encoding with CLIPs", # "Bjornulf_ConditionalSwitch": "ConditionalSwitch", # "Bjornulf_PurgeCLIPNode": "๐Ÿงน๐Ÿ“Ž Purge CLIP", + "Bjornulf_HorizontalCutAndShift": "๐Ÿ”ช๐Ÿ–ผ Horizontal Cut and Shift ๐Ÿ”ผ๐Ÿ”ฝ", # "Bjornulf_FixFace": "[BETA] ๐Ÿ”ง๐Ÿง‘ Fix Face", # "Bjornulf_FaceSettings": "[BETA] ๐Ÿง‘ Face Settings [Fix Face] โš™", diff --git a/image_cut_and_shift.py b/image_cut_and_shift.py new file mode 100644 index 0000000..6f465c4 --- /dev/null +++ 
import torch


class HorizontalCutAndShift:
    """ComfyUI node: cut an image horizontally at row X, shift the part below
    the cut UP by Y pixels and the part above the cut DOWN by Z pixels,
    filling any rows left uncovered with a solid color.

    Input images are ComfyUI IMAGE tensors of shape (batch, H, W, C) with
    float values; the output is a single IMAGE tensor of the same shape.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),  # (batch, H, W, C) input tensor
                # Cut row: rows [0, X) are the "top" part, rows [X, H) the "bottom".
                "X": ("INT", {"default": 0, "min": 0, "max": 4096}),
                # Upward shift (in pixels) applied to the bottom part.
                "Y": ("INT", {"default": 0, "min": 0, "max": 4096}),
                # Downward shift (in pixels) applied to the top part.
                "Z": ("INT", {"default": 0, "min": 0, "max": 4096}),
                # Fill color for rows not covered by either shifted part.
                "fill_color": (["black", "white"],),
            }
        }

    RETURN_TYPES = ("IMAGE",)  # Output is an image tensor
    FUNCTION = "process"       # Entry point called by ComfyUI
    CATEGORY = "image"         # Node category in the ComfyUI menu

    def process(self, image, X, Y, Z, fill_color):
        """Return (shifted_image,).

        Rows [X, H) move up by Y (rows pushed above row 0 are clipped);
        rows [0, X) move down by Z (rows pushed past row H-1 are clipped).
        The top part is written last, so where the two shifted parts overlap
        the top part wins — this matches the original statement order.

        Raises:
            ValueError: if fill_color is neither "black" nor "white".
        """
        _, H, _, _ = image.shape  # only the height is needed below

        # Background canvas in the requested fill color.
        if fill_color == "black":
            output = torch.zeros_like(image)  # zeros == black
        elif fill_color == "white":
            output = torch.ones_like(image)   # ones == white
        else:
            raise ValueError("Invalid fill_color: must be 'black' or 'white'")

        # Bottom part: source rows [X, H) land on [X - Y, H - Y). Rows that
        # would move above row 0 are clipped, so copying starts at max(X, Y).
        # (The previous implementation carried unreachable "offset"/"excess"
        # correction branches; the clip bounds below are the closed form.)
        bottom_src_start = max(X, Y)
        if bottom_src_start < H:
            output[:, bottom_src_start - Y:H - Y, :, :] = \
                image[:, bottom_src_start:H, :, :]

        # Top part: source rows [0, X) land on [Z, X + Z). Rows pushed past
        # the bottom edge are clipped, so only min(X, H - Z) rows are copied.
        # Written after the bottom part so it overwrites any overlap.
        top_rows = min(X, H - Z)
        if top_rows > 0:
            output[:, Z:Z + top_rows, :, :] = image[:, 0:top_rows, :, :]

        return (output,)  # ComfyUI expects a tuple of outputs