v1.1.4 cut and shift

This commit is contained in:
justumen
2025-05-20 15:47:06 +02:00
parent 18b59b9343
commit fd66298414
4 changed files with 75 additions and 3 deletions

View File

@@ -1,4 +1,4 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v1.1.3 🔗
# 🔗 Comfyui : Bjornulf_custom_nodes v1.1.4 🔗
A list of 169 custom nodes for Comfyui : Display, manipulate, create and edit text, images, videos, loras, generate characters and more.
You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech.

View File

@@ -135,6 +135,7 @@ from .loop_random_seed import LoopRandomSeed
# from .run_workflow_from_api import ExecuteWorkflowNode, ApiDynamicTextInputs
# from .remote_nodes import RemoteVAEDecoderNodeTiled, RemoteVAEDecoderNode, LoadFromBase64, SaveTensors, LoadTensor
# from .fix_face import FixFace, FaceSettings
from .image_cut_and_shift import HorizontalCutAndShift
#RemoteTextEncodingWithCLIPs
@@ -144,6 +145,7 @@ NODE_CLASS_MAPPINGS = {
"Bjornulf_BoundingRectangleMask": BoundingRectangleMask,
"Bjornulf_OpenAIVisionNode": OpenAIVisionNode,
"Bjornulf_LoopRandomSeed": LoopRandomSeed,
"Bjornulf_HorizontalCutAndShift": HorizontalCutAndShift,
# "Bjornulf_PurgeCLIPNode": PurgeCLIPNode,
# "Bjornulf_RemoteTextEncodingWithCLIPs": RemoteTextEncodingWithCLIPs,
@@ -333,6 +335,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
# "Bjornulf_RemoteTextEncodingWithCLIPs": "[BETA] 🔮 Remote Text Encoding with CLIPs",
# "Bjornulf_ConditionalSwitch": "ConditionalSwitch",
# "Bjornulf_PurgeCLIPNode": "🧹📎 Purge CLIP",
"Bjornulf_HorizontalCutAndShift": "🔪🖼 Horizontal Cut and Shift 🔼🔽",
# "Bjornulf_FixFace": "[BETA] 🔧🧑 Fix Face",
# "Bjornulf_FaceSettings": "[BETA] 🧑 Face Settings [Fix Face] ⚙",

69
image_cut_and_shift.py Normal file
View File

@@ -0,0 +1,69 @@
import torch
import numpy as np
from PIL import Image
class HorizontalCutAndShift:
    """ComfyUI node: cut an image horizontally at row X, shift the bottom
    part up by Y pixels and the top part down by Z pixels.

    Rows vacated by the shifts are filled with a solid color (black or
    white). Rows pushed off the top/bottom edge are dropped. Where the two
    shifted parts overlap, the top part wins (it is copied last).

    Image tensors follow the ComfyUI convention: (batch, height, width,
    channels), float values in [0, 1].
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),  # Input image tensor (B, H, W, C)
                "X": ("INT", {"default": 0, "min": 0, "max": 4096}),  # Cut position (row index)
                "Y": ("INT", {"default": 0, "min": 0, "max": 4096}),  # Upward shift for bottom part
                "Z": ("INT", {"default": 0, "min": 0, "max": 4096}),  # Downward shift for top part
                "fill_color": (["black", "white"],),  # Color for vacated rows
            }
        }

    RETURN_TYPES = ("IMAGE",)  # Output is an image tensor
    FUNCTION = "process"       # Processing function name
    CATEGORY = "image"         # Node category in ComfyUI

    def process(self, image, X, Y, Z, fill_color):
        """Cut `image` at row X and shift the two parts vertically.

        Args:
            image: tensor of shape (batch, H, W, channels).
            X: cut row; rows [0, X) are the "top" part, rows [X, H) the "bottom".
            Y: number of pixels the bottom part moves up.
            Z: number of pixels the top part moves down.
            fill_color: "black" or "white" background for uncovered rows.

        Returns:
            One-element tuple with the shifted image tensor.

        Raises:
            ValueError: if fill_color is not "black" or "white".
        """
        batch, H, W, channels = image.shape

        # Start from a solid background; copied rows overwrite it below.
        if fill_color == "black":
            output = torch.zeros_like(image)
        elif fill_color == "white":
            output = torch.ones_like(image)
        else:
            raise ValueError("Invalid fill_color: must be 'black' or 'white'")

        # Bottom part: source rows [X, H) move up by Y. Source rows that
        # would land above row 0 are dropped, so copying starts at source
        # row max(X, Y). Nothing to copy if the cut or the shift is >= H.
        if X < H and Y < H:
            src = max(X, Y)
            output[:, src - Y:H - Y, :, :] = image[:, src:H, :, :]

        # Top part: source rows [0, X) move down by Z. Rows pushed past the
        # bottom edge are dropped, so at most H - Z rows are copied. Done
        # second so the top part overwrites the bottom where they overlap.
        if X >= 1 and Z < H:
            rows = min(X, H - Z)
            output[:, Z:Z + rows, :, :] = image[:, :rows, :, :]

        return (output,)  # ComfyUI expects a tuple of outputs

View File

@@ -1,7 +1,7 @@
[project]
name = "bjornulf_custom_nodes"
description = "168 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech, etc..."
version = "1.1.3"
description = "169 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech, etc..."
version = "1.1.4"
license = {file = "LICENSE"}
[project.urls]