RETURN_TYPES = ("IMAGE",)
FUNCTION = "apply_overlay_image"
CATEGORY = "Efficiency Nodes/Image"

def apply_overlay_image(self, base_image, overlay_image, overlay_resize, resize_method, rescale_factor,
                        width, height, x_offset, y_offset, rotation, opacity, optional_mask=None):
    """Composite ``overlay_image`` onto every image of the ``base_image`` batch.

    Args:
        base_image: IMAGE tensor, shape (B, H, W, C) — the batch to paste onto.
        overlay_image: IMAGE tensor to composite on top.
        overlay_resize: one of "None", "Fit", "Resize by rescale_factor",
            "Resize to width & heigth" (string must match the node's option
            list exactly, including the historical "heigth" spelling).
        resize_method: upscale method name forwarded to comfy's common_upscale.
        rescale_factor: multiplier used by the "Resize by rescale_factor" mode.
        width, height: explicit target size for the "Resize to width & heigth" mode.
        x_offset, y_offset: paste position (top-left corner) in pixels.
        rotation: degrees to rotate the overlay counter-clockwise (canvas expands).
        opacity: 0-100; higher values make the overlay MORE transparent.
        optional_mask: optional MASK tensor; defaults to None so the node still
            works when no mask input is connected.

    Returns:
        A 1-tuple containing the edited IMAGE tensor, shape (B, H, W, C).
    """
    # Pack tuples; PIL's paste() requires integer coordinates.
    size = (int(width), int(height))
    location = (int(x_offset), int(y_offset))
    mask = optional_mask

    # Optionally resize the overlay before compositing.
    if overlay_resize != "None":
        # overlay_image is (B, H, W, C): width = dim 2, height = dim 1.
        overlay_image_size = overlay_image.size()
        overlay_image_size = (overlay_image_size[2], overlay_image_size[1])
        if overlay_resize == "Fit":
            # base_image is a tensor here, so use .shape, not PIL's .size:
            # shape is (B, H, W, C) -> (width, height) = (shape[2], shape[1]).
            overlay_image_size = (base_image.shape[2], base_image.shape[1])
        elif overlay_resize == "Resize by rescale_factor":
            overlay_image_size = tuple(int(dimension * rescale_factor) for dimension in overlay_image_size)
        elif overlay_resize == "Resize to width & heigth":
            overlay_image_size = (size[0], size[1])

        # common_upscale expects channels-first; move C to dim 1 and back.
        samples = overlay_image.movedim(-1, 1)
        overlay_image = comfy.utils.common_upscale(samples, overlay_image_size[0], overlay_image_size[1],
                                                   resize_method, False)
        overlay_image = overlay_image.movedim(1, -1)

    overlay_image = tensor2pil(overlay_image)

    # Ensure the overlay has a fully opaque alpha channel as a baseline.
    overlay_image = overlay_image.convert('RGBA')
    overlay_image.putalpha(Image.new("L", overlay_image.size, 255))

    # If a mask is connected, use its inverse as the overlay's alpha.
    if mask is not None:
        mask = tensor2pil(mask)
        mask = mask.resize(overlay_image.size)
        overlay_image.putalpha(ImageOps.invert(mask))

    # Rotate the overlay; expand=True grows the canvas so corners aren't clipped.
    overlay_image = overlay_image.rotate(rotation, expand=True)

    # Apply opacity: opacity=0 keeps alpha as-is, opacity=100 makes it invisible.
    r, g, b, a = overlay_image.split()
    a = a.point(lambda x: max(0, int(x * (1 - opacity / 100))))
    overlay_image.putalpha(a)

    # Composite the overlay onto each image of the batch individually.
    base_image_list = torch.unbind(base_image, dim=0)
    processed_base_image_list = []
    for tensor in base_image_list:
        image = tensor2pil(tensor)

        # NOTE(review): without a mask, paste() ignores the overlay's alpha
        # channel (opacity has no visible effect) — matches prior behavior.
        if mask is None:
            image.paste(overlay_image, location)
        else:
            image.paste(overlay_image, location, overlay_image)

        processed_base_image_list.append(pil2tensor(image))

    # squeeze(0) (not squeeze()) drops only the batch dim pil2tensor added,
    # so degenerate H==1 or W==1 images are not collapsed.
    base_image = torch.stack([tensor.squeeze(0) for tensor in processed_base_image_list])

    return (base_image,)