mirror of
https://github.com/justUmen/Bjornulf_custom_nodes.git
synced 2026-03-21 20:52:11 -03:00
Better images to video
This commit is contained in:
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,2 +1,3 @@
|
|||||||
__pycache__/
|
__pycache__/
|
||||||
SaveText/
|
SaveText/
|
||||||
|
API_example/
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
# 🔗 Comfyui : Bjornulf_custom_nodes v0.3 🔗
|
# 🔗 Comfyui : Bjornulf_custom_nodes v0.4 🔗
|
||||||
|
|
||||||
# Dependencies
|
# Dependencies
|
||||||
|
|
||||||
@@ -9,6 +9,7 @@
|
|||||||
- **v0.2 Ollama**: Improve ollama node with system prompt + model selection.
|
- **v0.2 Ollama**: Improve ollama node with system prompt + model selection.
|
||||||
- **v0.3 Save Image to Folder**: Add a new node : Save image to a chosen folder.
|
- **v0.3 Save Image to Folder**: Add a new node : Save image to a chosen folder.
|
||||||
- **v0.3 Save Images**: Add comfyui Metadata / workflow to all my image-related nodes.
|
- **v0.3 Save Images**: Add comfyui Metadata / workflow to all my image-related nodes.
|
||||||
|
- **v0.3 Images to video**: Support transparency with webm format. As well as an audio stream.
|
||||||
|
|
||||||
# 📝 Nodes descriptions
|
# 📝 Nodes descriptions
|
||||||
|
|
||||||
@@ -137,4 +138,4 @@ Create a ping-pong effect from a list of images (from a video) by reversing the
|
|||||||
|
|
||||||
**Description:**
|
**Description:**
|
||||||
Combine a sequence of images into a video file.
|
Combine a sequence of images into a video file.
|
||||||
❓ I made this node because it supports transparency with webm format. (Good for rembg)
|
❓ I made this node because it supports transparency with webm format. (Needed for rembg)
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
from .create_video import imgs2vid
|
from .images_to_video import imagesToVideo
|
||||||
from .write_text import WriteText
|
from .write_text import WriteText
|
||||||
from .write_image_environment import WriteImageEnvironment
|
from .write_image_environment import WriteImageEnvironment
|
||||||
from .write_image_characters import WriteImageCharacters
|
from .write_image_characters import WriteImageCharacters
|
||||||
@@ -49,7 +49,7 @@ NODE_CLASS_MAPPINGS = {
|
|||||||
"Bjornulf_LoopTexts": LoopTexts,
|
"Bjornulf_LoopTexts": LoopTexts,
|
||||||
"Bjornulf_RandomTexts": RandomTexts,
|
"Bjornulf_RandomTexts": RandomTexts,
|
||||||
"Bjornulf_RandomModelClipVae": RandomModelClipVae,
|
"Bjornulf_RandomModelClipVae": RandomModelClipVae,
|
||||||
"Bjornulf_imgs2vid": imgs2vid,
|
"Bjornulf_imagesToVideo": imagesToVideo,
|
||||||
"Bjornulf_VideoPingPong": VideoPingPong,
|
"Bjornulf_VideoPingPong": VideoPingPong,
|
||||||
"Bjornulf_LoopFloat": LoopFloat,
|
"Bjornulf_LoopFloat": LoopFloat,
|
||||||
"Bjornulf_LoopInteger": LoopInteger,
|
"Bjornulf_LoopInteger": LoopInteger,
|
||||||
@@ -81,7 +81,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
|
|||||||
"Bjornulf_LoopTexts": "♻ Loop (Texts)",
|
"Bjornulf_LoopTexts": "♻ Loop (Texts)",
|
||||||
"Bjornulf_RandomTexts": "🎲 Random (Texts)",
|
"Bjornulf_RandomTexts": "🎲 Random (Texts)",
|
||||||
"Bjornulf_RandomModelClipVae": "🎲 Random (Model+Clip+Vae)",
|
"Bjornulf_RandomModelClipVae": "🎲 Random (Model+Clip+Vae)",
|
||||||
"Bjornulf_imgs2vid": "📹 imgs2vid (FFmpeg)",
|
"Bjornulf_imagesToVideo": "📹 images to video (FFmpeg)",
|
||||||
"Bjornulf_VideoPingPong": "📹 video PingPong",
|
"Bjornulf_VideoPingPong": "📹 video PingPong",
|
||||||
"Bjornulf_LoopFloat": "♻ Loop (Float)",
|
"Bjornulf_LoopFloat": "♻ Loop (Float)",
|
||||||
"Bjornulf_LoopInteger": "♻ Loop (Integer)",
|
"Bjornulf_LoopInteger": "♻ Loop (Integer)",
|
||||||
|
|||||||
@@ -1,91 +0,0 @@
|
|||||||
import os
|
|
||||||
import numpy as np
|
|
||||||
import torch
|
|
||||||
import subprocess
|
|
||||||
from PIL import Image
|
|
||||||
|
|
||||||
class imgs2vid:
    """ComfyUI node: encode a batch of image tensors into an mp4/webm file via FFmpeg.

    Frames are dumped to a temp directory as numbered PNGs, then muxed together
    with the audio file at ``audio_path``. Returns a short comment describing
    the chosen container format.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "fps": ("INT", {"default": 30, "min": 1, "max": 60}),
                "video_name_NO_format": ("STRING", {"default": "output"}),
                "format": (["mp4", "webm"],),
                "audio_path": ("STRING", {"default": "/home/umen/6sec.wav"}),  # New audio input
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("comment",)
    FUNCTION = "create_video"
    OUTPUT_NODE = True
    CATEGORY = "Bjornulf"

    def create_video(self, images, fps, video_name_NO_format, format, audio_path):
        # Strip any extension the user typed, then append the selected one.
        video_name_NO_format = os.path.splitext(video_name_NO_format)[0]
        output_file = f"{video_name_NO_format}.{format}"

        temp_dir = "temp_images"
        os.makedirs(temp_dir, exist_ok=True)
        # Make sure the destination directory exists ("." when no dir part).
        parent = os.path.dirname(output_file)
        os.makedirs(parent if parent else ".", exist_ok=True)

        # Write every tensor frame out as a numbered PNG for FFmpeg's image2 demuxer.
        for idx, tensor in enumerate(images):
            frame = Image.fromarray((tensor.cpu().numpy() * 255).astype(np.uint8))
            if format == "webm":
                frame = frame.convert("RGBA")  # Ensure alpha channel for WebM
            frame.save(os.path.join(temp_dir, f"frame_{idx:04d}.png"))

        # Common input arguments: frame sequence first, then the audio track.
        cmd = [
            "ffmpeg",
            "-y",
            "-framerate", str(fps),
            "-i", os.path.join(temp_dir, "frame_%04d.png"),
            "-i", str(audio_path),
            "-crf", "19",
        ]
        if format == "mp4":
            cmd += ["-c:v", "libx264", "-pix_fmt", "yuv420p", output_file]
            comment = "MP4 format: Widely compatible, efficient compression, no transparency support."
        elif format == "webm":
            cmd += [
                "-c:v", "libvpx",
                "-b:v", "1M",            # Set video bitrate
                "-auto-alt-ref", "0",    # Disable auto alt ref
                "-c:a", "libvorbis",
                "-pix_fmt", "yuva420p",  # yuva = keep the alpha plane
                "-shortest",
                output_file,
            ]
            comment = "WebM format: Supports transparency, open format, smaller file size, but less compatible than MP4."

        try:
            subprocess.run(cmd, check=True)
            print(f"Video created successfully: {output_file}")
        except subprocess.CalledProcessError as e:
            print(f"Error creating video: {e}")
        finally:
            # Always drop the intermediate PNG frames.
            for file in os.listdir(temp_dir):
                os.remove(os.path.join(temp_dir, file))
            os.rmdir(temp_dir)

        return (comment,)
|
|
||||||
|
|
||||||
# Example usage
|
|
||||||
# images = [torch.rand(256, 256, 3) for _ in range(10)] # Replace with actual image tensors
|
|
||||||
# imgs2vid().create_video(images, 30, "output", "webm", "/home/
|
|
||||||
111
images_to_video.py
Normal file
111
images_to_video.py
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
import os
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
import subprocess
|
||||||
|
from PIL import Image
|
||||||
|
import soundfile as sf
|
||||||
|
|
||||||
|
class imagesToVideo:
    """ComfyUI node: encode a batch of image tensors into an mp4/webm video with FFmpeg.

    Frames are dumped to a temp directory as numbered PNGs and muxed together;
    an optional AUDIO input is written to a temporary wav and mixed in.
    Returns a one-sentence comment describing the chosen container format.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "fps": ("INT", {"default": 24, "min": 1, "max": 60}),
                "name_prefix": ("STRING", {"default": "output/imgs2video/me"}),
                "format": (["mp4", "webm"],),
                "crf": ("INT", {"default": 19, "min": 0, "max": 63}),
            },
            "optional": {
                "audio": ("AUDIO",),
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("comment",)
    FUNCTION = "image_to_video"
    OUTPUT_NODE = True
    CATEGORY = "Bjornulf"

    def image_to_video(self, images, fps, name_prefix, format, crf, audio=None):
        """Encode `images` at `fps` into `<name_prefix>.<format>`; return (comment,)."""
        # Strip any extension the user typed, then append the selected one.
        name_prefix = os.path.splitext(name_prefix)[0]
        output_file = f"{name_prefix}.{format}"
        temp_dir = "temp_images_imgs2video"

        # Remove leftovers from a previous (possibly failed) run.
        if os.path.exists(temp_dir) and os.path.isdir(temp_dir):
            for file in os.listdir(temp_dir):
                os.remove(os.path.join(temp_dir, file))
            os.rmdir(temp_dir)

        os.makedirs(temp_dir, exist_ok=True)
        # Ensure the output directory exists ("." when no dir part).
        os.makedirs(os.path.dirname(output_file) if os.path.dirname(output_file) else ".", exist_ok=True)

        # Write every tensor frame as a numbered PNG for FFmpeg's image2 demuxer.
        for i, img_tensor in enumerate(images):
            img = Image.fromarray((img_tensor.cpu().numpy() * 255).astype(np.uint8))
            if format == "webm":
                img = img.convert("RGBA")  # Ensure alpha channel for WebM
            img.save(os.path.join(temp_dir, f"frame_{i:04d}.png"))

        # Optional audio: dump it to a temp wav inside temp_dir so the
        # cleanup below removes it too.
        temp_audio_file = None
        if audio is not None:
            temp_audio_file = os.path.join(temp_dir, "temp_audio.wav")
            # NOTE(review): assumes squeeze() yields (samples,) or (samples, channels)
            # as soundfile expects — confirm against ComfyUI's AUDIO tensor layout.
            waveform = audio['waveform'].squeeze().numpy()
            sample_rate = audio['sample_rate']
            sf.write(temp_audio_file, waveform, sample_rate)

        # Construct the FFmpeg command: inputs first, then codec options per format.
        ffmpeg_cmd = [
            "ffmpeg",
            "-y",
            "-framerate", str(fps),
            "-i", os.path.join(temp_dir, "frame_%04d.png"),
        ]

        if temp_audio_file:
            ffmpeg_cmd.extend(["-i", temp_audio_file])

        if format == "mp4":
            ffmpeg_cmd.extend([
                "-filter_complex", "[0:v]scale=iw:ih,format=rgba,split[s0][s1];[s0]lutrgb=r=0:g=0:b=0:a=0[transparent];[transparent][s1]overlay",
                "-crf", str(crf),
                "-c:v", "libx264",
                "-pix_fmt", "yuv420p",
            ])
            comment = "MP4 format: Widely compatible, efficient compression, no transparency support."
        elif format == "webm":
            # Workaround for a fake-transparency issue seen with Inspyrenet output:
            # build a fully transparent background and overlay the frames on top,
            # which forces the alpha plane to be preserved through encoding.
            ffmpeg_cmd.extend([
                "-filter_complex", "[0:v]scale=iw:ih,format=rgba,split[s0][s1];[s0]lutrgb=r=0:g=0:b=0:a=0[transparent];[transparent][s1]overlay",
                "-c:v", "libvpx-vp9",
                "-pix_fmt", "yuva420p",
                "-b:v", "0",  # with -crf, constant-quality mode for VP9
                "-crf", str(crf),
                "-auto-alt-ref", "0",
            ])
            comment = "WebM format: Supports transparency, open format, smaller file size, but less compatible than MP4."

        if temp_audio_file:
            ffmpeg_cmd.extend(["-c:a", "libvorbis", "-shortest"])

        ffmpeg_cmd.append(output_file)

        # Run FFmpeg
        try:
            subprocess.run(ffmpeg_cmd, check=True)
            print(f"Video created successfully: {output_file}")
        # Fixed: was `subprocess.CalledProcessCode`, which does not exist — any
        # ffmpeg failure raised AttributeError instead of being reported.
        except subprocess.CalledProcessError as e:
            print(f"Error creating video: {e}")
        finally:
            # Cleanup restored (was commented out "for debugging purposes",
            # leaking the temp frame/audio files on every run).
            for file in os.listdir(temp_dir):
                os.remove(os.path.join(temp_dir, file))
            os.rmdir(temp_dir)

        return (comment,)
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 35 KiB After Width: | Height: | Size: 142 KiB |
Reference in New Issue
Block a user