0.77
6
.gitignore
vendored
@@ -10,4 +10,8 @@ ScriptsPerso/
|
||||
civitai/NSFW_*
|
||||
pickme.py
|
||||
web/js/pickme.js
|
||||
todo.py
|
||||
todo.py
|
||||
video_text_generator.py
|
||||
run_workflow_from_api.py
|
||||
remote_nodes.py
|
||||
fix_face.py
|
||||
581
API_civitai.py
@@ -1,4 +1,6 @@
|
||||
import os
|
||||
from server import PromptServer
|
||||
import aiohttp.web as web
|
||||
import time
|
||||
import requests
|
||||
from PIL import Image, ImageSequence, ImageOps
|
||||
@@ -168,11 +170,27 @@ def download_file(url, destination_path, model_name, api_token=None):
|
||||
response.raise_for_status()
|
||||
file_size = int(response.headers.get('content-length', 0))
|
||||
|
||||
# Initialize progress tracking if file size is known
|
||||
if file_size > 0:
|
||||
downloaded = 0
|
||||
bar_width = 20 # Fixed width for the progress bar
|
||||
|
||||
with open(file_path, 'wb') as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
if chunk:
|
||||
f.write(chunk)
|
||||
# Add progress reporting here if needed
|
||||
# Progress bar logic
|
||||
if file_size > 0:
|
||||
downloaded += len(chunk)
|
||||
progress = min(100, int((downloaded / file_size) * 100))
|
||||
num_hashes = int(progress / (100 / bar_width))
|
||||
bar = "[" + "#" * num_hashes + " " * (bar_width - num_hashes) + "]"
|
||||
percentage = f"{progress:3d}%"
|
||||
print(f"\r{bar} {percentage}", end="", flush=True)
|
||||
|
||||
# Add a newline after download completes to avoid overwriting
|
||||
if file_size > 0:
|
||||
print() # Moves to the next line after completion
|
||||
|
||||
return str(file_path)
|
||||
except Exception as e:
|
||||
@@ -222,95 +240,165 @@ import civitai
|
||||
# ======================
|
||||
# GENERATE WITH CIVITAI
|
||||
# ======================
|
||||
|
||||
class APIGenerateCivitAI:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
"""Define the input types for the node."""
|
||||
return {
|
||||
"required": {
|
||||
"api_token": ("STRING", {
|
||||
"multiline": False,
|
||||
"default": "",
|
||||
"placeholder": "Enter your CivitAI API token here"
|
||||
}),
|
||||
"model_urn": ("STRING", {
|
||||
"multiline": False,
|
||||
"default": "urn:air:sdxl:checkpoint:civitai:133005@782002"
|
||||
}),
|
||||
"prompt": ("STRING", {
|
||||
"multiline": True,
|
||||
"default": "RAW photo, face portrait photo of 26 y.o woman"
|
||||
}),
|
||||
"api_token": ("STRING", {"default": "", "placeholder": "CivitAI API token"}),
|
||||
"prompt": ("STRING", {"multiline": True, "default": "RAW photo, face portrait photo of 26 y.o woman"}),
|
||||
"negative_prompt": ("STRING", {
|
||||
"multiline": True,
|
||||
"default": "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime)"
|
||||
}),
|
||||
"width": ("INT", {
|
||||
"default": 1024,
|
||||
"min": 128,
|
||||
"max": 1024,
|
||||
"step": 64
|
||||
}),
|
||||
"height": ("INT", {
|
||||
"default": 768,
|
||||
"min": 128,
|
||||
"max": 1024,
|
||||
"step": 64
|
||||
}),
|
||||
"steps": ("INT", {
|
||||
"default": 20,
|
||||
"min": 1,
|
||||
"max": 50,
|
||||
"step": 1
|
||||
}),
|
||||
"cfg_scale": ("FLOAT", {
|
||||
"default": 7.0,
|
||||
"min": 1.0,
|
||||
"max": 30.0,
|
||||
"step": 0.1
|
||||
}),
|
||||
"seed": ("INT", {
|
||||
"default": -1,
|
||||
"min": -1,
|
||||
"max": 0x7FFFFFFFFFFFFFFF
|
||||
}),
|
||||
"number_of_images": ("INT", {
|
||||
"default": 1,
|
||||
"min": 1,
|
||||
"max": 10,
|
||||
"step": 1
|
||||
}),
|
||||
"timeout": ("INT", {
|
||||
"default": 300,
|
||||
"min": 60,
|
||||
"max": 1800,
|
||||
"step": 60,
|
||||
"display": "Timeout (seconds)"
|
||||
"default": "low quality, blurry, pixelated, distorted, artifacts"
|
||||
}),
|
||||
"width": ("INT", {"default": 1024, "min": 128, "max": 1024, "step": 64}),
|
||||
"height": ("INT", {"default": 768, "min": 128, "max": 1024, "step": 64}),
|
||||
"steps": ("INT", {"default": 20, "min": 1, "max": 50, "step": 1}),
|
||||
"cfg_scale": ("FLOAT", {"default": 7.0, "min": 1.0, "max": 30.0, "step": 0.1}),
|
||||
"seed": ("INT", {"default": -1, "min": -1, "max": 0x7FFFFFFFFFFFFFFF}),
|
||||
"number_of_images": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
|
||||
"timeout": ("INT", {"default": 300, "min": 60, "max": 1800, "step": 60}),
|
||||
},
|
||||
"optional":{
|
||||
"add_LORA": ("add_LORA", {"forceInput": True}),
|
||||
"optional": {
|
||||
"model_urn": ("STRING", {"default": "urn:air:sdxl:checkpoint:civitai:101055@128078"}), #SDXL default
|
||||
"add_LORA": ("STRING", {"multiline": True, "default": ""}),
|
||||
"DO_NOT_WAIT": ("BOOLEAN", {"default": False, "label_on": "Save Links Only", "label_off": "Generate Now"}),
|
||||
"links_file": ("STRING", {"default": "", "multiline": False}),
|
||||
"LIST_from_style_selector": ("STRING", {
|
||||
"default": "",
|
||||
"multiline": True,
|
||||
"placeholder": "e.g., Low Poly ;Samaritan 3D Cartoon;urn:air:sdxl:checkpoint:civitai:81270@144566;https://civitai.green/models/81270?modelVersionId=144566"
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("IMAGE", "STRING",)
|
||||
RETURN_NAMES = ("image", "generation_info",)
|
||||
RETURN_TYPES = ("IMAGE", "STRING")
|
||||
RETURN_NAMES = ("images", "generation_info")
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Civitai"
|
||||
|
||||
def __init__(self):
|
||||
self.output_dir = "output/API/CivitAI"
|
||||
self.metadata_dir = "output/API/CivitAI/metadata"
|
||||
os.makedirs(self.output_dir, exist_ok=True)
|
||||
os.makedirs(self.metadata_dir, exist_ok=True)
|
||||
self.links_dir = "Bjornulf/civitai_links"
|
||||
os.makedirs(self.links_dir, exist_ok=True)
|
||||
self._interrupt_event = threading.Event()
|
||||
|
||||
def get_next_number(self):
|
||||
"""Get the next available number for file naming"""
|
||||
files = [f for f in os.listdir(self.output_dir) if f.endswith('.png')]
|
||||
if not files:
|
||||
return 1
|
||||
numbers = [int(f.split('.')[0]) for f in files]
|
||||
return max(numbers) + 1
|
||||
def generate(self, api_token, prompt, negative_prompt, width, height, steps, cfg_scale, seed, number_of_images, timeout, model_urn="", add_LORA="", DO_NOT_WAIT=False, links_file="", LIST_from_style_selector=""):
|
||||
"""Generate images or save links based on DO_NOT_WAIT."""
|
||||
if not api_token:
|
||||
raise ValueError("API token is required")
|
||||
os.environ["CIVITAI_API_TOKEN"] = api_token
|
||||
civitai = get_civitai()
|
||||
|
||||
empty_image = torch.zeros((1, 512, 512, 3))
|
||||
|
||||
# Extract model_urn from LIST_from_style_selector if model_urn is empty and LIST_from_style_selector is provided
|
||||
if not model_urn and LIST_from_style_selector:
|
||||
parts = LIST_from_style_selector.split(';')
|
||||
if len(parts) >= 3:
|
||||
model_urn = parts[2].strip()
|
||||
else:
|
||||
raise ValueError("Invalid LIST_from_style_selector format: cannot extract model_urn")
|
||||
if not model_urn:
|
||||
raise ValueError("model_urn is required")
|
||||
|
||||
seed = random.randint(0, 0x7FFFFFFFFFFFFFFF) if seed == -1 else seed
|
||||
jobs = []
|
||||
|
||||
# Prepare job requests
|
||||
for i in range(number_of_images):
|
||||
current_seed = seed + i
|
||||
input_data = {
|
||||
"model": model_urn,
|
||||
"params": {
|
||||
"prompt": prompt,
|
||||
"negativePrompt": negative_prompt,
|
||||
"scheduler": "EulerA",
|
||||
"steps": steps,
|
||||
"cfgScale": cfg_scale,
|
||||
"width": width,
|
||||
"height": height,
|
||||
"clipSkip": 2,
|
||||
"seed": current_seed
|
||||
}
|
||||
}
|
||||
if add_LORA:
|
||||
try:
|
||||
lora_data = json.loads(add_LORA)
|
||||
if "additionalNetworks" in lora_data:
|
||||
input_data["additionalNetworks"] = lora_data["additionalNetworks"]
|
||||
except Exception as e:
|
||||
print(f"Error processing LORA data: {str(e)}")
|
||||
|
||||
response = civitai.image.create(input_data)
|
||||
if 'token' not in response or 'jobs' not in response:
|
||||
raise ValueError("Invalid API response")
|
||||
jobs.append({
|
||||
'token': response['token'],
|
||||
'job_id': response['jobs'][0]['jobId'],
|
||||
'input_data': input_data
|
||||
})
|
||||
|
||||
# Save links if DO_NOT_WAIT is True
|
||||
if DO_NOT_WAIT:
|
||||
date_str = time.strftime("%d_%B_%Y").lower()
|
||||
base_name = f"{date_str}_"
|
||||
existing_files = [f for f in os.listdir(self.links_dir) if f.startswith(base_name) and f.endswith(".txt")]
|
||||
next_number = max([int(f[len(base_name):-4]) for f in existing_files] or [0]) + 1
|
||||
file_name = f"{date_str}_{next_number:03d}.txt"
|
||||
file_path = os.path.join(self.links_dir, links_file if links_file else file_name)
|
||||
mode = 'a' if links_file else 'w'
|
||||
if not file_path.endswith(".txt"):
|
||||
file_path += ".txt"
|
||||
|
||||
with open(file_path, mode) as f:
|
||||
for job in jobs:
|
||||
if LIST_from_style_selector:
|
||||
f.write(f"{LIST_from_style_selector};Token: {job['token']};Job ID: {job['job_id']}\n")
|
||||
else:
|
||||
f.write(f"Token: {job['token']};Job ID: {job['job_id']}\n")
|
||||
|
||||
generation_info = {
|
||||
"status": "links_saved",
|
||||
"links_file": file_path,
|
||||
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"number_of_jobs": len(jobs)
|
||||
}
|
||||
return (empty_image, json.dumps(generation_info, indent=2))
|
||||
|
||||
# Generate images immediately (DO_NOT_WAIT=False)
|
||||
images = []
|
||||
infos = []
|
||||
failed_jobs = []
|
||||
|
||||
for job in jobs:
|
||||
try:
|
||||
image_url = self.check_job_status(job['token'], job['job_id'], timeout)
|
||||
image_response = requests.get(image_url)
|
||||
if image_response.status_code != 200:
|
||||
raise ConnectionError(f"Image download failed: {image_response.status_code}")
|
||||
|
||||
img = Image.open(BytesIO(image_response.content)).convert('RGB')
|
||||
img_tensor = torch.from_numpy(np.array(img).astype(np.float32) / 255.0)
|
||||
images.append(img_tensor.unsqueeze(0))
|
||||
infos.append(self.format_generation_info(job['input_data'], job['token'], job['job_id'], image_url))
|
||||
|
||||
except Exception as e:
|
||||
failed_jobs.append({'job': job, 'error': str(e)})
|
||||
|
||||
if not images:
|
||||
generation_info = {"error": "All jobs failed", "failed_jobs": failed_jobs}
|
||||
return (empty_image, json.dumps(generation_info, indent=2))
|
||||
|
||||
combined_tensor = torch.cat(images, dim=0)
|
||||
combined_info = {
|
||||
"successful_generations": len(images),
|
||||
"total_requested": number_of_images,
|
||||
"individual_results": infos,
|
||||
"failed_jobs": failed_jobs if failed_jobs else None
|
||||
}
|
||||
return (combined_tensor, json.dumps(combined_info, indent=2))
|
||||
|
||||
def check_job_status(self, job_token, job_id, timeout=9999):
|
||||
"""Check job status with timeout"""
|
||||
@@ -341,196 +429,183 @@ class APIGenerateCivitAI:
|
||||
raise InterruptedError("Generation interrupted by user")
|
||||
raise TimeoutError(f"Job timed out after {timeout} seconds")
|
||||
|
||||
def save_image_and_metadata(self, img, generation_info, number):
|
||||
"""Save both image and its metadata"""
|
||||
# Save image
|
||||
filename = f"{number:04d}.png"
|
||||
filepath = os.path.join(self.output_dir, filename)
|
||||
img.save(filepath)
|
||||
def format_generation_info(self, input_data, token, job_id, image_url):
|
||||
"""Format generation info (implementation assumed)."""
|
||||
return {"token": token, "job_id": job_id, "image_url": image_url}
|
||||
|
||||
# Save metadata
|
||||
metadata_filename = f"{number:04d}_metadata.json"
|
||||
metadata_filepath = os.path.join(self.metadata_dir, metadata_filename)
|
||||
with open(metadata_filepath, 'w') as f:
|
||||
json.dump(generation_info, f, indent=4)
|
||||
|
||||
return filepath, metadata_filepath
|
||||
|
||||
def format_generation_info(self, input_data, job_token, job_id, image_url):
|
||||
"""Format generation information for recovery"""
|
||||
recovery_info = {
|
||||
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"generation_parameters": input_data,
|
||||
"job_details": {
|
||||
"token": job_token,
|
||||
"job_id": job_id,
|
||||
"image_url": image_url
|
||||
class LoadCivitAILinks:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
"""Define the input types for the node."""
|
||||
return {
|
||||
"required": {
|
||||
"api_token": ("STRING", {"default": "", "placeholder": "CivitAI API token"}),
|
||||
"links_file_path": ("STRING", {
|
||||
"default": "",
|
||||
"placeholder": "Path to links file (priority if not empty)"
|
||||
}),
|
||||
"selected_file": (["Not selected"] + cls.get_links_files(), {
|
||||
"default": "Not selected"
|
||||
}),
|
||||
"direct_links": ("STRING", {
|
||||
"multiline": True,
|
||||
"default": "",
|
||||
"placeholder": "Enter links directly (e.g., Style;Model;URN;Link;Token: <token>;Job ID: <job_id>)"
|
||||
}),
|
||||
},
|
||||
"recovery_command": f"curl -X GET '{image_url}' --output recovered_image.png",
|
||||
"recovery_instructions": """
|
||||
To recover this image:
|
||||
1. Use the provided curl command to download the image
|
||||
2. Or use the image_url directly in a browser
|
||||
3. If the image is no longer available, you can retry generation with the same parameters
|
||||
"""
|
||||
"optional": {
|
||||
"auto_save": ("BOOLEAN", {
|
||||
"default": False,
|
||||
"label_on": "Enable Auto-Save",
|
||||
"label_off": "Disable Auto-Save"
|
||||
}),
|
||||
}
|
||||
}
|
||||
return recovery_info
|
||||
|
||||
def generate_single_image(self, input_data, job_token, job_id, timeout):
|
||||
"""Generate a single image and return its tensor and info"""
|
||||
try:
|
||||
image_url = self.check_job_status(job_token, job_id, timeout)
|
||||
if not image_url:
|
||||
raise ValueError("No image URL received")
|
||||
RETURN_TYPES = ("IMAGE", "STRING", "STRING")
|
||||
RETURN_NAMES = ("images", "status_info", "LIST_style")
|
||||
OUTPUT_IS_LIST = (False, False, True)
|
||||
FUNCTION = "load_images"
|
||||
CATEGORY = "Civitai"
|
||||
|
||||
image_response = requests.get(image_url)
|
||||
if image_response.status_code != 200:
|
||||
raise ConnectionError(f"Failed to download image: Status code {image_response.status_code}")
|
||||
|
||||
img = Image.open(BytesIO(image_response.content))
|
||||
if img.mode != 'RGB':
|
||||
img = img.convert('RGB')
|
||||
|
||||
number = self.get_next_number()
|
||||
generation_info = self.format_generation_info(input_data, job_token, job_id, image_url)
|
||||
image_path, metadata_path = self.save_image_and_metadata(img, generation_info, number)
|
||||
|
||||
img_tensor = torch.from_numpy(np.array(img).astype(np.float32) / 255.0)
|
||||
img_tensor = img_tensor.unsqueeze(0)
|
||||
|
||||
return img_tensor, generation_info
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(f"Error generating single image: {str(e)}")
|
||||
|
||||
|
||||
def generate(self, api_token, prompt, negative_prompt, width, height, model_urn, steps=20,
|
||||
cfg_scale=7.0, seed=-1, number_of_images=1, timeout=300, add_LORA=""):
|
||||
|
||||
# Set the environment variable
|
||||
if api_token:
|
||||
os.environ["CIVITAI_API_TOKEN"] = api_token
|
||||
# Get a fresh instance of civitai with the new token
|
||||
civitai = get_civitai()
|
||||
|
||||
self._interrupt_event.clear()
|
||||
empty_image = torch.zeros((1, height, width, 3))
|
||||
|
||||
try:
|
||||
# Handle seed
|
||||
if seed == -1:
|
||||
seed = random.randint(0, 0x7FFFFFFFFFFFFFFF)
|
||||
|
||||
# Prepare jobs list
|
||||
jobs = []
|
||||
generation_tasks = []
|
||||
|
||||
for i in range(number_of_images):
|
||||
current_seed = seed + i
|
||||
input_data = {
|
||||
"model": model_urn,
|
||||
"params": {
|
||||
"prompt": prompt,
|
||||
"negativePrompt": negative_prompt,
|
||||
"scheduler": "EulerA",
|
||||
"steps": steps,
|
||||
"cfgScale": cfg_scale,
|
||||
"width": width,
|
||||
"height": height,
|
||||
"clipSkip": 2,
|
||||
"seed": current_seed
|
||||
}
|
||||
}
|
||||
|
||||
# Handle add_LORA input if provided
|
||||
if add_LORA:
|
||||
try:
|
||||
lora_data = json.loads(add_LORA)
|
||||
if "additionalNetworks" in lora_data:
|
||||
input_data["additionalNetworks"] = lora_data["additionalNetworks"]
|
||||
except Exception as e:
|
||||
print(f"Error processing LORA data: {str(e)}")
|
||||
|
||||
# Create generation job
|
||||
response = civitai.image.create(input_data)
|
||||
if not response or 'token' not in response or 'jobs' not in response:
|
||||
raise ValueError("Invalid response from Civitai API")
|
||||
|
||||
jobs.append({
|
||||
'token': response['token'],
|
||||
'job_id': response['jobs'][0]['jobId'],
|
||||
'input_data': input_data
|
||||
})
|
||||
|
||||
# Process all jobs in parallel
|
||||
images = []
|
||||
infos = []
|
||||
failed_jobs = []
|
||||
|
||||
for job in jobs:
|
||||
try:
|
||||
img_tensor, generation_info = self.generate_single_image(
|
||||
job['input_data'],
|
||||
job['token'],
|
||||
job['job_id'],
|
||||
timeout
|
||||
)
|
||||
images.append(img_tensor)
|
||||
infos.append(generation_info)
|
||||
except Exception as e:
|
||||
failed_jobs.append({
|
||||
'job': job,
|
||||
'error': str(e)
|
||||
})
|
||||
|
||||
if not images: # If all jobs failed
|
||||
generation_info = {
|
||||
"error": "All generation jobs failed",
|
||||
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"failed_jobs": failed_jobs
|
||||
}
|
||||
return (empty_image, json.dumps(generation_info, indent=2))
|
||||
|
||||
# Combine images into a batch
|
||||
combined_tensor = torch.cat(images, dim=0)
|
||||
|
||||
# Combine generation info
|
||||
combined_info = {
|
||||
"successful_generations": len(images),
|
||||
"total_requested": number_of_images,
|
||||
"base_seed": seed,
|
||||
"generation_parameters": jobs[0]['input_data'],
|
||||
"individual_results": infos,
|
||||
"failed_jobs": failed_jobs if failed_jobs else None
|
||||
}
|
||||
|
||||
return (combined_tensor, json.dumps(combined_info, indent=2))
|
||||
|
||||
except InterruptedError:
|
||||
generation_info = {
|
||||
"error": "Generation interrupted by user",
|
||||
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"base_seed": seed
|
||||
}
|
||||
return (empty_image, json.dumps(generation_info, indent=2))
|
||||
|
||||
except Exception as e:
|
||||
generation_info = {
|
||||
"error": f"Civitai generation failed: {str(e)}",
|
||||
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"base_seed": seed if 'seed' in locals() else None
|
||||
}
|
||||
return (empty_image, json.dumps(generation_info, indent=2))
|
||||
def __init__(self):
|
||||
"""Initialize the node with the links directory."""
|
||||
self.links_dir = "Bjornulf/civitai_links"
|
||||
os.makedirs(self.links_dir, exist_ok=True)
|
||||
|
||||
@classmethod
|
||||
def IS_CHANGED(cls, **kwargs):
|
||||
def get_links_files(cls):
|
||||
links_dir = "Bjornulf/civitai_links"
|
||||
if not os.path.exists(links_dir):
|
||||
return []
|
||||
files = [f for f in os.listdir(links_dir) if f.endswith(".txt")]
|
||||
return files
|
||||
|
||||
def load_images(self, api_token, links_file_path, selected_file, direct_links, auto_save=False):
|
||||
"""Load images from links and optionally save them to style-based folders."""
|
||||
if not api_token:
|
||||
raise ValueError("API token is required")
|
||||
os.environ["CIVITAI_API_TOKEN"] = api_token
|
||||
civitai = get_civitai()
|
||||
|
||||
# Determine the source of links
|
||||
lines = None
|
||||
if links_file_path:
|
||||
if not os.path.exists(links_file_path):
|
||||
raise ValueError(f"File path '{links_file_path}' does not exist")
|
||||
with open(links_file_path, 'r') as f:
|
||||
lines = f.readlines()
|
||||
elif selected_file != "Not selected":
|
||||
file_path = os.path.join(self.links_dir, selected_file)
|
||||
if not os.path.exists(file_path):
|
||||
raise ValueError(f"Selected file '{file_path}' does not exist")
|
||||
with open(file_path, 'r') as f:
|
||||
lines = f.readlines()
|
||||
elif direct_links:
|
||||
lines = direct_links.splitlines()
|
||||
else:
|
||||
raise ValueError("No valid links source provided")
|
||||
|
||||
images = []
|
||||
list_styles = [] # To store LIST_style strings
|
||||
status_info = {
|
||||
"loaded": 0,
|
||||
"failed": 0,
|
||||
"attempted": 0,
|
||||
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
status_info["attempted"] += 1
|
||||
try:
|
||||
parts = line.split(";")
|
||||
if len(parts) == 6:
|
||||
style = parts[0].strip()
|
||||
model_name = parts[1].strip()
|
||||
model_urn = parts[2].strip()
|
||||
model_link = parts[3].strip()
|
||||
token = parts[4].split("Token: ")[1].strip()
|
||||
job_id = parts[5].split("Job ID: ")[1].strip()
|
||||
list_style = ';'.join(parts[:4])
|
||||
elif len(parts) == 2 and "Token: " in parts[0] and "Job ID: " in parts[1]:
|
||||
token = parts[0].split("Token: ")[1].strip()
|
||||
job_id = parts[1].split("Job ID: ")[1].strip()
|
||||
list_style = ""
|
||||
else:
|
||||
raise ValueError(f"Invalid link format: {line}")
|
||||
|
||||
# Fetch job status from CivitAI API
|
||||
response = civitai.jobs.get(token=token)
|
||||
job_status = next((job for job in response['jobs'] if job['jobId'] == job_id), None)
|
||||
|
||||
if not job_status or not job_status['result'].get('available'):
|
||||
status_info["failed"] += 1
|
||||
continue
|
||||
|
||||
# Download and process the image
|
||||
image_url = job_status['result'].get('blobUrl')
|
||||
image_response = requests.get(image_url)
|
||||
if image_response.status_code != 200:
|
||||
status_info["failed"] += 1
|
||||
continue
|
||||
|
||||
img = Image.open(BytesIO(image_response.content))
|
||||
if img.mode != 'RGB':
|
||||
img = img.convert('RGB')
|
||||
|
||||
# Auto-save if enabled and style is available
|
||||
if auto_save and len(parts) == 6:
|
||||
style_folder = style.replace(" ", "_") # Replace spaces with underscores
|
||||
save_dir = os.path.join(folder_paths.get_output_directory(), "civitai_autosave", style_folder)
|
||||
os.makedirs(save_dir, exist_ok=True)
|
||||
file_name = f"{job_id}.png"
|
||||
file_path = os.path.join(save_dir, file_name)
|
||||
img.save(file_path)
|
||||
|
||||
# Convert to tensor and collect
|
||||
img_tensor = torch.from_numpy(np.array(img).astype(np.float32) / 255.0)
|
||||
images.append(img_tensor.unsqueeze(0))
|
||||
list_styles.append(list_style)
|
||||
status_info["loaded"] += 1
|
||||
|
||||
except Exception as e:
|
||||
status_info["failed"] += 1
|
||||
print(f"Error processing link '{line}': {str(e)}")
|
||||
|
||||
if not images:
|
||||
raise ValueError("No images loaded from the provided links")
|
||||
|
||||
combined_tensor = torch.cat(images, dim=0)
|
||||
return (combined_tensor, json.dumps(status_info, indent=2), list_styles)
|
||||
|
||||
@classmethod
|
||||
def IS_CHANGED(cls, api_token, links_file_path, selected_file, direct_links, auto_save):
|
||||
"""Force node re-execution when inputs change."""
|
||||
return float("NaN")
|
||||
|
||||
def interrupt(self):
|
||||
"""Method to handle interruption"""
|
||||
print("Interrupting CivitAI generation...")
|
||||
self._interrupt_event.set()
|
||||
|
||||
@PromptServer.instance.routes.post("/get_civitai_links_files")
|
||||
async def get_civitai_links_files(request):
|
||||
try:
|
||||
links_dir = "Bjornulf/civitai_links"
|
||||
if not os.path.exists(links_dir):
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": "Links directory does not exist"
|
||||
}, status=404)
|
||||
files = [f for f in os.listdir(links_dir) if f.endswith(".txt")]
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"files": files
|
||||
}, status=200)
|
||||
except Exception as e:
|
||||
error_msg = str(e)
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": error_msg
|
||||
}, status=500)
|
||||
class APIGenerateCivitAIAddLORA:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
|
||||
347
README.md
@@ -1,14 +1,19 @@
|
||||
# 🔗 Comfyui : Bjornulf_custom_nodes v0.77 🔗
|
||||
|
||||
A list of 142 custom nodes for Comfyui : Display, manipulate, create and edit text, images, videos, loras, generate characters and more.
|
||||
A list of 162 custom nodes for Comfyui : Display, manipulate, create and edit text, images, videos, loras, generate characters and more.
|
||||
You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech.
|
||||
|
||||
# Watch Video (Quick overview 28 minutes) :
|
||||
⚠️ Warning : Very active development. Work in progress. 🏗
|
||||
|
||||
# Watch Video (Quick overview 28 minutes) + Updates in video playlist :
|
||||
[](https://www.youtube.com/playlist?list=PL_hRi_PPLLWvUNfG_np_d3K0nBCabM3iQ)
|
||||
|
||||
# Coffee : ☕☕☕☕☕ 5/5
|
||||
|
||||
Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️❤️❤️
|
||||
Very active development, new nodes, new features, new ideas, new possibilities...
|
||||
Let me know if you have any issues, they must be solved for everyone !
|
||||
|
||||
Support me, this project and my other AI exciting projects : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️❤️❤️
|
||||
|
||||
# ☘ This project is part of my AI trio. ☘
|
||||
|
||||
@@ -29,7 +34,7 @@ Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️
|
||||
`126.` [📒 Note](#126----note)
|
||||
`127.` [🖼📒 Image Note (Load image)](#130----image-note-load-image)
|
||||
`128.` [🖼👁 Preview (first) image](#128----preview-first-image)
|
||||
`130.` [📥🖼📒 Image Note (Load image)](#130----image-note-load-image)
|
||||
`130.` [📥🖼📒 Image Note (Load image)](#130----image-note-load-image)
|
||||
`133.` [🖼👁 Preview 1-4 images (compare)](#128----preview-first-image)
|
||||
|
||||
## ✒ Text ✒
|
||||
@@ -57,9 +62,10 @@ Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️
|
||||
`138.` [📑👈 Select from List](#138----select-from-list)
|
||||
`141.` [🌎✒👉 Global Write Pick Me](#141----global-write-pick-me)
|
||||
`142.` [🌎📥 Load Global Pick Me](#142----load-global-pick-me)
|
||||
`144` [📊🔍 Text Analyzer](#)
|
||||
|
||||
## 🔥 Text Generator 🔥
|
||||
`81.` [🔥📝 Text Generator 📝🔥](#81----text-generator-)
|
||||
`81.` [🔥📝🖼 Image Text Generator 🖼📝🔥](#81----text-generator-)
|
||||
`82.` [👩🦰📝 Text Generator (Character Female)](#82----text-generator-character-female)
|
||||
`83.` [👨🦰📝 Text Generator (Character Male)](#83----text-generator-character-male)
|
||||
`84.` [👾📝 Text Generator (Character Creature)](#84----text-generator-character-creature)
|
||||
@@ -147,6 +153,8 @@ Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️
|
||||
`62.` [🦙👁 Ollama Vision](#62----ollama-vision)
|
||||
`70.` [📏 Resize Image Percentage](#70----resize-image-percentage)
|
||||
`80.` [🩷 Empty Latent Selector](#80----empty-latent-selector)
|
||||
`146` [🖼📹🔪 Split Image/Video Grid](#)
|
||||
`147` [🖼📹🔨 Reassemble Image/Video Grid](#)
|
||||
|
||||
## 🅰️ Variables 🅰️
|
||||
`3.` [✒🗔🅰️ Advanced Write Text (+ 🎲 random option)](#3---🅰%EF%B8%8F-advanced-write-text---random-option)
|
||||
@@ -174,6 +182,7 @@ Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️
|
||||
`108.` [☁👑 Add Lora (API ONLY - CivitAI) 👑☁](#108----add-lora-api-only---civitai-)
|
||||
`109.` [☁🎨 API Image Generator (Black Forest Labs - Flux) ☁](#109----api-image-generator-black-forest-labs---flux-)
|
||||
`110.` [☁🎨 API Image Generator (Stability - Stable Diffusion) ☁](#110----api-image-generator-stability---stable-diffusion-)
|
||||
`151` [📥🕑🤖 Load CivitAI Links](#)
|
||||
|
||||
## 📥 Take from CivitAI / Hugginface 📥
|
||||
`98.` [📥 Load checkpoint SD1.5 (+Download from CivitAi)](#98----load-checkpoint-sd15-download-from-civitai)
|
||||
@@ -193,7 +202,7 @@ Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️
|
||||
`49.` [📹👁 Video Preview](#49----video-preview)
|
||||
`50.` [🖼➜📹 Images to Video path (tmp video)](#50----images-to-video-path-tmp-video)
|
||||
`51.` [📹➜🖼 Video Path to Images](#51----video-path-to-images)
|
||||
`52.` [🔊📹 Audio Video Sync](#52----audio-video-sync)
|
||||
`52.` [🔊📹 Audio Video Sync](#52----audio-video-sync)
|
||||
`58.` [📹🔗 Concat Videos](#58----concat-videos)
|
||||
`59.` [📹🔊 Combine Video + Audio](#59----combine-video--audio)
|
||||
`60.` [🖼🖼 Merge Images/Videos 📹📹 (Horizontally)](#60----merge-imagesvideos--horizontally)
|
||||
@@ -203,6 +212,9 @@ Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️
|
||||
`78.` [📹➜📹 Convert Video](#78----convert-video)
|
||||
`79.` [📹🔗 Concat Videos from list](#79----concat-videos-from-list)
|
||||
`119.` [📥👑📹 Load Lora Hunyuan Video (+Download from CivitAi)](#119----load-lora-hunyuan-video-download-from-civitai)
|
||||
`146` [🖼📹🔪 Split Image/Video Grid](#)
|
||||
`147` [🖼📹🔨 Reassemble Image/Video Grid](#)
|
||||
`149` [💾📹 Save Video (tmp_api.mp4/mkv/webm) ⚠️💣](#)
|
||||
|
||||
## 🤖 AI 🤖
|
||||
`19.` [🦙💬 Ollama Talk](#19----ollama-talk)
|
||||
@@ -213,6 +225,7 @@ Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️
|
||||
`65.` [🦙 Ollama Persona Selector 🧑](#65----ollama-persona-selector-)
|
||||
`66.` [🔊➜📝 STT - Speech to Text](#66----stt---speech-to-text)
|
||||
`118.` [🔊 TTS Configuration ⚙](#118----tts-configuration-)
|
||||
`160.` [🦙👁 Ollama Vision Prompt Selector](#)
|
||||
|
||||
## 🔊 Audio 🔊
|
||||
`31.` [📝➜🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language)
|
||||
@@ -222,10 +235,13 @@ Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️
|
||||
`118.` [🔊 TTS Configuration ⚙](#118----tts-configuration-)
|
||||
`120.` [📝➜🔊 Kokoro - Text to Speech](#120----kokoro---text-to-speech)
|
||||
`134.` [134 - 🔊▶ Play Audio](#134----play-audio)
|
||||
`145` [🔊▶ Audio Preview (Audio player)](#)
|
||||
`148` [💾🔊 Save Audio (tmp_api.wav/mp3) ⚠️💣](#)
|
||||
|
||||
## 💻 General / System 💻
|
||||
`34.` [🧹 Free VRAM hack](#34----free-vram-hack)
|
||||
`137.` [🌎🎲 Global Seed Manager](#137----global-seed-manager)
|
||||
`143` [🧮 Basic Math](#)
|
||||
|
||||
## 🧍 Manual user Control 🧍
|
||||
`35.` [⏸️ Paused. Resume or Stop, Pick 👇](#35---%EF%B8%8F-paused-resume-or-stop-)
|
||||
@@ -237,6 +253,19 @@ Support me and my work : ❤️❤️❤️ <https://ko-fi.com/bjornulf> ❤️
|
||||
## 🧠 Logic / Conditional Operations 🧠
|
||||
`45.` [🔀 If-Else (input / compare_with)](#45----if-else-input--compare_with)
|
||||
|
||||
## [Coming soon] - not sorted in the categories above yet...
|
||||
`150` [[Coming soon] 🎨📜 Style Selector (🎲 or ♻ or ♻📑) + Civitai urn](#)
|
||||
`152` [[Coming soon] 💾 Save Tensors (tmp_api.pt) ⚠️💣](#)
|
||||
`153` [[Coming soon] 📥 Load Tensor (tmp_api.pt)](#)
|
||||
`154` [[Coming soon] 🔮 Remote VAE Decoder](#)
|
||||
`155` [[Coming soon] 🔮 Remote VAE Decoder (Tiled)](#)
|
||||
`156` [[Coming soon] 📥🔮 Load from Base64](#)
|
||||
`157` [[Coming soon] 🔮⚡ Execute Workflow](#)
|
||||
`158` [[Coming soon] 📥🔮📝 Text Manager Api (Execute Workflow)](#)
|
||||
`159` [[Coming soon] 🔥📝📹 Video Text Generator 📹📝🔥](#)
|
||||
`161` [[Coming soon] 🔧🧑 Fix Face](#)
|
||||
`162` [[Coming soon] 🧑 Face Settings [Fix Face] ⚙](#)
|
||||
|
||||
# ☁ Usage in cloud :
|
||||
|
||||
Comfyui is great for local usage, but I sometimes need more power than what I have...
|
||||
@@ -283,18 +312,18 @@ Then you can download it from the file manager JupyterLab.
|
||||
|
||||
If you have any issues with this template from Runpod, please let me know, I'm here to help. 😊
|
||||
|
||||
# 🏗 Dependencies (nothing to do for runpod ☁)
|
||||
# 🏗 Dependencies (nothing to do for my runpod ☁)
|
||||
|
||||
Check `requirements.txt` for the latest dependencies...
|
||||
Check `requirements.txt` for the latest dependencies.
|
||||
|
||||
## 🪟🐍 Windows : Install dependencies on windows with embedded python (portable version)
|
||||
|
||||
First you need to find this python_embedded `python.exe`, then you can right click or shift + right click inside the folder in your file manager to open a terminal there.
|
||||
|
||||
This is where I have it, with the command you need :
|
||||
`H:\ComfyUI_windows_portable\python_embeded> .\python.exe -m pip install pydub ollama opencv-python`
|
||||
`H:\ComfyUI_windows_portable\python_embeded> .\python.exe -m pip install ollama pydub opencv-python ffmpeg-python civitai-py fal_client sounddevice langdetect spacy textblob dill`
|
||||
|
||||
When you have to install something you can retake the same code and install the dependency you want :
|
||||
When you have to install something else, you can retake the same code and install the dependency you want :
|
||||
`.\python.exe -m pip install whateveryouwant`
|
||||
|
||||
You can then run comfyui.
|
||||
@@ -412,7 +441,7 @@ Fix a lot of code everywhere, a little better logging system, etc...
|
||||
WIP : Rewrite of all my ffmpeg nodes. (Still need improvements and fixes, will do that in 0.71?) Maybe don't use them yet...
|
||||
- **0.71**: ❗Breaking changes for Global variable nodes. (add to global variable system a "filename", which is a a separate global variable file.) bug fix speech to text node, 5 new nodes 129-133. combine text limit raised to 100. improve Save image in folder node.
|
||||
- **0.71-0.75**: Many bug fixing. Civitai nodes are working on windows. (encoding, links problem are solved ? - at least on my machines...)
|
||||
- **0.76**: Removed kokoro_onnx from requirements.txt due to some conflict with other nodes (need to be installed manually if you want to use this node.)
|
||||
- **0.76**: Removed kokoro_onnx from requirements.txt due to some conflict with other nodes, like Impact-Pack, with old version of numpy... (kokoro_onnx need to be installed manually if you want to use this node.)
|
||||
New syntaxes for advanced text/line selector, ex: {left|right|middle|group=LMR}+{left|right|middle|group=LMR}+{left|right|middle|group=LMR} and {A(80%)|B(15%)|C(5%)}
|
||||
2 new nodes switch : 🔛✨ Anything Switch On/Off (compatible with combine images) AND 🔛📝 Text Switch On/Off (Compatible with combine texts)
|
||||
2 new pick Me global nodes, using an identifier instead of chain : 🌎✒👉 Global Write Pick Me AND 🌎📥 Load Global Pick Me
|
||||
@@ -420,6 +449,15 @@ New syntaxes for advanced text/line selector, ex: {left|right|middle|group=LMR}+
|
||||
1 new node to quickly select element from list : 📑👈 Select from List
|
||||
1 new audio node : 🔊▶ Play Audio (Just play an audio file, will default to bell.m4a if none provided.) Can take AUDIO format or audio_path.
|
||||
❗Breaking changes. Large rewrite for all FFMPEG related nodes. With options for video preview. (Still have few changes to make, next version.)
|
||||
- **0.77**: Add Refresh button to Load image from folder.
|
||||
Add new syntax for advanced write / line selector : 2 {apple|orange|banana|static_group=FRUIT}s, one {apple|orange|banana|static_group=FRUIT} on the left, one {apple|orange|banana|static_group=FRUIT} on the right.
|
||||
TTS fix request voices.
|
||||
❗Breaking changes. New vision model for Ollama.
|
||||
20 new nodes (143 - 162), Text Analyzer, basic math, audio preview, etc..... (10 not released yet, but included as a public "roadmap".)
|
||||
Remove faster-whisper from requirements.txt for people using python 3.13. (Need to install manually.)
|
||||
Fix preview image node for the new comfyui layout.
|
||||
Rewrite of FFMPEG nodes, Configuration and Video preview.
|
||||
Many other bug fixes and improvements on many nodes, thx for the reports.
|
||||
|
||||
# 📝 Nodes descriptions
|
||||
|
||||
@@ -437,7 +475,9 @@ These are convenient because these are automatically recommended on drag and dro
|
||||
## 2 - ✒ Write Text
|
||||
|
||||
**Description:**
|
||||
Simple node to write text.
|
||||
Simple node to write text / send text to another node.
|
||||
|
||||
Below is a simple example of how to use the write text node with a show node :
|
||||
|
||||

|
||||
|
||||
@@ -464,6 +504,9 @@ Usage example :
|
||||
Groups, with no duplicate, example : {left|right|middle|group=LMR}+{left|right|middle|group=LMR}+{left|right|middle|group=LMR}
|
||||
Random based on percentage : {A(80%)|B(15%)|C(5%)}
|
||||
|
||||
❗ 0.77 - New syntax available :
|
||||
2 {apple|orange|banana|static_group=FRUIT}s, one {apple|orange|banana|static_group=FRUIT} on the left, one {apple|orange|banana|static_group=FRUIT} on the right
|
||||
|
||||
## 4 - 🔗 Combine Texts
|
||||
|
||||
**Description:**
|
||||
@@ -559,7 +602,9 @@ Example of usage to see the differences between different combinations :
|
||||
## 13/14 - 📏 + 🖼 Resize and Save Exact name ⚠️💣
|
||||
|
||||
**Description:**
|
||||
Resize an image to exact dimensions. The other node will save the image to the exact path.
|
||||
Resize an image to exact dimensions. If height or width is set to 0, it will keep the aspect ratio.
|
||||
|
||||
The other node will save the image to the exact path.
|
||||
⚠️💣 Warning : The image will be overwritten if it already exists.
|
||||
|
||||

|
||||
@@ -1213,7 +1258,7 @@ Here is one possible example for videos with node 60 and 61 :
|
||||
### 62 - 🦙👁 Ollama Vision
|
||||
|
||||
**Description:**
|
||||
Take an image as input and will describe the image. Uses `moondream` by default, but can select anything with node 63.
|
||||
Take an image as input and will describe the image. Uses `moondream` by default, but can select anything with node 63, and a custom prompt with node 161.
|
||||
|
||||

|
||||
|
||||
@@ -1249,6 +1294,10 @@ Below, an example of a crazy scientist explaining gravity. (Notice that the LLM
|
||||
**Description:**
|
||||
Use `faster-whisper` to transform an AUDIO type or audio_path into text. (Autodetect language)
|
||||
|
||||
⚠️ Warning, this node is not compatible with python `3.13`, you need to use `3.12`. (As a general rule, the comfyui ecosystem still recommends you to use 3.12 : https://github.com/comfyanonymous/ComfyUI?tab=readme-ov-file#manual-install-windows-linux.)
|
||||
(faster-whisper was removed from requirements.txt and must be installed manually.)
|
||||
If you really want to use that node, you are responsible for installing that manually : `pip install faster-whisper`.
|
||||
|
||||

|
||||

|
||||
|
||||
@@ -1369,7 +1418,7 @@ Just connect that to your KSampler.
|
||||
|
||||

|
||||
|
||||
#### 81 - 🔥📝 Text Generator 📝🔥
|
||||
#### 81 - 🔥📝🖼 Image Text Generator 🖼📝🔥
|
||||
|
||||
**Description:**
|
||||
Main node to generate content, doesn't really do much by itself, just `camera angle` and `multicharacter action`. (For example : `... eat picnic, view from above.`)
|
||||
@@ -1728,6 +1777,9 @@ So use that if you want to ignore a line.
|
||||
Groups, with no duplicate, example : {left|right|middle|group=LMR}+{left|right|middle|group=LMR}+{left|right|middle|group=LMR}
|
||||
Random based on percentage : {A(80%)|B(15%)|C(5%)}
|
||||
|
||||
❗ 0.77 - New syntax available :
|
||||
2 {apple|orange|banana|static_group=FRUIT}s, one {apple|orange|banana|static_group=FRUIT} on the left, one {apple|orange|banana|static_group=FRUIT} on the right
|
||||
|
||||
#### 118 - 🔊 TTS Configuration ⚙
|
||||
|
||||
**Description:**
|
||||
@@ -1821,6 +1873,8 @@ It's behavior is like a "Preview image" node. (See node 130 if you want a behavi
|
||||
Sometimes I want to display an image to explain what something specific is doing visually. (For example a stack of loras will have a specific style.)
|
||||
Here is a complex example on how i use that, for a list of loras stacks. (I then "select" a style by using node `125 - Model-Clip-Vae selector`)
|
||||
|
||||
**0.77** Multiline + image is now an issue on newer comfyui versions. Replaced with single line notes, couldn't find any clean js hack to solve the layout issue...
|
||||
|
||||

|
||||
|
||||
#### 128 - 🖼👁 Preview (first) image
|
||||
@@ -1835,6 +1889,8 @@ Below is a visual example of what I just said :
|
||||
|
||||

|
||||
|
||||
**0.77** Multiline + image is now an issue on newer comfyui versions. Replaced with single line notes, couldn't find any clean js hack to solve the layout issue...
|
||||
|
||||
#### 129 - 📌🅰️ Set Variable from Text
|
||||
|
||||
**Description:**
|
||||
@@ -1919,7 +1975,7 @@ Well now you can quickly enable / disable it. (If disabled you will see it in re
|
||||
|
||||

|
||||
|
||||
If connected with my combine text node, you can use a special option `ONLY_ME_combine_text` that will tell combine text to write ONLY the selected node. It will ignore all the otehrs. (Here will appear in blue.) :
|
||||
If connected with my combine text node, you can use a special option `ONLY_ME_combine_text` that will tell combine text to write ONLY the selected node. It will ignore all the others. (Here will appear in blue.) :
|
||||
|
||||

|
||||
|
||||
@@ -1987,4 +2043,261 @@ This node also automatically return a random value from the list with the global
|
||||
|
||||
Below is an example of write + load :
|
||||
|
||||

|
||||

|
||||
|
||||
#### 143 - 🧮 Basic Math
|
||||
|
||||
**Description:**
|
||||
Basic node for basic math.
|
||||
Addition, multiplication...
|
||||
Here is an example that I use to detect the number of "missing" frames in a video, compared to another video :
|
||||
|
||||

|
||||
|
||||
#### 144 - 📊🔍 Text Analyzer
|
||||
|
||||
**Description:**
|
||||
|
||||
Display details about an input text.
|
||||
You can use it just to recover the number of characters, you can then for example use this number to avoid too long prompts and avoid OOM :
|
||||
|
||||

|
||||
|
||||
Or extract more details about dialog and sentiment polarity of it. (For example, if you want a specific TTS voice for positive/neutral/negative.) :
|
||||
|
||||

|
||||
|
||||
#### 145 - 🔊▶ Audio Preview (Audio player)
|
||||
|
||||
**Description:**
|
||||
|
||||
Simple preview audio node that can take as input an audio_path, mp3 or wav.
|
||||
|
||||

|
||||
|
||||
#### 146 - 🖼📹🔪 Split Image/Video Grid
|
||||
|
||||
**Description:**
|
||||
This node will split a grid of images or videos into separate images or videos.
|
||||
|
||||
Below is an example with a grid of 6 images, it will split them into 6 separate images (3 rows X 3 columns) :
|
||||
|
||||

|
||||
|
||||
Why do I need that ? Sometimes I have several characters in a single image/video.
|
||||
When I want to apply lip sync to a character, I need to split them into separate images/videos where the character is located, create the lip sync, and then reassemble them with node 147.
|
||||
Or just to save VRAM, by processing only the part of the image that I need.
|
||||
|
||||
#### 147 - 🖼📹🔨 Reassemble Image/Video Grid
|
||||
|
||||
**Description:**
|
||||
|
||||
Reassemble an image/video that was grid split with node 146.
|
||||
Will need to take the same grid size as input.
|
||||
|
||||
You can apply changes to a specific element of the grid, and then reassemble them.
|
||||
|
||||
You can select each part individually.
|
||||
But there is also a "special" part, the MODIFIED one.
|
||||
This part is selected with the MODIFIED_part and the MODIFIED_part_index (part number, if index set to 2, it will use `part_2`).
|
||||
|
||||
Below is an example where I modify part_2 and reassemble it. (Without using the part_2 output, but selecting it using 2 as MODIFIED_part_index) :
|
||||
You can also work directly with part_2, but with this setup you can quickly switch in between parts without changing the node layout.
|
||||
|
||||

|
||||
|
||||
Below is an example where I show you that you can also influence different parts "manually". (Here, I just send part_1 to replace part 7,8,9.)
|
||||
|
||||

|
||||
|
||||
For reassembling videos it might get tricky.
|
||||
For example if you used the node `52 - 🔊📹 Audio Video Sync`, the modified part might be shorter/longer, change FPS etc...
|
||||
You can try to re-synchronize them all if the duration was modified.
|
||||
In this situation you need to use the `reference_video_part_index` to select the reference video part. (Probably the same as the MODIFIED_part_index.)
|
||||
|
||||

|
||||
|
||||
#### 148 - 💾🔊 Save Audio (tmp_api.wav/mp3) ⚠️💣
|
||||
|
||||
**Description:**
|
||||
|
||||
Temporary save for an audio file, mp3 or wav.
|
||||
Useful if you want to synchronize data in between API calls from several Comfyui instances. (Used in the background by my API nodes, like node 157 `🔮⚡ Execute Workflow`, to exchange audio files. - raw/processed -)
|
||||
|
||||
You can send audio_path (This will create file `tmp_api.mp3`) :
|
||||
|
||||

|
||||
|
||||
Or an AUDIO type (This will create file `tmp_api.wav`) :
|
||||
|
||||

|
||||
|
||||
Note that if you are sending an AUDIO type, this node will save it as a `wav` file. (Even if your selection is an mp3 file.)
|
||||
|
||||
#### 149 - 💾📹 Save Video (tmp_api.mp4/mkv/webm) ⚠️💣
|
||||
|
||||
**Description:**
|
||||
|
||||
Temporary save for a video file, mp4, mkv or webm.
|
||||
Useful if you want to synchronize data in between API calls from several Comfyui instances. (Used in the background by my API nodes, like node 157 `🔮⚡ Execute Workflow`, to exchange video files. - raw/processed -)
|
||||
|
||||

|
||||
|
||||
#### 150 - 🎨📜 Style Selector (🎲 or ♻ or ♻📑) + Civitai urn
|
||||
|
||||
**Description:**
|
||||
|
||||
A new Universal node for selecting styles.
|
||||
Also contains some information for Civitai urns. (Needs testing and a better "database" of good/fast models; I recommend you send the urns you like manually.)
|
||||
|
||||
Here is an example of usage with CivitAI API node :
|
||||
|
||||

|
||||
|
||||
Here is a slightly more complex example using model urns :
|
||||
|
||||

|
||||
|
||||
NOTE : CivitAI can be particularly slow !! So you can use the option DO_NOT_WAIT instead with an optional "links_file".
|
||||
It will create a file with the links to recover the generated images later with the node 151.
|
||||
So you can make as many requests as you want, and come back another time to recover the images with this txt file.
|
||||
|
||||
Note that you can also send the LIST directly with all the values.
|
||||
|
||||

|
||||
|
||||
If you do that you can later recover the values like text and use the "autosave" option of node 151.
|
||||
It will take the style and make a folder for it. Example from the image above : `./ComfyUI/output/civitai_autosave/Cinematic_photography/`
|
||||
It is very useful if you use node 150 as a LOOP, you can recover the images later and organise them automatically in separate folders according to the selected style.
|
||||
|
||||
#### 151 - 📥🕑🤖 Load CivitAI Links
|
||||
|
||||
**Description:**
|
||||
|
||||
This node must be used after using the node 150 + DO_NOT_WAIT + links_file.
|
||||
When saving DO_NOT_WAIT and the "links_file" from the node 150, it will create a txt file in "ComfyUI/Bjornulf/civitai_links/", you can load them with this node.
|
||||
|
||||
Below are the images recovered from the file "11_mars.txt":
|
||||
|
||||

|
||||
|
||||
If you used from node 150 the `LIST_from_style_selector` you can later recover the values and use the "autosave" option of node 151.
|
||||
It will take the style and make a folder for it. Example from the image above : `./ComfyUI/output/civitai_autosave/Cinematic_photography/`
|
||||
It is very useful if you use node 150 as a LOOP, you can recover the images later and organise them automatically in separate folders according to the selected style.
|
||||
|
||||
#### 152 - [Coming soon] 💾 Save Tensors (tmp_api.pt) ⚠️💣
|
||||
|
||||
**Description:**
|
||||
|
||||
This is BETA, it will save a tensor in a file.
|
||||
The goal is to optimize the VRAM usage by saving tensors in files.
|
||||
|
||||
#### 153 - [Coming soon] 📥 Load Tensor (tmp_api.pt)
|
||||
|
||||
**Description:**
|
||||
|
||||
This is BETA, it will load a tensor.
|
||||
The goal is to optimize the VRAM usage by saving tensors in files.
|
||||
|
||||
#### 154 - 🔮 Remote VAE Decoder
|
||||
|
||||
**Description:**
|
||||
|
||||
Simple node to decode a VAE model from a remote machine.
|
||||
|
||||
The VAE decode node doesn't have to be on the same machine, so if you have a remote machine and want to save the VRAM, you can use that.
|
||||
The remote machine need to have my custom nodes too.
|
||||
|
||||

|
||||
|
||||
#### 155 - [Coming soon] 🔮 Remote VAE Decoder (Tiled)
|
||||
|
||||
**Description:**
|
||||
|
||||
Same as 154 but with tiled version.
|
||||
|
||||

|
||||
|
||||
#### 156 - [Coming soon] 📥🔮 Load from Base64
|
||||
|
||||
**Description:**
|
||||
|
||||
This is BETA, it is used to recover some values between Comfyui instances.
|
||||
|
||||
#### 157 - [Coming soon] 🔮⚡ Remote Execute Workflow
|
||||
|
||||
**Description:**
|
||||
|
||||
This is the start of a complex new environment and this is in BETA.
|
||||
The goal of this node is to run a workflow on another Comfyui instance and recover what was generated.
|
||||
For now it supports image/sound/video.
|
||||
|
||||
It can do anything that a normal workflow can do.
|
||||
Create an image, create a video, upscale an image, etc...
|
||||
|
||||
I will share custom JSON and a lot more cool stuff for my Kofi supporters. (https://ko-fi.com/bjornulf)
|
||||
Keep in touch for more informations... Wip...
|
||||
|
||||
Basically use run heavy duty workflows on another machine, and recover the results.
|
||||
Below is an example of a lipsync workflow that I run on another Comfyui instance. (For now local network only, but will run on runpod too.)
|
||||
I use that on my laptop (192.168.1.23), that recovers the results from my "ai dedicated" desktop machine (192.168.1.100) :
|
||||
|
||||

|
||||
|
||||
#### 158 - [Coming soon] 📥🔮📝 Text Manager Api (Execute Workflow)
|
||||
|
||||
**Description:**
|
||||
|
||||
This is BETA.
|
||||
This node need to be used to create functional workflows to run with node 157.
|
||||
It is communicating with node 157 from another Comfyui instance.
|
||||
|
||||
#### 159 - [Coming soon] 🔥📝📹 Video Text Generator 📹📝🔥
|
||||
|
||||
**Description:**
|
||||
|
||||
This is BETA.
|
||||
This is the start of a new node that will generate a video prompt.
|
||||
It's goal is to be like the "Text Generator" node, but for videos.
|
||||
The "old" Text Generator main node 81 is now renamed for image only : 🔥📝🖼 Image Text Generator 🖼📝🔥.
|
||||
|
||||

|
||||
|
||||
#### 160 - 🦙👁 Ollama Vision Prompt Selector
|
||||
|
||||
**Description:**
|
||||
New layout prompt selector for Ollama Vision.
|
||||
Ollama vision is now divided in 2 separate nodes.
|
||||
|
||||

|
||||
|
||||
#### 161 - [Coming soon] 🔧🧑 Fix Face
|
||||
|
||||
**Description:**
|
||||
|
||||
This is BETA.
|
||||
First node of a new collection of "Fix Images" nodes.
|
||||
Quickly fix, control and edit faces. (and more)
|
||||

|
||||
|
||||
Here is a zoom of that :
|
||||

|
||||
|
||||
You can also give specific text and custom denoise for each faces :
|
||||

|
||||
|
||||
You can also select a specific face, if set at 0 it will do all the faces, but below I set it to 1 :
|
||||

|
||||
|
||||
This node can also fix the whole person, but i will likely make another separate node for that later :
|
||||

|
||||
|
||||
You can preview the nodes effects very well with my Preview Image node, You can clearly see here the white box that is the detected "person" :
|
||||

|
||||
|
||||
In the future I will make a system to "inject" a specific reusable character, probably connected to loras or such.
|
||||
|
||||
#### 162 - [Coming soon] 🧑 Face Settings [Fix Face] ⚙
|
||||
|
||||
**Description:**
|
||||
Connect to Fix Face node to modify a specific face.
|
||||
72
__init__.py
@@ -40,6 +40,7 @@ from .character_description import CharacterDescriptionGenerator
|
||||
from .text_to_speech import TextToSpeech, XTTSConfig
|
||||
from .loop_combine_texts_by_lines import CombineTextsByLines
|
||||
from .free_vram_hack import FreeVRAM
|
||||
#, PurgeCLIPNode
|
||||
from .pause_resume_stop import PauseResume
|
||||
from .pick_input import PickInput
|
||||
from .loop_images import LoopImages
|
||||
@@ -71,7 +72,7 @@ from .ffmpeg_combine_video_audio import CombineVideoAudio
|
||||
from .images_merger_horizontal import MergeImagesHorizontally
|
||||
from .images_merger_vertical import MergeImagesVertically
|
||||
from .ollama_talk import OllamaTalk
|
||||
from .ollama_image_vision import OllamaImageVision
|
||||
from .ollama_image_vision import OllamaImageVision, OllamaVisionPromptSelector
|
||||
from .ollama_config_selector import OllamaConfig
|
||||
from .ollama_system_persona import OllamaSystemPersonaSelector
|
||||
from .ollama_system_job import OllamaSystemJobSelector
|
||||
@@ -87,7 +88,7 @@ from .ffmpeg_convert import ConvertVideo
|
||||
from .text_generator import TextGenerator, TextGeneratorScene, TextGeneratorStyle, TextGeneratorCharacterFemale, TextGeneratorCharacterMale, TextGeneratorOutfitMale, TextGeneratorOutfitFemale, ListLooper, ListLooperScene, ListLooperStyle, ListLooperCharacter, ListLooperOutfitFemale, ListLooperOutfitMale, TextGeneratorCharacterPose, TextGeneratorCharacterObject, TextGeneratorCharacterCreature
|
||||
from .API_flux import APIGenerateFlux
|
||||
from .API_StableDiffusion import APIGenerateStability
|
||||
from .API_civitai import APIGenerateCivitAI, APIGenerateCivitAIAddLORA, CivitAIModelSelectorPony, CivitAIModelSelectorSD15, CivitAIModelSelectorSDXL, CivitAIModelSelectorFLUX_S, CivitAIModelSelectorFLUX_D, CivitAILoraSelectorSD15, CivitAILoraSelectorSDXL, CivitAILoraSelectorPONY, CivitAILoraSelectorHunyuan
|
||||
from .API_civitai import APIGenerateCivitAI, APIGenerateCivitAIAddLORA, CivitAIModelSelectorPony, CivitAIModelSelectorSD15, CivitAIModelSelectorSDXL, CivitAIModelSelectorFLUX_S, CivitAIModelSelectorFLUX_D, CivitAILoraSelectorSD15, CivitAILoraSelectorSDXL, CivitAILoraSelectorPONY, CivitAILoraSelectorHunyuan, LoadCivitAILinks
|
||||
from .API_falAI import APIGenerateFalAI
|
||||
from .latent_resolution_selector import LatentResolutionSelector
|
||||
from .loader_lora_with_path import LoaderLoraWithPath
|
||||
@@ -116,8 +117,48 @@ from .play_sound import PlayAudio
|
||||
from .switches import SwitchText, SwitchAnything
|
||||
from .write_pickme_global import WriteTextPickMeGlobal, LoadTextPickMeGlobal
|
||||
from .list_selector import ListSelector
|
||||
from .text_analyzer import TextAnalyzer
|
||||
from .math_node import MathNode
|
||||
from .save_tmp_audio import SaveTmpAudio
|
||||
from .save_tmp_video import SaveTmpVideo
|
||||
from .audio_preview import AudioPreview
|
||||
from .style_selector import StyleSelector
|
||||
# from .switches import ConditionalSwitch
|
||||
from .split_image import SplitImageGrid, ReassembleImageGrid
|
||||
|
||||
# from .video_text_generator import VideoTextGenerator
|
||||
# from .run_workflow_from_api import ExecuteWorkflowNode, ApiDynamicTextInputs
|
||||
# from .remote_nodes import RemoteVAEDecoderNodeTiled, RemoteVAEDecoderNode, LoadFromBase64, SaveTensors, LoadTensor
|
||||
# from .fix_face import FixFace, FaceSettings
|
||||
|
||||
#RemoteTextEncodingWithCLIPs
|
||||
|
||||
NODE_CLASS_MAPPINGS = {
|
||||
# "Bjornulf_PurgeCLIPNode": PurgeCLIPNode,
|
||||
# "Bjornulf_RemoteTextEncodingWithCLIPs": RemoteTextEncodingWithCLIPs,
|
||||
|
||||
# "Bjornulf_FixFace": FixFace,
|
||||
# "Bjornulf_FaceSettings": FaceSettings,
|
||||
# "Bjornulf_SaveTensors": SaveTensors,
|
||||
# "Bjornulf_LoadTensor": LoadTensor,
|
||||
# "Bjornulf_LoadFromBase64": LoadFromBase64,
|
||||
# "Bjornulf_RemoteVAEDecoderNode": RemoteVAEDecoderNode,
|
||||
# "Bjornulf_RemoteVAEDecoderNodeTiled": RemoteVAEDecoderNodeTiled,
|
||||
# "Bjornulf_VideoTextGenerator": VideoTextGenerator,
|
||||
# "Bjornulf_ExecuteWorkflowNode": ExecuteWorkflowNode,
|
||||
# "Bjornulf_ApiDynamicTextInputs": ApiDynamicTextInputs,
|
||||
|
||||
# "Bjornulf_ConditionalSwitch": ConditionalSwitch,
|
||||
"Bjornulf_LoadCivitAILinks": LoadCivitAILinks,
|
||||
"Bjornulf_SplitImageGrid": SplitImageGrid,
|
||||
"Bjornulf_ReassembleImageGrid": ReassembleImageGrid,
|
||||
"Bjornulf_StyleSelector": StyleSelector,
|
||||
"Bjornulf_OllamaVisionPromptSelector": OllamaVisionPromptSelector,
|
||||
"Bjornulf_AudioPreview": AudioPreview,
|
||||
"Bjornulf_SaveTmpAudio": SaveTmpAudio,
|
||||
"Bjornulf_SaveTmpVideo": SaveTmpVideo,
|
||||
"Bjornulf_MathNode": MathNode,
|
||||
"Bjornulf_TextAnalyzer": TextAnalyzer,
|
||||
"Bjornulf_ListSelector": ListSelector,
|
||||
"Bjornulf_WriteTextPickMeGlobal": WriteTextPickMeGlobal,
|
||||
"Bjornulf_LoadTextPickMeGlobal": LoadTextPickMeGlobal,
|
||||
@@ -273,6 +314,31 @@ NODE_CLASS_MAPPINGS = {
|
||||
}
|
||||
|
||||
NODE_DISPLAY_NAME_MAPPINGS = {
|
||||
# "Bjornulf_RemoteTextEncodingWithCLIPs": "[BETA] 🔮 Remote Text Encoding with CLIPs",
|
||||
# "Bjornulf_ConditionalSwitch": "ConditionalSwitch",
|
||||
# "Bjornulf_PurgeCLIPNode": "🧹📎 Purge CLIP",
|
||||
|
||||
# "Bjornulf_FixFace": "[BETA] 🔧🧑 Fix Face",
|
||||
# "Bjornulf_FaceSettings": "[BETA] 🧑 Face Settings [Fix Face] ⚙",
|
||||
# "Bjornulf_SaveTensors": "[BETA] 💾 Save Tensors (tmp_api.pt) ⚠️💣",
|
||||
# "Bjornulf_LoadTensor": "[BETA] 📥 Load Tensor (tmp_api.pt)",
|
||||
# "Bjornulf_RemoteVAEDecoderNode": "[BETA] 🔮 Remote VAE Decoder",
|
||||
# "Bjornulf_RemoteVAEDecoderNodeTiled": "[BETA] 🔮 Remote VAE Decoder (Tiled)",
|
||||
# "Bjornulf_LoadFromBase64": "[BETA] 📥🔮 Load from Base64",
|
||||
# "Bjornulf_ApiDynamicTextInputs": "[BETA] 📥🔮📝 Text Manager Api (Execute Workflow)",
|
||||
# "Bjornulf_ExecuteWorkflowNode": "[BETA] 🔮⚡ Remote Execute Workflow",
|
||||
# "Bjornulf_VideoTextGenerator": "[BETA] 🔥📝📹 Video Text Generator 📹📝🔥",
|
||||
|
||||
"Bjornulf_LoadCivitAILinks": "📥🕑🤖 Load CivitAI Links",
|
||||
"Bjornulf_StyleSelector": "🎨📜 Style Selector (🎲 or ♻ or ♻📑) + Civitai urn",
|
||||
"Bjornulf_ReassembleImageGrid": "🖼📹🔨 Reassemble Image/Video Grid",
|
||||
"Bjornulf_SplitImageGrid": "🖼📹🔪 Split Image/Video Grid",
|
||||
"Bjornulf_SaveTmpAudio": "💾🔊 Save Audio (tmp_api.wav/mp3) ⚠️💣",
|
||||
"Bjornulf_SaveTmpVideo": "💾📹 Save Video (tmp_api.mp4/mkv/webm) ⚠️💣",
|
||||
"Bjornulf_AudioPreview": "🔊▶ Audio Preview (Audio player)",
|
||||
"Bjornulf_MathNode": "🧮 Basic Math",
|
||||
"Bjornulf_TextAnalyzer": "📊🔍 Text Analyzer",
|
||||
"Bjornulf_OllamaVisionPromptSelector": "🦙👁 Ollama Vision Prompt Selector",
|
||||
"Bjornulf_ListSelector": "📑👈 Select from List",
|
||||
"Bjornulf_PlayAudio": "🔊▶ Play Audio",
|
||||
"Bjornulf_SwitchText": "🔛📝 Text Switch On/Off",
|
||||
@@ -327,7 +393,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
|
||||
"Bjornulf_APIGenerateCivitAIAddLORA": "☁👑 Add Lora (API ONLY - CivitAI) 👑☁",
|
||||
"Bjornulf_APIGenerateFlux": "☁🎨 API Image Generator (Black Forest Labs - Flux) 🎨☁",
|
||||
"Bjornulf_APIGenerateStability": "☁🎨 API Image Generator (Stability - Stable Diffusion) 🎨☁",
|
||||
"Bjornulf_TextGenerator": "🔥📝 Text Generator 📝🔥",
|
||||
"Bjornulf_TextGenerator": "🔥📝 Image Text Generator 📝🔥",
|
||||
"Bjornulf_TextGeneratorCharacterFemale": "👩🦰📝 Text Generator (Character Female)",
|
||||
"Bjornulf_TextGeneratorCharacterMale": "👨🦰📝 Text Generator (Character Male)",
|
||||
"Bjornulf_TextGeneratorCharacterPose": "💃🕺📝 Text Generator (Character Pose)",
|
||||
|
||||
80
audio_preview.py
Normal file
@@ -0,0 +1,80 @@
|
||||
import os
|
||||
import shutil
|
||||
import time
|
||||
import uuid
|
||||
import urllib.request
|
||||
import urllib.parse
|
||||
|
||||
class AudioPreview:
    """ComfyUI output node that stages an audio file for in-browser preview.

    Copies (or downloads) the given audio into ``temp/Bjornulf`` under a
    unique name and returns UI metadata so the frontend widget can play it.
    All failures are reported through the ``ui.error`` field rather than
    raised, so the workflow keeps running.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "audio_path": ("STRING", {"default": ""}),
                "autoplay": ("BOOLEAN", {"default": False}),
                "loop": ("BOOLEAN", {"default": False}),
            }
        }

    RETURN_TYPES = ()
    FUNCTION = "preview_audio"
    CATEGORY = "audio"
    OUTPUT_NODE = True

    def preview_audio(self, audio_path, autoplay, loop):
        """Stage *audio_path* (local file or http(s) URL) and return UI data.

        Returns a ``{"ui": {...}}`` dict: on success it carries the staged
        file name plus autoplay/loop metadata, on failure an ``error`` string
        and an empty ``audio`` list.
        """
        try:
            # Reject empty / non-string / whitespace-only paths up front.
            if not (audio_path and isinstance(audio_path, str) and audio_path.strip()):
                raise ValueError("No valid audio path provided.")

            # All previews are staged under a fixed temp subfolder.
            staging_dir = os.path.join("temp", "Bjornulf")
            os.makedirs(staging_dir, exist_ok=True)

            # Timestamp + short random token keeps staged names unique.
            stamp = int(time.time())
            token = uuid.uuid4().hex[:8]

            is_remote = audio_path.startswith(("http://", "https://"))

            if is_remote:
                # Derive the extension from the URL path; without one the
                # frontend cannot tell the audio format, so fail early.
                ext = os.path.splitext(urllib.parse.urlparse(audio_path).path)[1]
                if not ext:
                    raise ValueError("URL does not have a file extension.")
            else:
                # Local file: resolve and verify before copying.
                audio_path = os.path.abspath(audio_path)
                if not os.path.exists(audio_path):
                    raise FileNotFoundError(f"Audio file not found: {audio_path}")
                ext = os.path.splitext(audio_path)[1]

            staged_name = f"Bjornulf_{stamp}_{token}{ext}"
            staged_path = os.path.join(staging_dir, staged_name)

            if is_remote:
                try:
                    urllib.request.urlretrieve(audio_path, staged_path)
                except Exception as e:
                    raise ValueError(f"Failed to download audio from URL: {audio_path}. Error: {e}")
            else:
                # copy2 preserves timestamps/metadata of the source file.
                shutil.copy2(audio_path, staged_path)

            # UI payload consumed by the frontend audio widget.
            return {
                "ui": {
                    "audio": [staged_name, "Bjornulf"],
                    "metadata": {
                        "autoplay": autoplay,
                        "loop": loop,
                    },
                }
            }

        except Exception as e:
            # Surface the failure to the UI instead of aborting the workflow.
            return {
                "ui": {
                    "audio": [],
                    "error": str(e),
                }
            }
|
||||
@@ -306,39 +306,41 @@ class AudioVideoSync:
|
||||
temp_dir = os.path.join(self.temp_dir, "temp_frames")
|
||||
os.makedirs(temp_dir, exist_ok=True)
|
||||
|
||||
# Extract frames using ffmpeg
|
||||
# Clear existing files to avoid mixing frames from previous runs
|
||||
for file in os.listdir(temp_dir):
|
||||
os.remove(os.path.join(temp_dir, file))
|
||||
|
||||
# Extract frames using FFmpeg
|
||||
subprocess.run([
|
||||
'ffmpeg', '-i', video_path,
|
||||
os.path.join(temp_dir, 'frame_%05d.png')
|
||||
], check=True)
|
||||
'ffmpeg', '-y', '-i', video_path, os.path.join(temp_dir, 'frame_%05d.png')
|
||||
], check=True) # Added '-y' to overwrite without prompting
|
||||
|
||||
# Load frames and convert to tensor
|
||||
frames = []
|
||||
frame_files = sorted(os.listdir(temp_dir))
|
||||
transform = transforms.Compose([
|
||||
transforms.ToTensor(),
|
||||
transforms.Lambda(lambda x: x * 255) # Scale to 0-255 range
|
||||
transforms.Lambda(lambda x: x * 255)
|
||||
])
|
||||
|
||||
for frame_file in frame_files:
|
||||
image = Image.open(os.path.join(temp_dir, frame_file))
|
||||
image = Image.open(os.path.join(temp_dir, frame_file)).convert('RGB')
|
||||
frame_tensor = transform(image)
|
||||
frames.append(frame_tensor)
|
||||
|
||||
# Stack frames into a single tensor
|
||||
frames_tensor = torch.stack(frames)
|
||||
|
||||
# Ensure the tensor is in the correct format (B, C, H, W)
|
||||
# Ensure correct format (B, C, H, W)
|
||||
if frames_tensor.dim() == 3:
|
||||
frames_tensor = frames_tensor.unsqueeze(0)
|
||||
|
||||
# Convert to uint8
|
||||
frames_tensor = frames_tensor.byte()
|
||||
|
||||
# Clean up temporary directory
|
||||
# Clean up
|
||||
for frame_file in frame_files:
|
||||
os.remove(os.path.join(temp_dir, frame_file))
|
||||
os.rmdir(temp_dir)
|
||||
|
||||
return frames_tensor
|
||||
|
||||
|
||||
@@ -122,6 +122,48 @@ class imagesToVideo:
|
||||
except Exception as e:
|
||||
logging.error(f"Unexpected error in FFmpeg-python: {str(e)}")
|
||||
return False, f"Error: {str(e)}"
|
||||
|
||||
def get_next_filename(self, output_base, format="mp4"):
    """Return the next free filename in a 4-digit numbered sequence.

    Scans for existing files matching ``<output_base>_NNNN.<format>`` and
    returns the path with the highest number plus one (or ``0001`` when no
    file matches yet).

    Args:
        output_base (str): Base path and prefix (e.g. 'output/imgs2video/me').
        format (str): File extension without the dot (e.g. 'mp4').

    Returns:
        str: Next filename in the sequence (e.g. 'output/imgs2video/me_0004.mp4').
    """
    # Drop any trailing path separator so the '_NNNN' suffix attaches cleanly.
    base = output_base.rstrip(os.sep)

    # glob.escape() protects against glob metacharacters ([], *, ?) that may
    # appear in the base path itself; only our own [0-9] classes stay active.
    pattern = f"{glob.escape(base)}_[0-9][0-9][0-9][0-9].{format}"

    # Collect the numeric suffix of every matching file.
    used = []
    for path in glob.glob(pattern):
        # 'me_0003.mp4' -> '0003.mp4' -> '0003'
        digits = os.path.basename(path).rsplit('_', 1)[-1].split('.', 1)[0]
        if digits.isdigit() and len(digits) == 4:
            used.append(int(digits))

    next_num = max(used) + 1 if used else 1

    # Zero-pad to 4 digits to keep lexical and numeric order aligned.
    return f"{base}_{next_num:04d}.{format}"
|
||||
|
||||
def image_to_video(self, images, fps, name_prefix, use_python_ffmpeg=False, audio=None, audio_path=None, FFMPEG_CONFIG_JSON=None):
|
||||
ffmpeg_config = self.parse_ffmpeg_config(FFMPEG_CONFIG_JSON)
|
||||
@@ -130,27 +172,24 @@ class imagesToVideo:
|
||||
if ffmpeg_config and ffmpeg_config["output"]["container_format"] != "None":
|
||||
format = ffmpeg_config["output"]["container_format"]
|
||||
|
||||
# Remove any extension from name_prefix and create output_base
|
||||
name_prefix = os.path.splitext(name_prefix)[0]
|
||||
output_base = os.path.join("output", name_prefix)
|
||||
|
||||
existing_files = glob.glob(f"{output_base}_*.{format}")
|
||||
if existing_files:
|
||||
max_num = max([int(f.split('_')[-1].split('.')[0]) for f in existing_files])
|
||||
next_num = max_num + 1
|
||||
else:
|
||||
next_num = 1
|
||||
# Get the next filename using the corrected function
|
||||
output_file = self.get_next_filename(output_base, format)
|
||||
|
||||
output_file = f"{output_base}_{next_num:04d}.{format}"
|
||||
|
||||
temp_dir = "Bjornulf/temp_images_imgs2video"
|
||||
# Clean up and prepare temporary directory
|
||||
temp_dir = os.path.join("Bjornulf", "temp_images_imgs2video") # Use os.path.join for cross-platform compatibility
|
||||
if os.path.exists(temp_dir) and os.path.isdir(temp_dir):
|
||||
for file in os.listdir(temp_dir):
|
||||
os.remove(os.path.join(temp_dir, file))
|
||||
os.rmdir(temp_dir)
|
||||
|
||||
# Create necessary directories
|
||||
os.makedirs(temp_dir, exist_ok=True)
|
||||
os.makedirs(os.path.dirname(output_file) if os.path.dirname(output_file) else ".", exist_ok=True)
|
||||
|
||||
|
||||
for i, img_tensor in enumerate(images):
|
||||
img = Image.fromarray((img_tensor.cpu().numpy() * 255).astype(np.uint8))
|
||||
if format == "webm":
|
||||
|
||||
@@ -56,4 +56,21 @@ class FreeVRAM:
|
||||
response.raise_for_status()
|
||||
print("HTTP request triggered successfully")
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"Failed to trigger HTTP request: {e}")
|
||||
print(f"Failed to trigger HTTP request: {e}")
|
||||
|
||||
class PurgeCLIPNode:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {"required": {}}
|
||||
|
||||
RETURN_TYPES = ()
|
||||
FUNCTION = "purge_clip"
|
||||
CATEGORY = "utils"
|
||||
|
||||
def purge_clip(self):
|
||||
# Check if the CLIP model is accessible in the global scope
|
||||
global clip_model
|
||||
if 'clip_model' in globals():
|
||||
del clip_model # Delete the CLIP model reference
|
||||
torch.cuda.empty_cache() # Clear VRAM cache
|
||||
return ()
|
||||
@@ -1,14 +1,12 @@
|
||||
|
||||
from server import PromptServer
|
||||
import os
|
||||
from aiohttp import web
|
||||
import random
|
||||
|
||||
class GlobalSeedManager:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {"required": {"seed": ( "INT", {
|
||||
"default": 0,
|
||||
"default": 1,
|
||||
"min": 0,
|
||||
"max": 4294967294
|
||||
})}}
|
||||
@@ -19,8 +17,8 @@ class GlobalSeedManager:
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def generate_seed(self, seed: int):
|
||||
# Generate new random seed
|
||||
new_seed = random.randint(0, 2**31 - 1)
|
||||
# Use the provided seed instead of generating a new one
|
||||
new_seed = seed
|
||||
seed_str = str(new_seed)
|
||||
|
||||
# Define file path
|
||||
|
||||
@@ -12,6 +12,7 @@ class IfElse:
|
||||
"STRING: input EQUAL TO compare_with",
|
||||
"STRING: input NOT EQUAL TO compare_with",
|
||||
"BOOLEAN: input IS TRUE",
|
||||
"BOOLEAN: input IS FALSE",
|
||||
"NUMBER: input GREATER THAN compare_with",
|
||||
"NUMBER: input GREATER OR EQUAL TO compare_with",
|
||||
"NUMBER: input LESS THAN compare_with",
|
||||
@@ -61,6 +62,9 @@ class IfElse:
|
||||
elif input_type == "BOOLEAN: input IS TRUE":
|
||||
result = str(input).lower() in ("true", "1", "yes", "y", "on")
|
||||
details += f"\nChecked if '{input}' is considered True"
|
||||
elif input_type == "BOOLEAN: input IS FALSE":
|
||||
result = str(input).lower() in ("false", "0", "no", "n", "off")
|
||||
details += f"\nChecked if '{input}' is considered True"
|
||||
else: # Numeric comparisons
|
||||
try:
|
||||
input_num = float(input)
|
||||
|
||||
193
line_selector.py
@@ -14,31 +14,28 @@ class LineSelector:
|
||||
def INPUT_TYPES(s):
|
||||
return {
|
||||
"required": {
|
||||
"text": ("STRING", {"multiline": True}), # Input for multiple lines
|
||||
"line_number": ("INT", {"default": 0, "min": 0, "max": 99999}), # 0 for random, >0 for specific line
|
||||
"RANDOM": ("BOOLEAN", {"default": False}), # Force random selection
|
||||
"LOOP": ("BOOLEAN", {"default": False}), # Return all lines as list
|
||||
"LOOP_SEQUENTIAL": ("BOOLEAN", {"default": False}), # Sequential looping
|
||||
"jump": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}), # Jump size for sequential loop
|
||||
"pick_random_variable": ("BOOLEAN", {"default": True}), # Enable random choice functionality
|
||||
"text": ("STRING", {"multiline": True}),
|
||||
"line_number": ("INT", {"default": 0, "min": 0, "max": 99999}),
|
||||
"RANDOM": ("BOOLEAN", {"default": False}),
|
||||
"LOOP": ("BOOLEAN", {"default": False}),
|
||||
"LOOP_SEQUENTIAL": ("BOOLEAN", {"default": False}),
|
||||
"jump": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
|
||||
"pick_random_variable": ("BOOLEAN", {"default": True}),
|
||||
},
|
||||
"optional": {
|
||||
"variables": ("STRING", {"multiline": True, "forceInput": True}),
|
||||
"seed": ("INT", {
|
||||
"default": -1,
|
||||
"min": -1,
|
||||
"max": 0x7FFFFFFFFFFFFFFF
|
||||
}),
|
||||
"seed": ("INT", {"default": -1, "min": -1, "max": 0x7FFFFFFFFFFFFFFF}),
|
||||
},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("STRING", "INT", "INT") # String output, remaining cycles, current line number
|
||||
RETURN_TYPES = ("STRING", "INT", "INT")
|
||||
RETURN_NAMES = ("text", "remaining_cycles", "current_line")
|
||||
OUTPUT_IS_LIST = (True, False, False) # Only text output can be a list
|
||||
OUTPUT_IS_LIST = (True, False, False)
|
||||
FUNCTION = "select_line"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def find_variables(self, text):
|
||||
"""Identify nested curly brace sections in the text."""
|
||||
stack = []
|
||||
variables = []
|
||||
for i, char in enumerate(text):
|
||||
@@ -55,6 +52,7 @@ class LineSelector:
|
||||
return variables
|
||||
|
||||
def parse_option(self, part):
|
||||
"""Parse options within curly braces, handling CSV and weighted choices."""
|
||||
if part.startswith('%csv='):
|
||||
try:
|
||||
filename = part.split('=', 1)[1].strip()
|
||||
@@ -67,61 +65,83 @@ class LineSelector:
|
||||
return (option.strip(), float(weight.split('%)')[0]))
|
||||
return part.strip()
|
||||
|
||||
def process_content(self, content, seed):
|
||||
random.seed(seed)
|
||||
parts = []
|
||||
def process_content(self, content, base_seed, position):
|
||||
"""Process content within curly braces, handling groups and random choices."""
|
||||
# Use a unique seed for regular choices based on position
|
||||
random.seed(base_seed + position)
|
||||
parts = [p.strip() for p in content.split('|')]
|
||||
options = []
|
||||
weights = []
|
||||
group_defined = False
|
||||
group_name = None
|
||||
static_group = None
|
||||
cycling_group = None
|
||||
|
||||
for p in content.split('|'):
|
||||
p = p.strip()
|
||||
if p.startswith('group='):
|
||||
group_name = p.split('=', 1)[1].strip()
|
||||
group_defined = True
|
||||
continue
|
||||
|
||||
parsed = self.parse_option(p)
|
||||
if isinstance(parsed, list): # CSV data
|
||||
parts.extend(parsed)
|
||||
weights.extend([1]*len(parsed))
|
||||
elif isinstance(parsed, tuple): # Weighted option
|
||||
parts.append(parsed[0])
|
||||
weights.append(parsed[1])
|
||||
for p in parts:
|
||||
if p.startswith('static_group='):
|
||||
static_group = p.split('=', 1)[1].strip()
|
||||
elif p.startswith('group='):
|
||||
cycling_group = p.split('=', 1)[1].strip()
|
||||
else:
|
||||
parts.append(parsed)
|
||||
weights.append(1)
|
||||
|
||||
if group_defined:
|
||||
return {'type': 'group', 'name': group_name, 'options': parts}
|
||||
|
||||
if any(w != 1 for w in weights):
|
||||
total = sum(weights)
|
||||
if total == 0: weights = [1]*len(parts)
|
||||
return random.choices(parts, weights=[w/total for w in weights])[0]
|
||||
|
||||
return random.choice(parts) if parts else ''
|
||||
parsed = self.parse_option(p)
|
||||
if isinstance(parsed, list): # CSV data
|
||||
options.extend(parsed)
|
||||
weights.extend([1] * len(parsed))
|
||||
elif isinstance(parsed, tuple): # Weighted option
|
||||
options.append(parsed[0])
|
||||
weights.append(parsed[1])
|
||||
else:
|
||||
options.append(parsed)
|
||||
weights.append(1)
|
||||
|
||||
if static_group and cycling_group:
|
||||
raise ValueError("Cannot specify both static_group and group in the same section.")
|
||||
|
||||
if static_group:
|
||||
return {'type': 'static_group', 'name': static_group, 'options': options, 'weights': weights}
|
||||
elif cycling_group:
|
||||
return {'type': 'cycling_group', 'name': cycling_group, 'options': options, 'weights': weights}
|
||||
else:
|
||||
if options:
|
||||
if any(w != 1 for w in weights):
|
||||
total = sum(weights)
|
||||
if total == 0:
|
||||
weights = [1] * len(options)
|
||||
return random.choices(options, weights=[w / total for w in weights])[0]
|
||||
else:
|
||||
return random.choice(options)
|
||||
return ''
|
||||
|
||||
def process_advanced_syntax(self, text, seed):
|
||||
# Process nested variables
|
||||
"""Process the entire text for advanced syntax, handling nested variables and groups."""
|
||||
variables = self.find_variables(text)
|
||||
static_groups = {}
|
||||
cycling_groups = {}
|
||||
substitutions = []
|
||||
groups = {}
|
||||
|
||||
for var in variables:
|
||||
start, end = var['start'], var['end']
|
||||
content = text[start+1:end-1]
|
||||
processed = self.process_content(content, seed)
|
||||
content = text[start + 1:end - 1]
|
||||
processed = self.process_content(content, seed, start)
|
||||
|
||||
if isinstance(processed, dict):
|
||||
if processed['type'] == 'group':
|
||||
group_name = processed['name']
|
||||
if group_name not in groups:
|
||||
groups[group_name] = []
|
||||
groups[group_name].append({
|
||||
if processed['type'] == 'static_group':
|
||||
name = processed['name']
|
||||
if name not in static_groups:
|
||||
static_groups[name] = []
|
||||
static_groups[name].append({
|
||||
'start': start,
|
||||
'end': end,
|
||||
'options': processed['options']
|
||||
'options': processed['options'],
|
||||
'weights': processed['weights']
|
||||
})
|
||||
elif processed['type'] == 'cycling_group':
|
||||
name = processed['name']
|
||||
if name not in cycling_groups:
|
||||
cycling_groups[name] = []
|
||||
cycling_groups[name].append({
|
||||
'start': start,
|
||||
'end': end,
|
||||
'options': processed['options'],
|
||||
'weights': processed['weights']
|
||||
})
|
||||
else:
|
||||
substitutions.append({
|
||||
@@ -130,15 +150,35 @@ class LineSelector:
|
||||
'sub': processed
|
||||
})
|
||||
|
||||
# Handle groups
|
||||
for group_name, matches in groups.items():
|
||||
# Handle static groups: choose one value per group name
|
||||
random.seed(seed) # Reset seed for consistent static group behavior
|
||||
for name, matches in static_groups.items():
|
||||
if not matches or not matches[0]['options']:
|
||||
continue
|
||||
options = matches[0]['options']
|
||||
weights = matches[0]['weights']
|
||||
if any(w != 1 for w in weights):
|
||||
total = sum(weights)
|
||||
if total == 0:
|
||||
weights = [1] * len(options)
|
||||
chosen = random.choices(options, weights=[w / total for w in weights])[0]
|
||||
else:
|
||||
chosen = random.choice(options) if options else ''
|
||||
for m in matches:
|
||||
substitutions.append({
|
||||
'start': m['start'],
|
||||
'end': m['end'],
|
||||
'sub': chosen
|
||||
})
|
||||
|
||||
# Handle cycling groups: cycle through shuffled options
|
||||
random.seed(seed) # Reset seed for consistent cycling group behavior
|
||||
for name, matches in cycling_groups.items():
|
||||
if not matches or not matches[0]['options']:
|
||||
continue
|
||||
|
||||
options = matches[0]['options']
|
||||
permuted = random.sample(options, len(options))
|
||||
perm_cycle = cycle(permuted)
|
||||
|
||||
for m in matches:
|
||||
substitutions.append({
|
||||
'start': m['start'],
|
||||
@@ -146,7 +186,7 @@ class LineSelector:
|
||||
'sub': next(perm_cycle)
|
||||
})
|
||||
|
||||
# Apply regular substitutions
|
||||
# Apply substitutions in reverse order
|
||||
substitutions.sort(key=lambda x: -x['start'])
|
||||
result_text = text
|
||||
for sub in substitutions:
|
||||
@@ -155,43 +195,31 @@ class LineSelector:
|
||||
return result_text
|
||||
|
||||
def select_line(self, text, line_number, RANDOM, LOOP, LOOP_SEQUENTIAL, jump, pick_random_variable, variables="", seed=-1):
|
||||
# Parse variables
|
||||
"""Select lines from the text based on the specified mode after processing advanced syntax."""
|
||||
var_dict = {}
|
||||
for line in variables.split('\n'):
|
||||
if '=' in line:
|
||||
key, value = line.split('=', 1)
|
||||
var_dict[key.strip()] = value.strip()
|
||||
|
||||
# Replace variables in the text
|
||||
for key, value in var_dict.items():
|
||||
text = text.replace(f"<{key}>", value)
|
||||
|
||||
# Split the input text into lines, remove empty lines and lines starting with #
|
||||
if seed < 0:
|
||||
seed = random.randint(0, 0x7FFFFFFFFFFFFFFF)
|
||||
|
||||
if pick_random_variable:
|
||||
text = self.process_advanced_syntax(text, seed)
|
||||
|
||||
lines = [line.strip() for line in text.split('\n')
|
||||
if line.strip() and not line.strip().startswith('#')]
|
||||
if line.strip() and not line.strip().startswith('#')]
|
||||
|
||||
if not lines:
|
||||
return (["No valid lines found."], 0, 0)
|
||||
|
||||
import random
|
||||
import os
|
||||
|
||||
# Set seed if provided
|
||||
if seed < 0:
|
||||
seed = random.randint(0, 0x7FFFFFFFFFFFFFFF)
|
||||
|
||||
# Process WriteTextAdvanced syntax if enabled
|
||||
if pick_random_variable:
|
||||
processed_lines = []
|
||||
for line in lines:
|
||||
processed_lines.append(self.process_advanced_syntax(line, seed))
|
||||
lines = processed_lines
|
||||
|
||||
# Handle sequential looping
|
||||
if LOOP_SEQUENTIAL:
|
||||
counter_file = os.path.join("Bjornulf", "line_selector_counter.txt")
|
||||
os.makedirs(os.path.dirname(counter_file), exist_ok=True)
|
||||
|
||||
try:
|
||||
with open(counter_file, 'r') as f:
|
||||
current_index = int(f.read().strip())
|
||||
@@ -199,11 +227,10 @@ class LineSelector:
|
||||
current_index = -jump
|
||||
|
||||
next_index = current_index + jump
|
||||
|
||||
if next_index >= len(lines):
|
||||
with open(counter_file, 'w') as f:
|
||||
f.write(str(-jump))
|
||||
raise ValueError(f"Counter has reached the last line (total lines: {len(lines)}). Counter has be reset.")
|
||||
raise ValueError(f"Counter has reached the last line (total lines: {len(lines)}). Counter has been reset.")
|
||||
|
||||
with open(counter_file, 'w') as f:
|
||||
f.write(str(next_index))
|
||||
@@ -211,11 +238,9 @@ class LineSelector:
|
||||
remaining_cycles = max(0, (len(lines) - next_index - 1) // jump + 1)
|
||||
return ([lines[next_index]], remaining_cycles, next_index + 1)
|
||||
|
||||
# Handle normal LOOP mode
|
||||
if LOOP:
|
||||
return (lines, len(lines), 0)
|
||||
|
||||
# Handle RANDOM or line_number selection
|
||||
if RANDOM or line_number == 0:
|
||||
selected = random.choice(lines)
|
||||
else:
|
||||
|
||||
@@ -42,16 +42,15 @@ class ListSelector:
|
||||
# Convert to 0-based index
|
||||
index = selection - 1
|
||||
|
||||
# Get the selected number
|
||||
# Get the selected element
|
||||
selected = numbers[index]
|
||||
|
||||
# Convert to integer and string
|
||||
# Try to convert to integer, but keep string value regardless
|
||||
try:
|
||||
selected_int = int(selected)
|
||||
selected_str = str(selected_int)
|
||||
except ValueError:
|
||||
# If conversion fails, return 0
|
||||
selected_int = 0
|
||||
selected_str = "0"
|
||||
selected_int = 0 # Return 0 for non-numeric values
|
||||
|
||||
selected_str = selected # Return original string value
|
||||
|
||||
return selected_int, selected_str, list_length
|
||||
56
load_text.py
@@ -1,56 +1,72 @@
|
||||
import os
|
||||
from server import PromptServer
|
||||
import aiohttp.web as web
|
||||
|
||||
class LoadTextFromFolder:
|
||||
default_dir = "Bjornulf/Text"
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
"""Define input parameters for the node"""
|
||||
default_dir = "Bjornulf/Text"
|
||||
available_files = []
|
||||
|
||||
if os.path.exists(default_dir):
|
||||
available_files = [f for f in os.listdir(default_dir)
|
||||
if f.lower().endswith('.txt')]
|
||||
|
||||
available_files = cls.get_available_files()
|
||||
if not available_files:
|
||||
available_files = ["no_files_found"]
|
||||
|
||||
return {
|
||||
"required": {
|
||||
"text_file": (available_files, {"default": available_files[0]}),
|
||||
}
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def get_available_files(cls):
|
||||
"""Get list of .txt files recursively from the default directory"""
|
||||
available_files = []
|
||||
if os.path.exists(cls.default_dir):
|
||||
for root, dirs, files in os.walk(cls.default_dir):
|
||||
for file in files:
|
||||
if file.lower().endswith('.txt'):
|
||||
full_path = os.path.join(root, file)
|
||||
rel_path = os.path.relpath(full_path, cls.default_dir)
|
||||
available_files.append(rel_path)
|
||||
available_files.sort()
|
||||
return available_files
|
||||
|
||||
RETURN_TYPES = ("STRING", "STRING", "STRING")
|
||||
RETURN_NAMES = ("text", "filename", "full_path")
|
||||
FUNCTION = "load_text"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def load_text(self, text_file):
|
||||
"""Load text from the selected file"""
|
||||
try:
|
||||
if text_file == "no_files_found":
|
||||
raise ValueError("No text files found in Bjornulf/Text folder")
|
||||
|
||||
filepath = os.path.join("Bjornulf/Text", text_file)
|
||||
|
||||
# Check if file exists
|
||||
filepath = os.path.join(self.default_dir, text_file)
|
||||
if not os.path.exists(filepath):
|
||||
raise ValueError(f"File not found: {filepath}")
|
||||
|
||||
# Get absolute path
|
||||
full_path = os.path.abspath(filepath)
|
||||
|
||||
# Get just the filename
|
||||
filename = os.path.basename(filepath)
|
||||
|
||||
# Read text from file
|
||||
with open(filepath, 'r', encoding='utf-8') as file:
|
||||
text = file.read()
|
||||
|
||||
return (text, filename, full_path)
|
||||
|
||||
except (OSError, IOError) as e:
|
||||
raise ValueError(f"Error loading file: {str(e)}")
|
||||
|
||||
@PromptServer.instance.routes.post("/get_text_files")
|
||||
async def get_text_files(request):
|
||||
try:
|
||||
available_files = LoadTextFromFolder.get_available_files()
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"files": available_files
|
||||
}, status=200)
|
||||
except Exception as e:
|
||||
error_msg = str(e)
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": error_msg
|
||||
}, status=500)
|
||||
|
||||
class LoadTextFromPath:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
|
||||
@@ -8,42 +8,43 @@ class LoaderLoraWithPath:
|
||||
return {
|
||||
"required": {
|
||||
"model": ("MODEL",),
|
||||
"clip": ("CLIP",),
|
||||
"lora_path": ("STRING", {"default": ""}),
|
||||
"strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
|
||||
"strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
|
||||
},
|
||||
"optional": {
|
||||
"clip": ("CLIP",),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("MODEL", "CLIP")
|
||||
FUNCTION = "load_lora" # Added this line
|
||||
RETURN_TYPES = ("MODEL", "CLIP", "STRING")
|
||||
FUNCTION = "load_lora"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def load_lora(self, model, clip, lora_path, strength_model, strength_clip):
|
||||
def load_lora(self, model, lora_path, strength_model, strength_clip, clip=None):
|
||||
try:
|
||||
# Check if path exists
|
||||
if not os.path.isfile(lora_path):
|
||||
print(f"Error: Lora file not found at path: {lora_path}")
|
||||
return (model, clip)
|
||||
return (model, clip if clip is not None else None, lora_path if lora_path is not None else None)
|
||||
|
||||
# Load the Lora file
|
||||
try:
|
||||
lora = comfy.utils.load_torch_file(lora_path)
|
||||
except Exception as e:
|
||||
print(f"Error loading Lora file: {str(e)}")
|
||||
return (model, clip)
|
||||
lora = comfy.utils.load_torch_file(lora_path)
|
||||
|
||||
if clip is not None:
|
||||
model_lora, clip_lora = comfy.sd.load_lora_for_models(
|
||||
model, clip, lora, strength_model, strength_clip
|
||||
)
|
||||
return (model_lora, clip_lora, lora_path if lora_path is not None else None)
|
||||
else:
|
||||
model_lora = model.clone()
|
||||
# Assuming ModelPatcher with diffusion_model
|
||||
state_dict = model_lora.model.diffusion_model.state_dict()
|
||||
for key in lora:
|
||||
if 'unet' in key: # Filter for UNet keys; adjust as needed
|
||||
if key in state_dict:
|
||||
state_dict[key] += strength_model * lora[key]
|
||||
model_lora.model.diffusion_model.load_state_dict(state_dict)
|
||||
return (model_lora, None, lora_path if lora_path is not None else None)
|
||||
|
||||
# Apply the Lora
|
||||
model_lora, clip_lora = comfy.sd.load_lora_for_models(
|
||||
model,
|
||||
clip,
|
||||
lora,
|
||||
strength_model,
|
||||
strength_clip
|
||||
)
|
||||
|
||||
return (model_lora, clip_lora)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error in load_lora: {str(e)}")
|
||||
return (model, clip)
|
||||
return (model, clip if clip is not None else None)
|
||||
62
math_node.py
Normal file
@@ -0,0 +1,62 @@
|
||||
class MathNode:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
"required": {
|
||||
"operation": (["+", "-", "*", "/", "%"], {"default": "+"}),
|
||||
"num_inputs": ("INT", {"default": 2, "min": 2, "max": 100, "step": 1}),
|
||||
},
|
||||
"hidden": {
|
||||
**{f"value_{i}": ("*", {"forceInput": True}) for i in range(1, 101)}
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("FLOAT", "INT", "STRING")
|
||||
FUNCTION = "compute"
|
||||
CATEGORY = "Utilities/Math"
|
||||
|
||||
def compute(self, operation, num_inputs, **kwargs):
|
||||
# Collect and convert inputs to float, defaulting to 0.0 if conversion fails or input is missing
|
||||
values = []
|
||||
for i in range(1, num_inputs + 1):
|
||||
key = f"value_{i}"
|
||||
raw_value = kwargs.get(key, 0.0)
|
||||
try:
|
||||
# Attempt to convert any input to float
|
||||
value = float(raw_value)
|
||||
except (ValueError, TypeError):
|
||||
value = 0.0 # Fallback to 0.0 if conversion fails
|
||||
values.append(value)
|
||||
|
||||
# If no valid values, return 0.0, 0, "0.0"
|
||||
if not values:
|
||||
return (0.0, 0, "0.0")
|
||||
|
||||
# Initialize result with the first value
|
||||
result = values[0]
|
||||
|
||||
# Apply the selected operation cumulatively from left to right
|
||||
for val in values[1:]:
|
||||
if operation == "+":
|
||||
result += val
|
||||
elif operation == "-":
|
||||
result -= val
|
||||
elif operation == "*":
|
||||
result *= val
|
||||
elif operation == "/":
|
||||
if val == 0:
|
||||
result = 0.0 # Handle division by zero
|
||||
else:
|
||||
result /= val
|
||||
elif operation == "%":
|
||||
if val == 0:
|
||||
result = 0.0 # Handle modulo by zero
|
||||
else:
|
||||
result %= val
|
||||
|
||||
# Create integer and string versions of the result
|
||||
int_result = int(result) # Truncate to integer
|
||||
str_result = str(result) # Convert to string
|
||||
|
||||
# Return the three outputs
|
||||
return (result, int_result, str_result)
|
||||
224
note_image.py
@@ -9,27 +9,22 @@ import folder_paths
|
||||
import node_helpers
|
||||
from aiohttp import web
|
||||
|
||||
class ImageNote(SaveImage):
|
||||
class ImageNote:
|
||||
def __init__(self):
|
||||
self.output_dir = folder_paths.get_temp_directory()
|
||||
self.type = "temp"
|
||||
self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for _ in range(5))
|
||||
self.compress_level = 1
|
||||
# Directory to store notes, created if it doesn’t exist
|
||||
self.note_dir = os.path.join("ComfyUI", "Bjornulf", "imageNote")
|
||||
os.makedirs(self.note_dir, exist_ok=True)
|
||||
|
||||
# Store last image path and hash to prevent unnecessary reloading
|
||||
self.last_image_path = None
|
||||
self.last_image_hash = None
|
||||
self.last_output_images = None
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
"""Define the input types for the node."""
|
||||
return {
|
||||
"optional": {
|
||||
"images": ("IMAGE", ),
|
||||
"image_path": ("STRING", {"default": ""}),
|
||||
"note_text": ("STRING", {"default": "", "multiline": True})
|
||||
"note": ("STRING", {"default": ""}),
|
||||
"note_2": ("STRING", {"default": ""}),
|
||||
"note_3": ("STRING", {"default": ""})
|
||||
},
|
||||
"hidden": {
|
||||
"prompt": "PROMPT",
|
||||
@@ -37,136 +32,164 @@ class ImageNote(SaveImage):
|
||||
},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("STRING", "STRING")
|
||||
RETURN_NAMES = ("image_path", "note")
|
||||
FUNCTION = "process_image"
|
||||
OUTPUT_NODE = True
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def compute_md5(self, image):
|
||||
image_bytes = image.tobytes() if isinstance(image, Image.Image) else image
|
||||
"""Compute MD5 hash of an image for note association."""
|
||||
if isinstance(image, Image.Image):
|
||||
image_bytes = image.tobytes()
|
||||
elif isinstance(image, torch.Tensor):
|
||||
image_bytes = (image.numpy() * 255).astype(np.uint8).tobytes()
|
||||
else:
|
||||
image_bytes = image
|
||||
return hashlib.md5(image_bytes).hexdigest()
|
||||
|
||||
def process_image(self, images=None, image_path="", note_text="", prompt=None, extra_pnginfo=None):
|
||||
output_images = None
|
||||
output_note_text = ""
|
||||
def process_image(self, images=None, image_path="", note="", note_2="", note_3="", prompt=None, extra_pnginfo=None):
|
||||
"""Process the image and associate all notes."""
|
||||
output_note = ""
|
||||
ui_images = []
|
||||
|
||||
# If images are given, process them
|
||||
if images is not None and len(images) > 0:
|
||||
output_images = images
|
||||
input_dir = folder_paths.get_input_directory()
|
||||
output_dir = folder_paths.get_output_directory()
|
||||
temp_dir = folder_paths.get_temp_directory()
|
||||
|
||||
# Collect all non-empty notes
|
||||
all_notes = [n for n in [note, note_2, note_3] if n]
|
||||
|
||||
# Case 1: Image provided via file path
|
||||
if image_path and os.path.isfile(image_path):
|
||||
image = Image.open(image_path).convert("RGB")
|
||||
image_hash = self.compute_md5(image)
|
||||
|
||||
# Determine image reference for UI
|
||||
if image_path.startswith(input_dir):
|
||||
type_ = "input"
|
||||
filename = os.path.relpath(image_path, input_dir)
|
||||
elif image_path.startswith(output_dir):
|
||||
type_ = "output"
|
||||
filename = os.path.relpath(image_path, output_dir)
|
||||
else:
|
||||
temp_filename = f"{image_hash}.png"
|
||||
temp_path = os.path.join(temp_dir, temp_filename)
|
||||
if not os.path.exists(temp_path):
|
||||
image.save(temp_path)
|
||||
type_ = "temp"
|
||||
filename = temp_filename
|
||||
|
||||
# Handle notes: append new notes and read all
|
||||
note_path = os.path.join(self.note_dir, f"{image_hash}.txt")
|
||||
if all_notes:
|
||||
with open(note_path, "a", encoding="utf-8") as f:
|
||||
for n in all_notes:
|
||||
f.write(n + "\n")
|
||||
if os.path.exists(note_path):
|
||||
with open(note_path, "r", encoding="utf-8") as f:
|
||||
output_note = f.read().rstrip() # Remove trailing newline
|
||||
|
||||
ui_images = [{"filename": filename, "subfolder": "", "type": type_}]
|
||||
result = (image_path, output_note)
|
||||
|
||||
# Case 2: Image provided as tensor
|
||||
elif images is not None and len(images) > 0:
|
||||
image_np = (images[0].numpy() * 255).astype(np.uint8)
|
||||
image = Image.fromarray(image_np)
|
||||
image_hash = self.compute_md5(image)
|
||||
|
||||
temp_filename = f"{image_hash}.png"
|
||||
temp_path = os.path.join(temp_dir, temp_filename)
|
||||
if not os.path.exists(temp_path):
|
||||
image.save(temp_path)
|
||||
|
||||
# Handle notes: append new notes and read all
|
||||
note_path = os.path.join(self.note_dir, f"{image_hash}.txt")
|
||||
if all_notes:
|
||||
with open(note_path, "a", encoding="utf-8") as f:
|
||||
for n in all_notes:
|
||||
f.write(n + "\n")
|
||||
if os.path.exists(note_path):
|
||||
with open(note_path, "r", encoding="utf-8") as f:
|
||||
output_note_text = f.read()
|
||||
elif note_text:
|
||||
with open(note_path, "w", encoding="utf-8") as f:
|
||||
f.write(note_text)
|
||||
output_note_text = note_text
|
||||
output_note = f.read().rstrip()
|
||||
|
||||
# If image_path is empty, do nothing
|
||||
elif not image_path:
|
||||
# logger.debug("No image path provided, skipping processing.")
|
||||
return None, ""
|
||||
ui_images = [{"filename": temp_filename, "subfolder": "", "type": "temp"}]
|
||||
result = (temp_path, output_note)
|
||||
|
||||
# Process image from path only if it has changed
|
||||
elif os.path.isfile(image_path):
|
||||
if image_path == self.last_image_path:
|
||||
# logger.debug("Image path has not changed, skipping reload.")
|
||||
return super().save_images(images=self.last_output_images, prompt=prompt, extra_pnginfo=extra_pnginfo)
|
||||
# Case 3: No image provided
|
||||
else:
|
||||
result = ("", "")
|
||||
|
||||
image = Image.open(image_path).convert("RGB")
|
||||
image_hash = self.compute_md5(image)
|
||||
return {"ui": {"images": ui_images}, "result": result}
|
||||
|
||||
if image_hash == self.last_image_hash:
|
||||
# logger.debug("Image content has not changed, skipping reload.")
|
||||
return super().save_images(images=self.last_output_images, prompt=prompt, extra_pnginfo=extra_pnginfo)
|
||||
|
||||
note_path = os.path.join(self.note_dir, f"{image_hash}.txt")
|
||||
if os.path.exists(note_path):
|
||||
with open(note_path, "r", encoding="utf-8") as f:
|
||||
output_note_text = f.read()
|
||||
elif note_text:
|
||||
with open(note_path, "w", encoding="utf-8") as f:
|
||||
f.write(note_text)
|
||||
output_note_text = note_text
|
||||
|
||||
image_np = np.array(image).astype(np.float32) / 255.0
|
||||
output_images = torch.from_numpy(image_np).unsqueeze(0)
|
||||
|
||||
# Update stored values
|
||||
self.last_image_path = image_path
|
||||
self.last_image_hash = image_hash
|
||||
self.last_output_images = output_images
|
||||
|
||||
return super().save_images(images=output_images, prompt=prompt, extra_pnginfo=extra_pnginfo)
|
||||
class ImageNoteLoadImage:
|
||||
def __init__(self):
|
||||
self.note_dir = os.path.join("ComfyUI", "Bjornulf", "imageNote")
|
||||
os.makedirs(self.note_dir, exist_ok=True)
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(s):
|
||||
base_input_dir = folder_paths.get_input_directory() # Get base input directory
|
||||
input_dir = os.path.join(base_input_dir, "Bjornulf", "imagenote_images") # Specify subdirectory
|
||||
|
||||
# Create the directory if it doesn't exist
|
||||
base_input_dir = folder_paths.get_input_directory()
|
||||
input_dir = os.path.join(base_input_dir, "Bjornulf", "imagenote_images")
|
||||
if not os.path.exists(input_dir):
|
||||
os.makedirs(input_dir, exist_ok=True) # Create directory and parents if needed
|
||||
|
||||
# Filter for image files only
|
||||
os.makedirs(input_dir, exist_ok=True)
|
||||
valid_extensions = ('.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp')
|
||||
files = [f for f in os.listdir(input_dir) if
|
||||
os.path.isfile(os.path.join(input_dir, f)) and
|
||||
f.lower().endswith(valid_extensions)]
|
||||
|
||||
os.path.isfile(os.path.join(input_dir, f)) and
|
||||
f.lower().endswith(valid_extensions)]
|
||||
if not files:
|
||||
# Provide a default option if no files are found
|
||||
files = ["none"]
|
||||
return {
|
||||
"required": {
|
||||
"image": (sorted(files), {"image_upload": True}),
|
||||
"note": ("STRING", {"default": ""}),
|
||||
"note_2": ("STRING", {"default": ""}),
|
||||
"note_3": ("STRING", {"default": ""})
|
||||
}
|
||||
}
|
||||
|
||||
return {"required":
|
||||
{
|
||||
"image": (sorted(files), {"image_upload": True}),
|
||||
# "note": ("STRING", {"default": ""}), # Added multiline option FAILURE
|
||||
"note": ("STRING", {"multiline": True, "lines": 10})
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("IMAGE", "MASK", "STRING", "STRING") # Added note to return types
|
||||
RETURN_NAMES = ("image", "mask", "image_path", "note") # Added note to return names
|
||||
RETURN_TYPES = ("IMAGE", "MASK", "STRING", "STRING")
|
||||
RETURN_NAMES = ("image", "mask", "image_path", "note")
|
||||
FUNCTION = "load_image_alpha"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def load_image_alpha(self, image, note): # Added note parameter
|
||||
def compute_md5(self, image):
    """Return the MD5 hex digest of an image's raw bytes.

    Used as a stable key to associate side-car note files with an image.

    Args:
        image: a torch.Tensor (float values, scaled by 255 before hashing),
            a PIL Image, or an already-bytes-like object.

    Returns:
        str: 32-character hexadecimal MD5 digest.
    """
    # Tensor branch first (order is behavior-equivalent: no object is both a
    # Tensor and a PIL Image).  .detach().cpu() fixes the original
    # image.numpy(), which raises on CUDA or autograd-tracked tensors.
    if isinstance(image, torch.Tensor):
        image_bytes = (image.detach().cpu().numpy() * 255).astype(np.uint8).tobytes()
    elif isinstance(image, Image.Image):
        image_bytes = image.tobytes()
    else:
        # Assumed to already be a bytes-like object.
        image_bytes = image
    return hashlib.md5(image_bytes).hexdigest()
|
||||
|
||||
def load_image_alpha(self, image, note="", note_2="", note_3=""):
|
||||
image_path = folder_paths.get_annotated_filepath(image)
|
||||
|
||||
img = node_helpers.pillow(Image.open, image_path)
|
||||
|
||||
output_images = []
|
||||
output_masks = []
|
||||
w, h = None, None
|
||||
|
||||
excluded_formats = ['MPO']
|
||||
|
||||
for i in ImageSequence.Iterator(img):
|
||||
i = node_helpers.pillow(ImageOps.exif_transpose, i)
|
||||
|
||||
if i.mode == 'I':
|
||||
i = i.point(lambda i: i * (1 / 255))
|
||||
image_converted = i.convert("RGBA") # Renamed to avoid shadowing
|
||||
|
||||
image_converted = i.convert("RGBA")
|
||||
if len(output_images) == 0:
|
||||
w = image_converted.size[0]
|
||||
h = image_converted.size[1]
|
||||
|
||||
if image_converted.size[0] != w or image_converted.size[1] != h:
|
||||
continue
|
||||
|
||||
image_np = np.array(image_converted).astype(np.float32) / 255.0 # Renamed to avoid shadowing
|
||||
image_tensor = torch.from_numpy(image_np)[None,] # Renamed to avoid shadowing
|
||||
image_np = np.array(image_converted).astype(np.float32) / 255.0
|
||||
image_tensor = torch.from_numpy(image_np)[None,]
|
||||
if 'A' in i.getbands():
|
||||
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
|
||||
mask = 1. - torch.from_numpy(mask)
|
||||
else:
|
||||
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
|
||||
output_images.append(image_tensor) # Renamed to avoid shadowing
|
||||
output_images.append(image_tensor)
|
||||
output_masks.append(mask.unsqueeze(0))
|
||||
|
||||
if len(output_images) > 1 and img.format not in excluded_formats:
|
||||
@@ -176,19 +199,34 @@ class ImageNoteLoadImage:
|
||||
output_image = output_images[0]
|
||||
output_mask = output_masks[0]
|
||||
|
||||
return (output_image, output_mask, image_path, note) # Added note to return tuple
|
||||
# Compute hash from the first image
|
||||
first_image = output_image[0] if output_image.dim() == 4 else output_image
|
||||
image_hash = self.compute_md5(first_image)
|
||||
|
||||
# Handle notes: append new notes and read all
|
||||
all_notes = [n for n in [note, note_2, note_3] if n]
|
||||
note_path = os.path.join(self.note_dir, f"{image_hash}.txt")
|
||||
if all_notes:
|
||||
with open(note_path, "a", encoding="utf-8") as f:
|
||||
for n in all_notes:
|
||||
f.write(n + "\n")
|
||||
output_note = ""
|
||||
if os.path.exists(note_path):
|
||||
with open(note_path, "r", encoding="utf-8") as f:
|
||||
output_note = f.read().rstrip()
|
||||
|
||||
return (output_image, output_mask, image_path, output_note)
|
||||
|
||||
@classmethod
def IS_CHANGED(s, image, note, note_2, note_3):
    """Cache key: SHA-256 of the image file plus all three note fields,
    so editing the file or any note re-triggers the node."""
    image_path = folder_paths.get_annotated_filepath(image)
    with open(image_path, 'rb') as fh:
        file_digest = hashlib.sha256(fh.read()).hexdigest()
    return file_digest + str(note) + str(note_2) + str(note_3)
|
||||
|
||||
@classmethod
def VALIDATE_INPUTS(s, image):
    """Return True when the image resolves to an existing annotated input
    file; otherwise return an error string for the UI."""
    if folder_paths.exists_annotated_filepath(image):
        return True
    return "Invalid image file: {}".format(image)
|
||||
@@ -22,4 +22,7 @@ class DisplayNote:
|
||||
|
||||
def display_text_pass(self, any, display_text):
    """Forward the wired value unchanged; `display_text` is only consumed
    by the front-end display widget."""
    # When `any` is None both arms yield (None,), so the distinction is
    # purely cosmetic.
    return (None,) if any is None else (any,)
|
||||
@@ -1,16 +1,7 @@
|
||||
import torch
|
||||
import base64
|
||||
from PIL import Image
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
from io import BytesIO
|
||||
import requests
|
||||
import json
|
||||
import ollama
|
||||
from ollama import Client
|
||||
# import logging
|
||||
import hashlib
|
||||
from typing import Dict, Any
|
||||
from PIL.PngImagePlugin import PngInfo
|
||||
|
||||
class OllamaImageVision:
|
||||
@classmethod
|
||||
@@ -18,46 +9,34 @@ class OllamaImageVision:
|
||||
return {
|
||||
"required": {
|
||||
"IMAGE": ("IMAGE",),
|
||||
"output_selection": ("INT", {"default": 7, "min": 1, "max": 8,
|
||||
"step": 1, "display": "slider", "label": "Number of outputs (1-8)"}),
|
||||
"process_below_output_selection": ("BOOLEAN", {"default": False, "label": "Process all up to selection"})
|
||||
"OLLAMA_VISION_PROMPT": ("STRING", {"forceInput": True}),
|
||||
"vram_retention_minutes": ("INT", {
|
||||
"default": 0,
|
||||
"min": 0,
|
||||
"max": 99
|
||||
}),
|
||||
"seed": ("INT", {
|
||||
"default": -1,
|
||||
"min": -1,
|
||||
"max": 2147483647
|
||||
}),
|
||||
"answer_single_line": ("BOOLEAN", {"default": False}),
|
||||
},
|
||||
"optional": {
|
||||
"OLLAMA_CONFIG": ("OLLAMA_CONFIG", {"forceInput": True}),
|
||||
"context": ("STRING", {"multiline": True}),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING",)
|
||||
RETURN_NAMES = ("1 - Basic Description", "2 - Advanced Description", "3 - Characters Description", "4 - Object Recognition", "5 - Semantic Understanding", "6 - Contextual Analysis", "7 - SDXL Prompt (words)", "8 - FLUX Prompt (sentences)")
|
||||
FUNCTION = "process_image_base64"
|
||||
RETURN_TYPES = ("STRING",)
|
||||
RETURN_NAMES = ("response",)
|
||||
FUNCTION = "process_image"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def __init__(self):
    # Placeholder for a cached Ollama client.
    # NOTE(review): process_image builds a local Client per call and never
    # assigns this attribute — confirm whether it is still needed.
    self.client = None
||||
|
||||
def create_prompts(self):
    """Return the canonical vision prompt for each analysis type.

    The first six keys are descriptive analyses of increasing depth; the
    "SDXL" and "FLUX" keys instruct the model to rewrite the image into a
    generation prompt tailored to those two model families.
    """
    return {
        # Do not describe what isn't there.
        "basic": "Summarize the main content of the image in one concise sentence.",
        "advanced": "Describe the scene thoroughly, capturing intricate details, colors, textures, and any significant actions or events occurring in the image.",
        "characters": "Describe each character's physical appearance in vivid, descriptive terms, including clothing, expressions, body language, and notable features.",
        "objects": "Identify and describe the primary objects in the image, detailing their size, position, color, and any unique characteristics.",
        "semantic": "Analyze the image's mood, environment, and implied meaning. Discuss any symbolic elements, artistic style, and possible intent or story conveyed.",
        "context": "Describe the relationships and interactions between objects and characters, focusing on spatial arrangement, implied actions, and any contextual clues suggesting connections or purpose.",
        "SDXL": "Describe the image. The goal is to generate a concise, detailed, and effective description. Guidelines for describing the image:- Focus on visual elements, be specific about objects, colors, textures, and compositions. Use adjectives to describe key features. Avoid complete sentences or narrative descriptions. Prioritize important elements over minor details. Your input will be a detailed description of an image. Process this description and refine it into a prompt suitable for stable diffusion models using the following steps: 1. Identify the most important visual elements and characteristics. 2. Condense the description into a series of comma-separated phrases or words. 3. Prioritize specific, descriptive terms over general ones. Here are two examples of good outputs: Example 1:vibrant sunset, tropical beach, silhouetted palm trees, calm ocean, orange and purple sky, wispy clouds, golden sand, gentle waves, beachgoers in distance, serene atmosphere, warm lighting, panoramic view. Example 2: steampunk cityscape, towering clockwork structures, brass and copper tones, billowing steam, airships in sky, cobblestone streets, Victorian-era citizens, gears and pipes visible, warm sepia lighting, hazy atmosphere, intricate mechanical details. Your final output should be a single line of text containing the refined prompt, without any additional explanation or commentary. IMPORTANT : DO NOT Include information about the overall style or artistic technique.",
        "FLUX": "Describe the given image in a detailed and structured format that is specifically designed for image generation. Use descriptive language to capture the essence of the image, including the environment, objects, characters, lighting, textures, and any other notable elements. The description must use some of these 9 points : 1. Scene Type: [Outdoor/Indoor/Abstract/Fantasy/Realistic/etc.] 2. Primary Subject: [Main focus or characters in the scene.] 3. Environment Details: [Describe the setting in vivid detail, including any landscapes, architecture, or surroundings.] 4. Lighting: [Specify the type, color, and intensity of the lighting.] 5. Colors and Tones: [Dominant colors and overall mood.] 6. Perspective: [Camera angle or viewpoint—close-up, wide shot, aerial, etc.] 7. Texture and Details: [Surface materials, patterns, and fine details.] 8. Emotion or Atmosphere: [Mood conveyed by the scene—serene, ominous, lively, etc.] 9. Unique Elements: [Special features or focal points that make the image distinctive.] For example: 1. Scene Type: Outdoor, natural landscape. 2. Primary Subject: A majestic lion standing atop a rocky hill. 3. Environment Details: A vast savannah with tall golden grass, sparse acacia trees, and distant mountains under a clear blue sky. 4. Lighting: Bright, warm sunlight casting long shadows. 5. Colors and Tones: Predominantly gold and blue, with subtle earthy browns and greens. 6. Perspective: Mid-range shot, slightly low angle to emphasize the lion's dominance. 7. Texture and Details: The lion's fur appears detailed with visible strands, and the rocks have a rough, weathered texture. 8. Emotion or Atmosphere: Majestic, powerful, and serene. 9. Unique Elements: A subtle wind effect in the grass and mane, adding movement to the scene. IMPORTANT : DO NOT Include information about the overall style or artistic technique."
    }
|
||||
|
||||
|
||||
def process_image_base64(self, IMAGE, OLLAMA_CONFIG=None, output_selection=6, process_below_output_selection=True):
|
||||
def process_image(self, IMAGE, OLLAMA_VISION_PROMPT, answer_single_line, vram_retention_minutes, seed, OLLAMA_CONFIG=None, context=None):
|
||||
from ollama import Client
|
||||
|
||||
# Default OLLAMA_CONFIG if not provided
|
||||
if OLLAMA_CONFIG is None:
|
||||
OLLAMA_CONFIG = {
|
||||
"model": "moondream",
|
||||
@@ -65,65 +44,78 @@ class OllamaImageVision:
|
||||
}
|
||||
selected_model = OLLAMA_CONFIG["model"]
|
||||
ollama_url = OLLAMA_CONFIG["url"]
|
||||
|
||||
|
||||
# Convert images to base64
|
||||
images_base64 = []
|
||||
for img in IMAGE:
|
||||
# Convert tensor to numpy array
|
||||
numpy_img = (255. * img.cpu().numpy()).clip(0, 255).astype(np.uint8)
|
||||
|
||||
# Create PIL Image
|
||||
pil_image = Image.fromarray(numpy_img)
|
||||
|
||||
# Create a BytesIO object
|
||||
buffered = BytesIO()
|
||||
|
||||
# Save the image into the BytesIO object
|
||||
pil_image.save(buffered, format="PNG")
|
||||
|
||||
# Get the byte value and encode to base64
|
||||
img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
|
||||
images_base64.append(img_str)
|
||||
|
||||
# Clean up
|
||||
buffered.close()
|
||||
|
||||
# Initialize client
|
||||
client = Client(host=ollama_url)
|
||||
|
||||
# Get prompts
|
||||
prompts = list(self.create_prompts().items())
|
||||
|
||||
# Process outputs based on selection and process_below_output_selection flag
|
||||
responses = []
|
||||
for i in range(8): # Always prepare 5 slots for output
|
||||
if process_below_output_selection:
|
||||
# Process all outputs up to output_selection
|
||||
if i < output_selection:
|
||||
prompt_type, prompt = prompts[i]
|
||||
response = client.generate(
|
||||
model=selected_model,
|
||||
prompt=prompt,
|
||||
images=images_base64
|
||||
)
|
||||
responses.append(response['response'].strip())
|
||||
else:
|
||||
responses.append("")
|
||||
else:
|
||||
# Process only the selected output (output_selection - 1)
|
||||
if i == (output_selection - 1):
|
||||
prompt_type, prompt = prompts[i]
|
||||
response = client.generate(
|
||||
model=selected_model,
|
||||
prompt=prompt,
|
||||
images=images_base64
|
||||
)
|
||||
responses.append(response['response'].strip())
|
||||
else:
|
||||
responses.append("")
|
||||
|
||||
return tuple(responses)
|
||||
# Construct the final prompt
|
||||
if context:
|
||||
final_prompt = context + "\n" + OLLAMA_VISION_PROMPT
|
||||
else:
|
||||
final_prompt = OLLAMA_VISION_PROMPT
|
||||
|
||||
# Generate response with the final prompt
|
||||
response = client.generate(
|
||||
model=selected_model,
|
||||
prompt=final_prompt,
|
||||
images=images_base64,
|
||||
keep_alive=f"{vram_retention_minutes}m"
|
||||
)
|
||||
|
||||
if answer_single_line:
|
||||
response['response'] = ' '.join(response['response'].split())
|
||||
|
||||
return (response['response'].strip(),)
|
||||
|
||||
def handle_error(self, error_message: str) -> tuple:
    """Build the error payload: the same 'Error: …' string repeated for
    each of the node's four outputs."""
    msg = f"Error: {error_message}"
    return (msg,) * 4
|
||||
class OllamaVisionPromptSelector: #Prompts made for gemma3
    """Emit a ready-made vision prompt string (OLLAMA_VISION_PROMPT).

    Picks one of the predefined prompts by `prompt_type` and wraps it with an
    optional prefix/suffix.  With prompt_type "NONE" only prefix+suffix is
    emitted (note: "NONE" is not a key of get_prompts, so the else branch
    would produce the same result via prompts.get(..., "")).
    """
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt_type": (["NONE", "basic", "advanced", "characters", "objects", "semantic", "basic_action", "advanced_action", "context", "SDXL", "FLUX", "video"],),
            },
            "optional": {
                "prefix_custom_prompt": ("STRING", {"multiline": True, "default": ""}),
                "suffix_custom_prompt": ("STRING", {"multiline": True, "default": "Do not include any introductory text or explanations, make it a clean one line answer."}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("OLLAMA_VISION_PROMPT",)
    FUNCTION = "select_prompt"
    CATEGORY = "Bjornulf"

    def get_prompts(self):
        """Returns a dictionary of predefined prompts for each prompt_type."""
        return {
            "basic": "Summarize the main content of the image in one concise sentence.",
            "advanced": "Describe the scene thoroughly, capturing intricate details, colors, textures, and significant actions.",
            "characters": "Describe each character's physical appearance, including clothing, expressions, and notable features.",
            "objects": "Identify and describe the primary objects, detailing their size, position, color, and characteristics.",
            "semantic": "Analyze the image's mood, environment, and implied meaning, focusing on symbolic elements and atmosphere.",
            "context": "Describe relationships and interactions between objects and characters, focusing on spatial arrangement and actions.",
            "basic_action": "Describe the action that is happening in the image.",
            "advanced_action": "Describe the action that is happening in the image in details.",
            "SDXL": "Generate a concise, comma-separated list of visual elements from the image, suitable for Stable Diffusion XL. Focus on objects, colors, textures, and composition. Use adjectives for key features. Output only the final prompt. Example: vibrant sunset, tropical beach, silhouetted palm trees, calm ocean, orange sky.",
            "FLUX": "Generate a detailed, structured description of the image for FLUX, including scene type, primary subject, environment details, lighting, colors, perspective, textures, atmosphere, and unique elements. Use concise descriptive phrases. Output only the final prompt. Example: A serene, moonlit forest clearing with a glowing, ethereal portal, surrounded by ancient, towering trees, casting long shadows in the silver light.",
            "video": "Analyze the image to identify the characters. Then, imagine and describe in a single sentence a random dynamic actions they might perform next, involving movement or interaction, based on their appearance and the scene's context. Example : The woman is smirking seductively while staring at the camera, then suddenly winks. Do not add new characters that are not in the image."
        }

    def select_prompt(self, prompt_type, prefix_custom_prompt="", suffix_custom_prompt=""):
        """Assemble prefix + selected prompt + suffix and return it as a 1-tuple."""
        if prompt_type == "NONE":
            selected_prompt = prefix_custom_prompt + suffix_custom_prompt
        else:
            prompts = self.get_prompts()
            selected_prompt = prefix_custom_prompt + prompts.get(prompt_type, "") + suffix_custom_prompt
        return (selected_prompt,) # Return as a tuple for ComfyUI compatibility
|
||||
@@ -1,7 +1,5 @@
|
||||
import requests
|
||||
import json
|
||||
import ollama
|
||||
from ollama import Client
|
||||
import logging
|
||||
import hashlib
|
||||
from typing import Dict, Any
|
||||
@@ -47,6 +45,8 @@ class OllamaTalk:
|
||||
current_instance = None
|
||||
|
||||
def __init__(self):
    # Imported here rather than at module top — NOTE(review): presumably so
    # the node registry still loads when the ollama package is absent;
    # confirm before moving these to file level.
    import ollama
    from ollama import Client
    self.last_content_hash = None  # NOTE(review): looks like a change-detection hash of the last processed content — confirm
    self.waiting = False  # NOTE(review): appears to flag "waiting for user/next step" — confirm against the rest of the class
    self.OLLAMA_CONFIG = None  # NOTE(review): presumably filled from an OLLAMA_CONFIG input later — confirm
|
||||
@@ -8,6 +8,7 @@ import os
|
||||
import io
|
||||
import sys
|
||||
import random
|
||||
# from comfy_execution.graph import ExecutionBlocker
|
||||
|
||||
class Everything(str):
|
||||
def __ne__(self, __value: object) -> bool:
|
||||
@@ -68,6 +69,7 @@ class PauseResume:
|
||||
PauseResume.should_stop = False # Reset for next run
|
||||
PauseResume.is_paused = True
|
||||
raise Exception("Workflow stopped by user")
|
||||
# return (ExecutionBlocker("Workflow stopped by user"),) # Return ExecutionBlocker to stop gracefully, but error on next node.
|
||||
|
||||
PauseResume.is_paused = True
|
||||
PauseResume.should_stop = False
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[project]
|
||||
name = "bjornulf_custom_nodes"
|
||||
description = "133 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech Kokoro, etc..."
|
||||
version = "0.76"
|
||||
description = "162 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras, generate characters and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech, etc..."
|
||||
version = "0.77"
|
||||
license = {file = "LICENSE"}
|
||||
|
||||
[project.urls]
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
ollama
|
||||
pydub
|
||||
opencv-python
|
||||
faster_whisper
|
||||
# Remove faster-whisper for the people using python 3.13 (old pkgutil)
|
||||
# faster_whisper
|
||||
ffmpeg-python
|
||||
civitai-py
|
||||
fal_client
|
||||
sounddevice
|
||||
#24, remove kokoro install by default (need to do that manually to use the kokoro node)
|
||||
# kokoro_onnx
|
||||
langdetect
|
||||
spacy
|
||||
textblob
|
||||
dill
|
||||
@@ -24,9 +24,12 @@ class SaveImageToFolder(SaveImage):
|
||||
|
||||
# Call the parent's save_images with filename_prefix set to "folder_name/"
|
||||
# This will make the parent class save to the custom folder
|
||||
return super().save_images(
|
||||
images=images,
|
||||
filename_prefix=f"{folder_name}/_",
|
||||
prompt=prompt,
|
||||
extra_pnginfo=extra_pnginfo
|
||||
)
|
||||
if images is None:
|
||||
return (None,)
|
||||
else:
|
||||
return super().save_images(
|
||||
images=images,
|
||||
filename_prefix=f"{folder_name}/_",
|
||||
prompt=prompt,
|
||||
extra_pnginfo=extra_pnginfo
|
||||
)
|
||||
80
save_tmp_audio.py
Normal file
@@ -0,0 +1,80 @@
|
||||
import os
|
||||
import shutil
|
||||
import numpy as np
|
||||
import soundfile as sf
|
||||
import torch
|
||||
|
||||
class SaveTmpAudio:
    """Save an AUDIO dict (or copy an existing audio file) to ./output/tmp_api.*

    Gives external API callers a fixed, predictable path from which to fetch
    the workflow's audio result.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "optional": {
                "audio": ("AUDIO",),        # ComfyUI audio dict: {"waveform": tensor, "sample_rate": int}
                "audio_path": ("STRING",),  # path to an existing .wav/.mp3 file
            },
        }

    FUNCTION = "save_audio"
    RETURN_TYPES = ()
    OUTPUT_NODE = True
    CATEGORY = "Bjornulf"

    def save_audio(self, audio=None, audio_path=None):
        """Write audio to ./output/tmp_api.<ext> and return UI metadata.

        Exactly one of ``audio`` or ``audio_path`` should be supplied;
        ``audio`` takes precedence when both are present.

        Raises:
            ValueError: neither input given, malformed AUDIO dict, waveform
                with too many dimensions, or unsupported file extension.
            TypeError: waveform is not a PyTorch tensor.
            FileNotFoundError: ``audio_path`` does not exist.
        """
        # Ensure the output directory exists
        os.makedirs("./output", exist_ok=True)

        # Check if neither input is provided
        if audio is None and audio_path is None:
            raise ValueError("Either 'audio' or 'audio_path' must be provided")

        # Case 1: Handle AUDIO input if provided
        if audio is not None:
            # Validate that audio is a dictionary with required keys
            if not isinstance(audio, dict):
                raise ValueError("AUDIO input must be a dictionary with 'waveform' and 'sample_rate'")
            if 'waveform' not in audio or 'sample_rate' not in audio:
                raise ValueError("AUDIO dictionary must contain 'waveform' and 'sample_rate' keys")

            # Extract waveform and sample rate
            waveform = audio['waveform']
            sample_rate = audio['sample_rate']

            # Ensure waveform is a PyTorch tensor
            if not isinstance(waveform, torch.Tensor):
                raise TypeError(f"Waveform must be a PyTorch tensor, got {type(waveform)}")

            # Squeeze waveform to remove extra dimensions (e.g., from (1, 1, N) to (N,))
            waveform = waveform.squeeze()

            # Convert to NumPy array
            audio_np = waveform.cpu().numpy()

            # Validate the shape after squeezing
            if audio_np.ndim > 2:
                raise ValueError(f"Audio data has too many dimensions after squeezing: {audio_np.shape}")

            # Scale floating-point data to 16-bit integers (assuming range [-1, 1])
            # NOTE(review): values outside [-1, 1] wrap around after the int16
            # cast (no clipping is applied) — confirm inputs are normalized.
            if audio_np.dtype in (np.float32, np.float64):
                audio_np = (audio_np * 32767).astype(np.int16)

            # Save as WAV file
            # NOTE(review): sf.write expects (frames, channels); a stereo
            # waveform squeezed to (channels, frames) would be written
            # transposed — confirm inputs are mono or pre-transposed.
            filename = "./output/tmp_api.wav"
            sf.write(filename, audio_np, sample_rate)

        # Case 2: Handle audio_path input if audio is not provided
        elif audio_path is not None:
            # Verify the file exists
            if not os.path.exists(audio_path):
                raise FileNotFoundError(f"Audio file not found: {audio_path}")

            # Check for supported file extensions
            ext = os.path.splitext(audio_path)[1].lower()
            if ext not in ('.wav', '.mp3'):
                raise ValueError("audio_path must be a .wav or .mp3 file")

            # Copy the file to the output directory
            filename = f"./output/tmp_api{ext}"
            shutil.copy(audio_path, filename)

        # Return UI information for ComfyUI
        return {"ui": {"audio": [{"filename": filename, "type": "output"}]}}
|
||||
@@ -3,6 +3,7 @@ import numpy as np
|
||||
from PIL import Image
|
||||
import json
|
||||
from PIL.PngImagePlugin import PngInfo
|
||||
import torch
|
||||
|
||||
class SaveTmpImage:
|
||||
@classmethod
|
||||
|
||||
34
save_tmp_video.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import os
|
||||
import shutil
|
||||
|
||||
class SaveTmpVideo:
    """Copy a video file to ./output/tmp_api.<ext> so external API callers
    can fetch the workflow's video result from a fixed, predictable path."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "video_path": ("STRING", {"forceInput": True}),
            },
        }

    FUNCTION = "save_video"
    RETURN_TYPES = ()
    OUTPUT_NODE = True
    CATEGORY = "Bjornulf"

    def save_video(self, video_path):
        """Validate ``video_path`` and copy it to ./output/tmp_api.<ext>.

        Returns:
            dict: ComfyUI "ui" metadata pointing at the copied file.

        Raises:
            FileNotFoundError: the source file does not exist.
            ValueError: the extension is not .mp4, .mkv, or .webm.
        """
        # Ensure the output directory exists
        os.makedirs("./output", exist_ok=True)

        # Verify the video file exists
        if not os.path.exists(video_path):
            raise FileNotFoundError(f"Video file not found: {video_path}")

        # Get the file extension and reject unsupported containers
        ext = os.path.splitext(video_path)[1].lower()
        if ext not in ('.mp4', '.mkv', '.webm'):
            raise ValueError("video_path must be a .mp4, .mkv, or .webm file")

        # Copy the file to the output directory with the same extension
        filename = f"./output/tmp_api{ext}"
        shutil.copy(video_path, filename)

        # Fix: the message previously interpolated nothing; report the actual target path.
        print(f"Temporary video saved as: {filename}")
        return {"ui": {"videos": [{"filename": filename, "type": "output"}]}}
|
||||
BIN
screenshots/audio_preview.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
screenshots/basic_math.png
Normal file
|
After Width: | Height: | Size: 82 KiB |
BIN
screenshots/execute_worflow.png
Normal file
|
After Width: | Height: | Size: 290 KiB |
BIN
screenshots/fix_face_1.png
Normal file
|
After Width: | Height: | Size: 331 KiB |
BIN
screenshots/fix_face_2_zoom.png
Normal file
|
After Width: | Height: | Size: 317 KiB |
BIN
screenshots/fix_face_3.png
Normal file
|
After Width: | Height: | Size: 399 KiB |
BIN
screenshots/fix_face_4_ignore.png
Normal file
|
After Width: | Height: | Size: 389 KiB |
BIN
screenshots/fix_face_5_person.png
Normal file
|
After Width: | Height: | Size: 444 KiB |
BIN
screenshots/fix_face_6_preview.png
Normal file
|
After Width: | Height: | Size: 149 KiB |
BIN
screenshots/grid_assemble_1.png
Normal file
|
After Width: | Height: | Size: 446 KiB |
BIN
screenshots/grid_assemble_2.png
Normal file
|
After Width: | Height: | Size: 453 KiB |
BIN
screenshots/load_civit.png
Normal file
|
After Width: | Height: | Size: 170 KiB |
|
Before Width: | Height: | Size: 134 KiB After Width: | Height: | Size: 174 KiB |
|
Before Width: | Height: | Size: 50 KiB After Width: | Height: | Size: 56 KiB |
BIN
screenshots/remote_vae.png
Normal file
|
After Width: | Height: | Size: 25 KiB |
BIN
screenshots/remote_vae_tiled.png
Normal file
|
After Width: | Height: | Size: 25 KiB |
BIN
screenshots/savetmp_audio1.png
Normal file
|
After Width: | Height: | Size: 17 KiB |
BIN
screenshots/savetmp_audio2.png
Normal file
|
After Width: | Height: | Size: 24 KiB |
BIN
screenshots/savetmp_video.png
Normal file
|
After Width: | Height: | Size: 17 KiB |
BIN
screenshots/split_grid.png
Normal file
|
After Width: | Height: | Size: 318 KiB |
BIN
screenshots/style_gen_1.png
Normal file
|
After Width: | Height: | Size: 208 KiB |
BIN
screenshots/style_gen_2.png
Normal file
|
After Width: | Height: | Size: 263 KiB |
BIN
screenshots/style_gen_3.png
Normal file
|
After Width: | Height: | Size: 245 KiB |
|
Before Width: | Height: | Size: 272 KiB After Width: | Height: | Size: 290 KiB |
BIN
screenshots/text_ana_1.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
screenshots/text_ana_2.png
Normal file
|
After Width: | Height: | Size: 91 KiB |
BIN
screenshots/video_text_generator.png
Normal file
|
After Width: | Height: | Size: 32 KiB |
@@ -1,7 +1,6 @@
|
||||
class Everything(str):
    """Wildcard type string: it is never unequal to anything, so ComfyUI
    treats it as compatible with every socket type."""

    def __ne__(self, _):
        # All inequality comparisons fail, i.e. this "equals" everything.
        return False
|
||||
|
||||
class ShowText:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
@@ -21,7 +20,6 @@ class ShowText:
|
||||
if isinstance(value, int):
|
||||
return 'integer'
|
||||
elif isinstance(value, float):
|
||||
# Check if it has a decimal part
|
||||
if value % 1 == 0:
|
||||
return 'float' if str(value).endswith('.0') else 'integer'
|
||||
return 'float'
|
||||
@@ -39,5 +37,5 @@ class ShowText:
|
||||
return 'other type'
|
||||
|
||||
def show_text(self, text_int_float):
    """Render each non-None value as a string for the UI text widget."""
    rendered = [f"{item}" for item in text_int_float if item is not None]
    return {"ui": {"text": rendered}}
|
||||
@@ -5,13 +5,8 @@ import numpy as np
|
||||
import tempfile
|
||||
import wave
|
||||
import subprocess # Added for ffmpeg
|
||||
|
||||
try:
|
||||
import faster_whisper
|
||||
WHISPER_AVAILABLE = True
|
||||
except ImportError:
|
||||
WHISPER_AVAILABLE = False
|
||||
print("faster-whisper not found. To use local transcription, install with: pip install faster-whisper")
|
||||
import sys
|
||||
import logging
|
||||
|
||||
class SpeechToText:
|
||||
def __init__(self):
|
||||
@@ -56,8 +51,7 @@ class SpeechToText:
|
||||
return temp_file.name
|
||||
|
||||
def load_local_model(self, model_size):
|
||||
if not WHISPER_AVAILABLE:
|
||||
return False, "faster-whisper not installed. Install with: pip install faster-whisper"
|
||||
import faster_whisper
|
||||
|
||||
try:
|
||||
if self.local_model is None:
|
||||
@@ -84,6 +78,10 @@ class SpeechToText:
|
||||
return False, f"Error during local transcription: {str(e)}", None
|
||||
|
||||
def transcribe_audio(self, model_size, AUDIO=None, audio_path=None, video_path=None):
|
||||
# Check Python version and warn if 3.12 or higher
|
||||
if sys.version_info > (3, 12):
|
||||
logging.warning("⚠️⚠️⚠️ Warning: You are using Python {}.{} or higher. This may cause compatibility issues with some dependencies (e.g., faster_whisper). Consider using Python 3.11 or 3.12 instead. ⚠️⚠️⚠️".format(sys.version_info.major, sys.version_info.minor))
|
||||
import faster_whisper
|
||||
transcript = "No valid audio input provided"
|
||||
detected_language = ""
|
||||
temp_wav_path = None
|
||||
|
||||
206
split_image.py
Normal file
@@ -0,0 +1,206 @@
|
||||
import torch
|
||||
|
||||
class SplitImageGrid:
    """Split an image batch into a rows×columns grid of overlapping tiles.

    Each tile is extended by a 2-pixel overlap into its neighbours so seams
    can be blended away on reassembly.  At most 9 tiles are supported
    (outputs part_1..part_9); unused slots are returned as None.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "rows": ("INT", {"default": 1, "min": 1, "max": 9}),
                "columns": ("INT", {"default": 1, "min": 1, "max": 9}),
                "MODIFIED_part_index": ("INT", {"default": 1, "min": 1, "max": 9}),
            }
        }

    RETURN_TYPES = ["IMAGE"] * 9 + ["INT", "INT", "IMAGE", "INT"]
    RETURN_NAMES = [f"part_{i}" for i in range(1, 10)] + ["rows", "columns", "MODIFIED_part", "MODIFIED_part_index"]
    FUNCTION = "split"
    CATEGORY = "image"

    def split(self, image, rows, columns, MODIFIED_part_index):
        """Cut ``image`` (B, H, W, C) into overlapping tiles.

        Returns a 13-tuple: nine tile slots (None when unused), rows,
        columns, the tile selected by ``MODIFIED_part_index`` (1-based),
        and that index passed through.

        Raises:
            ValueError: grid has more than 9 tiles, or the index is out of range.
        """
        B, H, W, C = image.shape

        # Bug fix: only 9 part outputs are declared in RETURN_TYPES, but the
        # inputs allow up to a 9x9 grid; the old code then returned a tuple
        # longer than the declared outputs.  Fail fast instead.
        if rows * columns > 9:
            raise ValueError(f"{rows}x{columns} grid produces more than the 9 supported parts")

        # Base tile size (last row/column absorbs any remainder via min/max below).
        part_height = H // rows
        part_width = W // columns
        overlap = 2  # pixels of bleed into neighbouring tiles
        parts = []

        # Row-major order: tile index = r * columns + c.
        for r in range(rows):
            for c in range(columns):
                h_start = max(0, r * part_height - overlap)
                h_end = min(H, (r + 1) * part_height + overlap)
                w_start = max(0, c * part_width - overlap)
                w_end = min(W, (c + 1) * part_width + overlap)
                parts.append(image[:, h_start:h_end, w_start:w_end, :])

        # Pad unused output slots with None.
        while len(parts) < 9:
            parts.append(None)

        # Convert the 1-based UI index and validate it against the grid.
        MODIFIED_index = MODIFIED_part_index - 1
        if MODIFIED_index < 0 or MODIFIED_index >= rows * columns:
            raise ValueError(f"MODIFIED_part_index {MODIFIED_part_index} is out of range for {rows}x{columns} grid")
        MODIFIED_part = parts[MODIFIED_index]

        return tuple(parts + [rows, columns, MODIFIED_part, MODIFIED_part_index])
|
||||
|
||||
class ReassembleImageGrid:
    """Paste grid tiles (from the matching split node) back onto the original image.

    Tiles are expected to carry a 2-pixel overlap on interior edges; the
    top/left overlap is cropped before pasting, and any remaining size
    mismatch is handled by bilinear resize (or an error when auto_resize is
    off).  When tiles are video streams with differing frame counts, a
    reference tile can be chosen and every stream is brought to its length.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "original": ("IMAGE",),
                "rows": ("INT", {"default": 1, "min": 1, "max": 10}),
                "columns": ("INT", {"default": 1, "min": 1, "max": 10}),
            },
            "optional": {
                "part_1": ("IMAGE",),
                "part_2": ("IMAGE",),
                "part_3": ("IMAGE",),
                "part_4": ("IMAGE",),
                "part_5": ("IMAGE",),
                "part_6": ("IMAGE",),
                "part_7": ("IMAGE",),
                "part_8": ("IMAGE",),
                "part_9": ("IMAGE",),
                "MODIFIED_part": ("IMAGE",),
                # 0 disables; 1-9 selects which slot MODIFIED_part overrides.
                "MODIFIED_part_index": ("INT", {"default": 0, "min": 0, "max": 9}),
                # 0 disables; otherwise all frame counts are matched to this part.
                "reference_video_part_index": ("INT", {"default": 0, "min": 0, "max": 9}),
                "auto_resize": ("BOOLEAN", {"default": True}),  # resize mismatched tiles instead of raising
            }
        }

    RETURN_TYPES = ["IMAGE"]
    RETURN_NAMES = ["image"]
    FUNCTION = "reassemble"
    CATEGORY = "image"

    def repeat_frames(self, tensor, k):
        """Repeat the tensor k times along the batch (frame) dimension."""
        return tensor.repeat(k, 1, 1, 1) if k > 1 else tensor

    def adjust_frame_count_with_repeat(self, tensor, B_ref, B_original):
        """Match tensor's frame count to B_ref, preferring whole-stream repetition.

        If B_ref is within one frame of an integer multiple k of B_original,
        the stream is repeated k times and then trimmed/padded to exactly
        B_ref; otherwise frames are resampled via adjust_frame_count.
        """
        if B_original == 0:
            raise ValueError("Original frame count is zero")
        k = round(B_ref / B_original)
        if k > 0 and abs(B_ref - k * B_original) <= 1:
            repeated = self.repeat_frames(tensor, k)
            if repeated.shape[0] > B_ref:
                # One frame too many: trim the tail.
                repeated = repeated[:B_ref]
            elif repeated.shape[0] < B_ref:
                # One frame short: pad by duplicating the last frame.
                pad_size = B_ref - repeated.shape[0]
                last_frame = repeated[-1:].repeat(pad_size, 1, 1, 1)
                repeated = torch.cat([repeated, last_frame], dim=0)
            return repeated
        else:
            return self.adjust_frame_count(tensor, B_ref)

    def adjust_frame_count(self, tensor, target_B):
        """Resample tensor to target_B frames by evenly repeating or skipping frames."""
        B = tensor.shape[0]
        if B == target_B:
            return tensor
        # Evenly spaced source indices; rounding repeats or drops frames as needed.
        indices = torch.linspace(0, B - 1, steps=target_B).round().long()
        indices = indices.clamp(0, B - 1)
        return tensor[indices]

    def resize_tensor(self, tensor, target_height, target_width):
        """Bilinearly resize a [B, H, W, C] tensor to the target spatial size."""
        import torch.nn.functional as F
        B, H, W, C = tensor.shape

        # F.interpolate expects channels-first [B, C, H, W].
        tensor_BCHW = tensor.permute(0, 3, 1, 2)  # [B, H, W, C] -> [B, C, H, W]

        resized = F.interpolate(
            tensor_BCHW,
            size=(target_height, target_width),
            mode='bilinear',
            align_corners=False
        )

        # Back to channels-last [B, H, W, C].
        return resized.permute(0, 2, 3, 1)  # [B, C, H, W] -> [B, H, W, C]

    def reassemble(self, original, rows, columns, part_1=None, part_2=None, part_3=None,
                   part_4=None, part_5=None, part_6=None, part_7=None, part_8=None,
                   part_9=None, MODIFIED_part=None, MODIFIED_part_index=0,
                   reference_video_part_index=0, auto_resize=True):
        """Crop each provided tile's overlap and paste it into a clone of `original`."""
        # Original dimensions (batch/frames, height, width, channels).
        B, H, W, C = original.shape

        # Nominal (overlap-free) tile dimensions.
        part_height = H // rows
        part_width = W // columns
        O = 2  # Overlap pixels, matching SplitImageGrid
        parts = [part_1, part_2, part_3, part_4, part_5, part_6, part_7, part_8, part_9]

        # Override one slot with MODIFIED_part if requested (index is 1-based).
        if MODIFIED_part is not None and MODIFIED_part_index > 0:
            index = MODIFIED_part_index - 1
            if index < 0 or index >= 9:
                raise ValueError(f"Invalid MODIFIED_part_index: {MODIFIED_part_index}")
            parts[index] = MODIFIED_part

        # Synchronise frame counts against the reference tile, if one is chosen.
        if reference_video_part_index > 0:
            ref_index = reference_video_part_index - 1
            if parts[ref_index] is None:
                raise ValueError(f"Reference part {reference_video_part_index} is not provided")
            B_ref = parts[ref_index].shape[0]
            original = self.adjust_frame_count_with_repeat(original, B_ref, B)
            for i in range(9):
                if parts[i] is not None and i != ref_index:
                    parts[i] = self.adjust_frame_count_with_repeat(parts[i], B_ref, B)
                elif i == ref_index:
                    parts[i] = parts[i]  # reference tile kept as-is (intentional no-op)
        else:
            B_ref = B

        # Clone so the caller's original tensor is left untouched.
        reassembled = original.clone()

        # Paste each provided tile into its grid cell (row-major, 1-based slots).
        for i, part in enumerate(parts, start=1):
            if part is not None:
                # Grid position of this tile.
                row = (i - 1) // columns
                col = (i - 1) % columns
                # Interior top/left edges carry the overlap; crop it off.
                crop_top = O if row > 0 else 0
                crop_left = O if col > 0 else 0

                cropped_part = part[:, crop_top:, crop_left:, :]

                # Bring the tile to the nominal cell size, or fail loudly.
                if auto_resize and (cropped_part.shape[1] != part_height or cropped_part.shape[2] != part_width):
                    print(f"Resizing part {i} from {cropped_part.shape[1:3]} to ({part_height}, {part_width})")
                    cropped_part = self.resize_tensor(cropped_part, part_height, part_width)
                elif not auto_resize and (cropped_part.shape[1] != part_height or cropped_part.shape[2] != part_width):
                    # Auto-resize disabled: a size mismatch is an error.
                    raise ValueError(f"Cropped part {i} has incorrect shape. Expected ({part_height}, {part_width}, {C}), got {cropped_part.shape[1:]}")

                # Frame counts must already agree with the reference stream.
                if cropped_part.shape[0] != B_ref:
                    raise ValueError(f"Cropped part {i} has incorrect frame count. Expected {B_ref}, got {cropped_part.shape[0]}")

                # Paste into the cell's nominal (overlap-free) region.
                h_start = row * part_height
                h_end = h_start + part_height
                w_start = col * part_width
                w_end = w_start + part_width
                reassembled[:, h_start:h_end, w_start:w_end, :] = cropped_part

        return (reassembled,)
|
||||
325
style_selector.py
Normal file
@@ -0,0 +1,325 @@
|
||||
import os
|
||||
import random
|
||||
from aiohttp import web
|
||||
from server import PromptServer
|
||||
|
||||
# Shared data structures for style/model selection.

# Top-level art categories offered by the selector node.
CATEGORIES = ["Painting", "Photography", "Digital Art", "3D Rendering", "Illustration"]

# Sub-styles available within each category.
BRANCHES = {
    "Painting": [
        "Renaissance", "Baroque", "Rococo", "Neoclassicism",
        "Romanticism", "Realism", "Impressionism", "Post-Impressionism",
        "Expressionism", "Fauvism", "Cubism", "Futurism", "Dadaism",
        "Surrealism", "Abstract Expressionism", "Pop Art", "Op Art",
        "Minimalism"
    ],
    "Photography": [
        "Black and White", "Color", "Vintage", "Sepia Tone", "HDR",
        "Long Exposure", "Macro", "Portrait", "Landscape", "Street",
        "Fashion", "Analog Film", "Cinematic"
    ],
    "Digital Art": [
        "Digital Painting", "Vector Art", "Pixel Art", "Fractal Art",
        "Algorithmic Art", "Glitch Art"
    ],
    "3D Rendering": [
        "Low Poly", "Voxel", "Isometric", "Ray Tracing"
    ],
    "Illustration": [
        "Line Art", "Cartoon", "Comic Book", "Manga", "Anime",
        "Technical Illustration", "Botanical Illustration",
        "Architectural Rendering", "Concept Art", "Storyboard Art"
    ],
}

# Per-category recommended checkpoints as (display name, AIR urn, civitai page link).
# NOTE(review): for "Copax TimeLessXL" the urn version (1108377) and the link's
# modelVersionId (172160) disagree — verify which version is intended.
BRANCHES_MODELS = {
    "Photography": [
        ("SDXL", "urn:air:sdxl:checkpoint:civitai:101055@128078", "https://civitai.green/models/101055?modelVersionId=128078"),
        ("Juggernaut XL", "urn:air:sdxl:checkpoint:civitai:133005@166909", "https://civitai.green/models/133005?modelVersionId=166909"),
        ("Realistic Stock Photo", "urn:air:sdxl:checkpoint:civitai:139565@154593", "https://civitai.green/models/139565?modelVersionId=154593"),
    ],
    "Illustration": [
        ("Hassaku XL", "urn:air:sdxl:checkpoint:civitai:140272@176059", "https://civitai.green/models/140272?modelVersionId=176059"),
        ("[Lah] Mysterious", "urn:air:sdxl:checkpoint:civitai:118441@162380", "https://civitai.green/models/118441?modelVersionId=162380"),
        ("Copax TimeLessXL", "urn:air:sdxl:checkpoint:civitai:118111@1108377", "https://civitai.green/models/118111?modelVersionId=172160"),
    ],
    "3D Rendering": [
        ("Samaritan 3D Cartoon", "urn:air:sdxl:checkpoint:civitai:81270@144566", "https://civitai.green/models/81270?modelVersionId=144566"),
        ("FormulaXL", "urn:air:sdxl:checkpoint:civitai:129922@160525", "https://civitai.green/models/129922?modelVersionId=160525"),
    ],
    "Digital Art": [
        ("BriXL", "urn:air:sdxl:checkpoint:civitai:131703@166762", "https://civitai.green/models/131703?modelVersionId=166762"),
        ("SDXL Unstable Diffusers", "urn:air:sdxl:checkpoint:civitai:84040@395107", "https://civitai.green/models/84040?modelVersionId=395107"),
    ],
    "Painting": [
        ("Copax TimeLessXL", "urn:air:sdxl:checkpoint:civitai:118111@1108377", "https://civitai.green/models/118111?modelVersionId=172160"),
        ("PixelPaint - Beautiful Painting Style", "urn:air:sdxl:checkpoint:civitai:284101@319693", "https://civitai.green/models/284101?modelVersionId=319693"),
    ],
}

# Flat lookup: model display name -> (urn, link).
# NOTE(review): display names duplicated across categories (e.g. "Copax
# TimeLessXL") collapse into a single entry; the last occurrence wins.
MODELS = {model_name: (urn, link) for category in BRANCHES_MODELS for model_name, urn, link in BRANCHES_MODELS[category]}

# Counter files persisting sequential-loop positions across workflow runs.
STYLE_LIST_COUNTER_FILE = os.path.join("Bjornulf", "style_list_counter.txt")
MODEL_LIST_COUNTER_FILE = os.path.join("Bjornulf", "model_list_counter.txt")
|
||||
|
||||
class StyleSelector:
    """Build style prompt strings, optionally paired with a recommended checkpoint.

    Output strings use the format "<style> <descriptor>;<model>;<urn>;<link>"
    when a known model is attached, or "<style> <descriptor>" otherwise.
    Sequential loop modes persist their position in counter files between runs
    and raise once the end of the list is reached.
    """

    @classmethod
    def INPUT_TYPES(cls):
        ALL_STYLES = sorted(set(style for styles in BRANCHES.values() for style in styles))
        ALL_MODELS = ["None"] + sorted(MODELS.keys())
        return {
            "required": {
                "category": (CATEGORIES,),
                "style": (ALL_STYLES,),
                "model": (ALL_MODELS, {"default": "None"}),
                # -1 means non-reproducible randomness.
                "seed": ("INT", {"default": -1, "min": -1, "max": 0x7FFFFFFFFFFFFFFF}),
                "LOOP_random_LIST": ("BOOLEAN", {"default": False}),
                "LOOP_style_LIST": ("BOOLEAN", {"default": False}),
                "LOOP_SEQUENTIAL": ("BOOLEAN", {"default": False}),
                # Step size for the sequential loop counters.
                "jump": ("INT", {"default": 1, "min": 1, "max": 100, "step": 1}),
            }
        }

    RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING")
    RETURN_NAMES = (
        "selected_category",
        "selected_style_LIST",
        "recommended_with_selected_category",
        "random_LIST_with_selected_category"
    )
    OUTPUT_IS_LIST = (False, True, False, True)
    FUNCTION = "select_style"
    CATEGORY = "Bjornulf"

    def format_style(self, s, descriptor, model=None):
        """Format one style entry, appending model name/urn/link when a known model is set."""
        if model != "None" and model in MODELS:
            urn, link = MODELS[model]
            return f"{s} {descriptor};{model};{urn};{link}"
        return f"{s} {descriptor}"

    def get_next_index(self, counter_file, jump, max_items):
        """Advance the persisted counter by `jump`, raising once max_items is reached."""
        os.makedirs(os.path.dirname(counter_file), exist_ok=True)
        try:
            with open(counter_file, 'r') as f:
                current_index = int(f.read().strip())
        except (FileNotFoundError, ValueError):
            current_index = -jump  # Start before the first index (e.g., -1 if jump=1)

        next_index = current_index + jump
        if next_index >= max_items:
            raise ValueError(f"Counter has reached its limit of {max_items}. Reset counter to continue.")

        with open(counter_file, 'w') as f:
            f.write(str(next_index))

        return next_index

    def select_style(self, category, style, seed, LOOP_random_LIST, LOOP_style_LIST, LOOP_SEQUENTIAL, jump, model=None):
        """Produce the four outputs described in RETURN_NAMES for the chosen category/style."""
        # NOTE(review): "Digital Art" and "3D Rendering" have no descriptor
        # entry, so their formatted strings get an empty descriptor (leaving a
        # trailing space after the style name) — confirm whether intended.
        DESCRIPTORS = {
            "Painting": "painting",
            "Photography": "photography",
            "Illustration": "illustration",
        }
        rng = random.Random() if seed == -1 else random.Random(seed)
        descriptor = DESCRIPTORS.get(category, "")

        # Styles and recommended models for the chosen category.
        styles = BRANCHES.get(category, [])
        models = BRANCHES_MODELS.get(category, [])

        # selected_category: single string, echoed back.
        selected_category = category

        # selected_style_LIST: list whose contents depend on the loop mode.
        if not styles:
            selected_style_LIST = ["No styles found."]
        elif LOOP_SEQUENTIAL and LOOP_style_LIST:
            # Sequential mode: advance through the category's styles one run at a time.
            max_styles = len(styles)
            next_index = self.get_next_index(STYLE_LIST_COUNTER_FILE, jump, max_styles)
            selected_style = styles[next_index]
            selected_style_LIST = [self.format_style(selected_style, descriptor, model)]
        elif LOOP_style_LIST:
            # List mode: emit every style of the category.
            selected_style_LIST = [self.format_style(s, descriptor, model) for s in styles]
        else:
            # Default: just the style picked in the UI.
            selected_style_LIST = [self.format_style(style, descriptor, model)]

        # recommended_with_selected_category: input style paired with the category's first model.
        if models:
            recommended_model = models[0][0]  # First model's name
            recommended_with_selected_category = self.format_style(style, descriptor, recommended_model)
        else:
            recommended_with_selected_category = ""

        # random_LIST_with_selected_category: list driven by LOOP_random_LIST / LOOP_SEQUENTIAL.
        if LOOP_SEQUENTIAL and LOOP_random_LIST:
            # Sequential mode over the category's models.
            if not models:
                random_LIST_with_selected_category = []
            else:
                max_models = len(models)
                next_index = self.get_next_index(MODEL_LIST_COUNTER_FILE, jump, max_models)
                selected_model = models[next_index][0]
                random_LIST_with_selected_category = [self.format_style(style, descriptor, selected_model)]
        elif LOOP_random_LIST:
            # List mode: the input style paired with every model of the category.
            random_LIST_with_selected_category = [self.format_style(style, descriptor, m[0]) for m in models] if models else []
        else:
            # Default: one random style/model combination.
            if models:
                random_model = rng.choice(models)[0]
                random_style = rng.choice(styles)
                random_LIST_with_selected_category = [self.format_style(random_style, descriptor, random_model)]
            else:
                random_LIST_with_selected_category = []

        return (
            selected_category,
            selected_style_LIST,
            recommended_with_selected_category,
            random_LIST_with_selected_category
        )
|
||||
|
||||
# API endpoints for counter management
|
||||
@PromptServer.instance.routes.post("/reset_style_list_counter")
async def reset_style_list_counter(request):
    """Delete the style counter file; a missing file already counts as reset."""
    try:
        os.remove(STYLE_LIST_COUNTER_FILE)
    except FileNotFoundError:
        pass  # nothing to reset — still a success
    except Exception as exc:
        return web.json_response({"success": False, "error": str(exc)}, status=500)
    return web.json_response({"success": True}, status=200)
|
||||
|
||||
@PromptServer.instance.routes.post("/reset_model_list_counter")
async def reset_model_list_counter(request):
    """Delete the model counter file; a missing file already counts as reset."""
    try:
        os.remove(MODEL_LIST_COUNTER_FILE)
    except FileNotFoundError:
        pass  # nothing to reset — still a success
    except Exception as exc:
        return web.json_response({"success": False, "error": str(exc)}, status=500)
    return web.json_response({"success": True}, status=200)
|
||||
|
||||
@PromptServer.instance.routes.post("/get_style_list_counter")
async def get_style_list_counter(request):
    """Report the 1-based position of the style counter (0 when unset)."""
    try:
        with open(STYLE_LIST_COUNTER_FILE, 'r') as handle:
            stored = int(handle.read().strip())
        value = stored + 1  # file stores a 0-based index
    except (FileNotFoundError, ValueError):
        value = 0  # no counter yet, or unparsable content
    except Exception as exc:
        return web.json_response({"success": False, "error": str(exc)}, status=500)
    return web.json_response({"success": True, "value": value}, status=200)
|
||||
|
||||
@PromptServer.instance.routes.post("/get_model_list_counter")
async def get_model_list_counter(request):
    """Report the 1-based position of the model counter (0 when unset)."""
    try:
        with open(MODEL_LIST_COUNTER_FILE, 'r') as handle:
            stored = int(handle.read().strip())
        value = stored + 1  # file stores a 0-based index
    except (FileNotFoundError, ValueError):
        value = 0  # no counter yet, or unparsable content
    except Exception as exc:
        return web.json_response({"success": False, "error": str(exc)}, status=500)
    return web.json_response({"success": True, "value": value}, status=200)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# class StyleSelector:
|
||||
# @classmethod
|
||||
# def INPUT_TYPES(cls):
|
||||
# # Input configuration remains unchanged
|
||||
# ALL_STYLES = sorted(set(style for styles in BRANCHES.values() for style in styles))
|
||||
# ALL_MODELS = ["None"] + sorted(MODELS.keys())
|
||||
# return {
|
||||
# "required": {
|
||||
# "category": (CATEGORIES,),
|
||||
# "style": (ALL_STYLES,),
|
||||
# "model": (ALL_MODELS, {"default": "None"}),
|
||||
# "seed": ("INT", {"default": -1, "min": -1, "max": 0x7FFFFFFFFFFFFFFF}),
|
||||
# "LOOP_random_LIST": ("BOOLEAN", {"default": False}),
|
||||
# "LOOP_style_LIST": ("BOOLEAN", {"default": False}),
|
||||
# }
|
||||
# }
|
||||
|
||||
# RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING")
|
||||
# RETURN_NAMES = (
|
||||
# "selected_category",
|
||||
# "selected_style_LIST",
|
||||
# "recommended_with_selected_category",
|
||||
# "random_LIST_with_selected_category"
|
||||
# )
|
||||
# OUTPUT_IS_LIST = (False, True, False, True) # Updated: third element is now False
|
||||
# FUNCTION = "select_style"
|
||||
# CATEGORY = "Bjornulf"
|
||||
|
||||
# def select_style(self, category, style, seed, LOOP_random_LIST, LOOP_style_LIST, model=None):
|
||||
# DESCRIPTORS = {
|
||||
# "Painting": "painting",
|
||||
# "Photography": "photography",
|
||||
# "Illustration": "illustration",
|
||||
# }
|
||||
# rng = random.Random() if seed == -1 else random.Random(seed)
|
||||
# descriptor = DESCRIPTORS.get(category, "")
|
||||
|
||||
# # selected_category: Single string (unchanged)
|
||||
# selected_category = category
|
||||
|
||||
# # selected_style_LIST: List (unchanged)
|
||||
# if LOOP_style_LIST:
|
||||
# styles = BRANCHES.get(category, [])
|
||||
# if model != "None" and model in MODELS:
|
||||
# urn, link = MODELS[model]
|
||||
# selected_style_LIST = [
|
||||
# f"{s} {descriptor};{model};{urn};{link}" for s in styles
|
||||
# ]
|
||||
# else:
|
||||
# selected_style_LIST = [f"{s} {descriptor}" for s in styles]
|
||||
# else:
|
||||
# if model != "None" and model in MODELS:
|
||||
# urn, link = MODELS[model]
|
||||
# selected_style_LIST = [f"{style} {descriptor};{model};{urn};{link}"]
|
||||
# else:
|
||||
# selected_style_LIST = [f"{style} {descriptor}"]
|
||||
|
||||
# # recommended_with_selected_category: Now a single string
|
||||
# if category in BRANCHES_MODELS and BRANCHES_MODELS[category]:
|
||||
# recommended_model = BRANCHES_MODELS[category][0] # First model
|
||||
# recommended_with_selected_category = f"{style} {descriptor};{recommended_model[0]};{recommended_model[1]};{recommended_model[2]}"
|
||||
# else:
|
||||
# recommended_with_selected_category = ""
|
||||
|
||||
# # random_LIST_with_selected_category: List (unchanged)
|
||||
# if LOOP_random_LIST:
|
||||
# if category in BRANCHES_MODELS and BRANCHES_MODELS[category]:
|
||||
# models = BRANCHES_MODELS[category]
|
||||
# random_LIST_with_selected_category = [
|
||||
# f"{style} {descriptor};{m[0]};{m[1]};{m[2]}" for m in models
|
||||
# ]
|
||||
# else:
|
||||
# random_LIST_with_selected_category = []
|
||||
# else:
|
||||
# if category in BRANCHES_MODELS and BRANCHES_MODELS[category]:
|
||||
# random_model = rng.choice(BRANCHES_MODELS[category])
|
||||
# random_style_base = rng.choice(BRANCHES[category])
|
||||
# random_style = f"{random_style_base} {descriptor}" if descriptor else random_style_base
|
||||
# random_LIST_with_selected_category = [
|
||||
# f"{random_style};{random_model[0]};{random_model[1]};{random_model[2]}"
|
||||
# ]
|
||||
# else:
|
||||
# random_LIST_with_selected_category = []
|
||||
|
||||
# return (
|
||||
# selected_category,
|
||||
# selected_style_LIST,
|
||||
# recommended_with_selected_category,
|
||||
# random_LIST_with_selected_category
|
||||
# )
|
||||
27
switches.py
@@ -45,4 +45,29 @@ class SwitchText:
|
||||
if switch:
|
||||
return (STRING,)
|
||||
else:
|
||||
return ("",)
|
||||
return ("",)
|
||||
|
||||
class ConditionalSwitch:
    """Route one input to either the TRUE or FALSE output based on a boolean."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                # Chooses which of the two outputs receives the data.
                "condition": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                # Any data type is accepted and passed through untouched.
                "input_data": (Everything("*"), {"forceInput": True}),
            }
        }

    RETURN_TYPES = (Everything("*"), Everything("*"))  # dynamically typed passthrough
    RETURN_NAMES = ("TRUE", "FALSE")
    FUNCTION = "switch"
    CATEGORY = "Utilities"

    def switch(self, condition, input_data=None):
        """Send input_data to the output matching `condition`; the other gets None."""
        routed = (input_data, None) if condition else (None, input_data)
        return routed
|
||||
112
text_analyzer.py
Normal file
@@ -0,0 +1,112 @@
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
# Helper: make sure the spaCy model used by the analyzer is installed.
def ensure_model(model_name="en_core_web_sm"):
    """Ensure the requested spaCy model is installed, downloading it if absent."""
    import spacy
    try:
        spacy.load(model_name)
        return  # already present, nothing to do
    except OSError:
        # spaCy signals a missing model package with OSError.
        pass
    print(f"Model '{model_name}' not found. Downloading now...")
    subprocess.check_call([sys.executable, "-m", "spacy", "download", model_name])
    print(f"Model '{model_name}' downloaded successfully.")
|
||||
|
||||
class TextAnalyzer:
    """Compute basic statistics and NLP features (language, sentiment, SVO) for a text.

    Dialog lines of the form "Name: text" are detected and split into the
    speaking character and the spoken text.  Subject/action/target extraction
    runs only for English input (spaCy en_core_web_sm).
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "text": ("STRING", {"multiline": True})
            }
        }

    # Outputs: counts, detected language, sentiment label + polarity score,
    # dialog/description classification, and subject/action/target tokens.
    RETURN_TYPES = ("INT", "INT", "INT", "STRING", "STRING", "FLOAT", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING")
    RETURN_NAMES = ("number_lines", "number_words", "number_characters", "language", "sentiment", "sentiment_polarity", "type", "character", "sentence", "subject", "action", "target")

    FUNCTION = "analyze"
    CATEGORY = "Bjornulf"

    def analyze(self, text):
        """Analyze `text` and return the tuple described by RETURN_NAMES.

        Heavy NLP dependencies are imported lazily so the module loads even
        when they are missing until first use.
        """
        from langdetect import detect
        from textblob import TextBlob
        import spacy

        # Make sure the spaCy English model is available before loading it.
        ensure_model()
        nlp = spacy.load("en_core_web_sm")

        # --- Statistics ---
        lines = len(text.split('\n'))
        words = len(text.split())
        characters = len(text)  # includes spaces and punctuation

        # --- Dialog or description detection ---
        # A leading "Name:" marks a dialog line; everything after the colon is speech.
        dialog_match = re.match(r'^([A-Za-z]+):', text)
        if dialog_match:
            type_ = 'dialog'
            character = dialog_match.group(1)  # the speaking character's name
            spoken_text = text[dialog_match.end():].strip()
        else:
            type_ = 'description'
            character = None
            spoken_text = text

        # --- Language detection ---
        try:
            language = detect(spoken_text)
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; langdetect raises LangDetectException on
            # undetectable input, covered by Exception.
            language = 'unknown'

        # --- Sentiment analysis ---
        blob = TextBlob(spoken_text)
        polarity = blob.sentiment.polarity
        if polarity > 0:
            sentiment = 'positive'
        elif polarity < 0:
            sentiment = 'negative'
        else:
            sentiment = 'neutral'

        # --- Subject / action / target extraction (English only, for spaCy) ---
        if language == 'en':
            doc = nlp(spoken_text)
            action = None
            subject = None
            target = None
            # Find the main verb (ROOT) and its nominal subject / direct object.
            for token in doc:
                if token.dep_ == 'ROOT' and token.pos_ == 'VERB':
                    action = token.text
                    subject_tokens = [w for w in token.children if w.dep_ == 'nsubj']
                    target_tokens = [w for w in token.children if w.dep_ == 'dobj']
                    if subject_tokens:
                        subject = subject_tokens[0].text
                    if target_tokens:
                        target = target_tokens[0].text
                    break
        else:
            subject, action, target = None, None, None

        # Convert None to empty strings for ComfyUI STRING outputs.
        return (
            lines,
            words,
            characters,
            language,
            sentiment,
            polarity,
            type_,
            character or "",
            spoken_text or "",
            subject or "",
            action or "",
            target or ""
        )
|
||||
@@ -1638,7 +1638,8 @@ class TextGeneratorOutfitFemale:
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("GEN_OUTFIT",)
|
||||
RETURN_TYPES = ("GEN_OUTFIT,STRING",)
|
||||
RETURN_NAMES = ("GEN_OUTFIT",)
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
@@ -1796,7 +1797,8 @@ class TextGeneratorOutfitMale:
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("GEN_OUTFIT",)
|
||||
RETURN_TYPES = ("GEN_OUTFIT,STRING",)
|
||||
RETURN_NAMES = ("GEN_OUTFIT",)
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
@@ -1976,13 +1978,14 @@ class TextGeneratorCharacterFemale:
|
||||
"CUSTOM_PROMPT": ("STRING", {"multiline": True, "default": ""}),
|
||||
},
|
||||
"optional": {
|
||||
"add_GEN_CHARACTER": ("GEN_CHARACTER",),
|
||||
"GEN_OUTFIT": ("GEN_OUTFIT",),
|
||||
"GEN_POSE": ("GEN_POSE",),
|
||||
"add_GEN_CHARACTER": ("GEN_CHARACTER,STRING",),
|
||||
"GEN_OUTFIT": ("GEN_OUTFIT,STRING",),
|
||||
"GEN_POSE": ("GEN_POSE,STRING",),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("GEN_CHARACTER",)
|
||||
RETURN_TYPES = ("GEN_CHARACTER,STRING",)
|
||||
RETURN_NAMES = ("GEN_CHARACTER",)
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
@@ -2120,13 +2123,14 @@ class TextGeneratorCharacterMale:
|
||||
"CUSTOM_PROMPT": ("STRING", {"multiline": True, "default": ""}),
|
||||
},
|
||||
"optional": {
|
||||
"add_GEN_CHARACTER": ("GEN_CHARACTER",),
|
||||
"GEN_OUTFIT": ("GEN_OUTFIT",),
|
||||
"GEN_POSE": ("GEN_POSE",),
|
||||
"add_GEN_CHARACTER": ("GEN_CHARACTER,STRING",),
|
||||
"GEN_OUTFIT": ("GEN_OUTFIT,STRING",),
|
||||
"GEN_POSE": ("GEN_POSE,STRING",),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("GEN_CHARACTER",)
|
||||
RETURN_TYPES = ("GEN_CHARACTER,STRING",)
|
||||
RETURN_NAMES = ("GEN_CHARACTER",)
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
@@ -2253,7 +2257,8 @@ class TextGeneratorStyle:
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("GEN_STYLE",)
|
||||
RETURN_TYPES = ("GEN_STYLE,STRING",)
|
||||
RETURN_NAMES = ("GEN_STYLE",)
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
@@ -2316,7 +2321,8 @@ class TextGeneratorScene:
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("GEN_SCENE",)
|
||||
RETURN_TYPES = ("GEN_SCENE,STRING",)
|
||||
RETURN_NAMES = ("GEN_SCENE",)
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
@@ -2373,9 +2379,9 @@ class TextGenerator:
|
||||
"CUSTOM_PROMPT": ("STRING", {"multiline": True, "default": ""}),
|
||||
},
|
||||
"optional": {
|
||||
"GEN_STYLE": ("GEN_STYLE",),
|
||||
"GEN_CHARACTER": ("GEN_CHARACTER",),
|
||||
"GEN_SCENE": ("GEN_SCENE",),
|
||||
"GEN_STYLE": ("GEN_STYLE,STRING",),
|
||||
"GEN_CHARACTER": ("GEN_CHARACTER,STRING",),
|
||||
"GEN_SCENE": ("GEN_SCENE,STRING",),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2889,12 +2895,13 @@ class TextGeneratorCharacterPose:
|
||||
"CUSTOM_PROMPT": ("STRING", {"multiline": True, "default": ""}),
|
||||
},
|
||||
"optional": {
|
||||
"GEN_OBJECT": ("GEN_OBJECT",),
|
||||
"add_GEN_POSE": ("GEN_POSE",),
|
||||
"GEN_OBJECT": ("GEN_OBJECT,STRING",),
|
||||
"add_GEN_POSE": ("GEN_POSE,STRING",),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("GEN_POSE",)
|
||||
RETURN_TYPES = ("GEN_POSE,STRING",)
|
||||
RETURN_NAMES = ("GEN_POSE",)
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
@@ -2968,11 +2975,12 @@ class TextGeneratorCharacterObject:
|
||||
"CUSTOM_PROMPT_SUFFIX": ("STRING", {"default": "", "multiline": True}),
|
||||
},
|
||||
"optional": {
|
||||
"add_GEN_OBJECT": ("GEN_OBJECT",),
|
||||
"add_GEN_OBJECT": ("GEN_OBJECT,STRING",),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("GEN_OBJECT",)
|
||||
RETURN_TYPES = ("GEN_OBJECT,STRING",)
|
||||
RETURN_NAMES = ("GEN_OBJECT",)
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
@@ -3017,11 +3025,12 @@ class TextGeneratorCharacterCreature:
|
||||
"CUSTOM_PROMPT": ("STRING", {"multiline": True, "default": ""}),
|
||||
},
|
||||
"optional": {
|
||||
"add_GEN_CHARACTER": ("GEN_CHARACTER",),
|
||||
"add_GEN_CHARACTER": ("GEN_CHARACTER,STRING",),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("GEN_CHARACTER",)
|
||||
RETURN_TYPES = ("GEN_CHARACTER,STRING",)
|
||||
RETURN_NAMES = ("GEN_CHARACTER",)
|
||||
FUNCTION = "generate"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
|
||||
@@ -98,21 +98,25 @@ class TextToSpeech:
|
||||
|
||||
try:
|
||||
response = requests.get(url, stream=True)
|
||||
response.raise_for_status()
|
||||
response.raise_for_status() # Raises an HTTPError for 4xx/5xx status codes
|
||||
|
||||
audio_data = io.BytesIO()
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
audio_data.write(chunk)
|
||||
|
||||
audio_data.seek(0)
|
||||
# Check if the audio data is empty
|
||||
if audio_data.getbuffer().nbytes == 0:
|
||||
raise ValueError("Received empty audio data from server")
|
||||
|
||||
return audio_data
|
||||
|
||||
except requests.RequestException as e:
|
||||
print(f"Error generating audio: {e}")
|
||||
return io.BytesIO()
|
||||
raise # Re-raise the exception to stop the workflow
|
||||
except Exception as e:
|
||||
print(f"Unexpected error: {e}")
|
||||
return io.BytesIO()
|
||||
raise # Re-raise any other unexpected exceptions
|
||||
|
||||
def play_audio(self, audio: AudioSegment) -> None:
|
||||
if sys.platform.startswith('win'):
|
||||
@@ -195,28 +199,6 @@ class TextToSpeech:
|
||||
audio_output, _, duration = self.process_audio_data(autoplay, audio_data, full_path if save_audio else None)
|
||||
return (audio_output, save_path, full_path, duration)
|
||||
|
||||
# GET VOICE FROM TTS SERVER (Not good for now)
|
||||
# @PromptServer.instance.routes.post("/bjornulf_TTS_get_voices")
|
||||
# async def get_voices(request):
|
||||
# try:
|
||||
# data = await request.json()
|
||||
# TTS_url = data.get('url', 'http://localhost:8020')
|
||||
|
||||
# # Use requests instead of client_session
|
||||
# response = requests.get(f"{TTS_url}/speakers")
|
||||
# response.raise_for_status() # Raise an exception for bad status codes
|
||||
# voices = response.json()
|
||||
|
||||
# # Transform the response to just get the voice_ids
|
||||
# voice_ids = [voice["voice_id"] for voice in voices]
|
||||
# return web.json_response({"voices": voice_ids})
|
||||
# except requests.RequestException as e:
|
||||
# print(f"Error fetching voices: {str(e)}")
|
||||
# return web.json_response({"error": str(e)}, status=500)
|
||||
# except Exception as e:
|
||||
# print(f"Unexpected error: {str(e)}")
|
||||
# return web.json_response({"error": str(e)}, status=500)
|
||||
|
||||
# Scan folder
|
||||
@PromptServer.instance.routes.post("/bjornulf_TTS_get_voices")
|
||||
async def get_voices(request):
|
||||
|
||||
@@ -9,6 +9,6 @@ class TextToVariable:
|
||||
CATEGORY = "Custom"
|
||||
|
||||
def process(self, variable_name, text_value):
|
||||
text_value = text_value.replace("\n", ";")
|
||||
text_value = text_value.replace("\n", "")
|
||||
output_string = f"{variable_name} = {text_value}"
|
||||
return (output_string,)
|
||||
@@ -3,13 +3,17 @@ import os
|
||||
import shutil
|
||||
from PIL import Image
|
||||
import numpy as np
|
||||
import glob
|
||||
import subprocess
|
||||
|
||||
class VideoPingPong:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
"required": {
|
||||
"optional": {
|
||||
"images": ("IMAGE",),
|
||||
"video_path": ("STRING", {"default": ""}),
|
||||
"use_python_ffmpeg": ("BOOLEAN", {"default": False}),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -18,56 +22,75 @@ class VideoPingPong:
|
||||
FUNCTION = "pingpong_images"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def pingpong_images(self, images):
|
||||
# Create a clean folder to store the images
|
||||
def extract_frames(self, video_path, temp_dir, use_python_ffmpeg):
|
||||
"""Extract frames from a video file using FFmpeg, preserving original settings."""
|
||||
if use_python_ffmpeg:
|
||||
try:
|
||||
import ffmpeg
|
||||
(
|
||||
ffmpeg
|
||||
.input(video_path)
|
||||
.output(os.path.join(temp_dir, 'frame_%04d.png'), start_number=0)
|
||||
.run()
|
||||
)
|
||||
except ImportError:
|
||||
raise RuntimeError("ffmpeg-python is not installed. Please install it or set use_python_ffmpeg to False.")
|
||||
except ffmpeg.Error as e:
|
||||
raise RuntimeError(f"Failed to extract frames using ffmpeg-python: {e}")
|
||||
else:
|
||||
ffmpeg_cmd = ['ffmpeg', '-i', video_path, '-start_number', '0', os.path.join(temp_dir, 'frame_%04d.png')]
|
||||
try:
|
||||
subprocess.run(ffmpeg_cmd, check=True)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise RuntimeError(f"Failed to extract frames using FFmpeg: {e}")
|
||||
|
||||
def pingpong_images(self, images=None, video_path="", use_python_ffmpeg=False):
|
||||
"""Generate a ping-pong sequence from images or a video file, prioritizing images if provided."""
|
||||
temp_dir = "temp_pingpong"
|
||||
if os.path.exists(temp_dir):
|
||||
shutil.rmtree(temp_dir)
|
||||
os.makedirs(temp_dir)
|
||||
|
||||
try:
|
||||
# Save each image in the temporary directory
|
||||
num_images = images.shape[0]
|
||||
for i in range(num_images):
|
||||
img_tensor = images[i]
|
||||
img_pil = Image.fromarray((img_tensor.cpu().numpy() * 255).astype('uint8'))
|
||||
img_path = os.path.join(temp_dir, f"image_{i:04d}.png")
|
||||
img_pil.save(img_path)
|
||||
if images is not None:
|
||||
num_frames = images.shape[0]
|
||||
for i in range(num_frames):
|
||||
img_tensor = images[i]
|
||||
img_pil = Image.fromarray((img_tensor.cpu().numpy() * 255).astype('uint8'))
|
||||
img_path = os.path.join(temp_dir, f"frame_{i:04d}.png")
|
||||
img_pil.save(img_path)
|
||||
elif video_path and os.path.exists(video_path):
|
||||
self.extract_frames(video_path, temp_dir, use_python_ffmpeg)
|
||||
else:
|
||||
raise ValueError("Either images or a valid video_path must be provided")
|
||||
|
||||
# Create the pingpong sequence
|
||||
pingpong_list = list(range(num_images)) + list(range(num_images - 2, 0, -1))
|
||||
frame_files = sorted(glob.glob(os.path.join(temp_dir, "frame_*.png")))
|
||||
num_frames = len(frame_files)
|
||||
if num_frames == 0:
|
||||
raise RuntimeError("No frames available to process")
|
||||
|
||||
pingpong_list = list(range(num_frames)) + list(range(num_frames - 2, 0, -1))
|
||||
|
||||
# Process images in batches
|
||||
batch_size = 10
|
||||
pingpong_tensors = []
|
||||
|
||||
for i in range(0, len(pingpong_list), batch_size):
|
||||
batch = pingpong_list[i:i+batch_size]
|
||||
batch = pingpong_list[i:i + batch_size]
|
||||
batch_tensors = []
|
||||
|
||||
for j in batch:
|
||||
img_path = os.path.join(temp_dir, f"image_{j:04d}.png")
|
||||
img_path = os.path.join(temp_dir, f"frame_{j:04d}.png")
|
||||
img_pil = Image.open(img_path)
|
||||
img_np = np.array(img_pil).astype(np.float32) / 255.0
|
||||
img_tensor = torch.from_numpy(img_np)
|
||||
batch_tensors.append(img_tensor)
|
||||
|
||||
# Close the image to free up memory
|
||||
img_pil.close()
|
||||
|
||||
# Stack the batch tensors
|
||||
batch_tensor = torch.stack(batch_tensors)
|
||||
pingpong_tensors.append(batch_tensor)
|
||||
|
||||
# Clear unnecessary variables
|
||||
del batch_tensors
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
# Concatenate all batches
|
||||
pingpong_tensor = torch.cat(pingpong_tensors, dim=0)
|
||||
|
||||
finally:
|
||||
# Clean up the temporary directory
|
||||
shutil.rmtree(temp_dir)
|
||||
|
||||
return (pingpong_tensor,)
|
||||
113
video_preview.py
@@ -3,18 +3,28 @@ import shutil
|
||||
import time
|
||||
import hashlib
|
||||
from pathlib import Path
|
||||
import subprocess
|
||||
import numpy as np
|
||||
import cv2
|
||||
import tempfile
|
||||
|
||||
SUPPORTED_EXTENSIONS = {'.mp4', '.webm', '.ogg', '.mov', '.mkv'}
|
||||
# Supported extensions for video inputs
|
||||
SUPPORTED_VIDEO_EXTENSIONS = {'.mp4', '.webm', '.ogg', '.mov', '.mkv'}
|
||||
|
||||
class VideoPreview:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
"required": {
|
||||
"video_path": ("STRING", {"forceInput": True}),
|
||||
"fps_for_IMAGES": ("FLOAT", {"default": 24.0, "min": 1.0, "max": 60.0}),
|
||||
"autoplay": ("BOOLEAN", {"default": False}),
|
||||
"mute": ("BOOLEAN", {"default": True}),
|
||||
"loop": ("BOOLEAN", {"default": False}),
|
||||
},
|
||||
"optional": {
|
||||
"video_path": ("STRING", {"forceInput": True, "default": ""}),
|
||||
"IMAGES": ("IMAGE", {"default": None}),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ()
|
||||
@@ -22,31 +32,84 @@ class VideoPreview:
|
||||
CATEGORY = "Bjornulf"
|
||||
OUTPUT_NODE = True
|
||||
|
||||
def preview_video(self, video_path, autoplay, mute):
|
||||
def preview_video(self, fps_for_IMAGES, autoplay, mute, loop, video_path="", IMAGES=None):
|
||||
try:
|
||||
if not video_path or not isinstance(video_path, str):
|
||||
raise ValueError("Invalid video path provided")
|
||||
|
||||
video_path = os.path.abspath(video_path)
|
||||
if not os.path.exists(video_path):
|
||||
raise FileNotFoundError(f"Video file not found: {video_path}")
|
||||
|
||||
ext = Path(video_path).suffix.lower()
|
||||
if ext not in SUPPORTED_EXTENSIONS:
|
||||
raise ValueError(f"Unsupported video format: {ext}. Supported formats: {', '.join(SUPPORTED_EXTENSIONS)}")
|
||||
|
||||
# Destination directory for preview videos
|
||||
dest_dir = os.path.join("output", "Bjornulf", "preview_video")
|
||||
os.makedirs(dest_dir, exist_ok=True)
|
||||
|
||||
file_hash = hashlib.md5(open(video_path,'rb').read()).hexdigest()[:8]
|
||||
timestamp = int(time.time())
|
||||
base_name = Path(video_path).stem
|
||||
dest_name = f"{base_name}_{timestamp}_{file_hash}{ext}"
|
||||
dest_path = os.path.join(dest_dir, dest_name)
|
||||
# Determine which input is provided
|
||||
if video_path and isinstance(video_path, str) and video_path.strip():
|
||||
video_path = os.path.abspath(video_path)
|
||||
if not os.path.exists(video_path):
|
||||
raise FileNotFoundError(f"Video file not found: {video_path}")
|
||||
|
||||
if not os.path.exists(dest_path):
|
||||
shutil.copy2(video_path, dest_path)
|
||||
ext = Path(video_path).suffix.lower() # e.g., '.mp4', '.webm'
|
||||
if ext not in SUPPORTED_VIDEO_EXTENSIONS:
|
||||
raise ValueError(f"Unsupported video format: {ext}. Supported formats: {', '.join(SUPPORTED_VIDEO_EXTENSIONS)}")
|
||||
|
||||
final_video_path = video_path
|
||||
|
||||
# Generate unique filename with original extension
|
||||
file_hash = hashlib.md5(open(final_video_path, 'rb').read()).hexdigest()[:8]
|
||||
timestamp = int(time.time())
|
||||
base_name = "video_preview" # More descriptive than "image_sequence"
|
||||
dest_name = f"{base_name}_{timestamp}_{file_hash}{ext}" # Keeps original extension
|
||||
dest_path = os.path.join(dest_dir, dest_name)
|
||||
|
||||
shutil.copy2(final_video_path, dest_path)
|
||||
print(f"Video copied to: {dest_path}")
|
||||
|
||||
elif IMAGES is not None and len(IMAGES) > 0:
|
||||
# Use a unique temporary directory for this run
|
||||
with tempfile.TemporaryDirectory(prefix="bjornulf_temp_video_") as temp_dir:
|
||||
# Convert image tensors to files in the unique temp directory
|
||||
image_files = []
|
||||
for i, img_tensor in enumerate(IMAGES):
|
||||
# Convert tensor (H, W, C) in range [0, 1] to numpy array in range [0, 255]
|
||||
img_np = (img_tensor.numpy() * 255).astype(np.uint8)
|
||||
# Ensure RGB format for OpenCV (ComfyUI IMAGES are typically RGB)
|
||||
img_path = os.path.join(temp_dir, f"frame_{i:04d}.png")
|
||||
cv2.imwrite(img_path, cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR))
|
||||
image_files.append(img_path)
|
||||
|
||||
if not image_files:
|
||||
raise ValueError("No valid IMAGES provided to create a video.")
|
||||
|
||||
# Create temporary video using FFmpeg
|
||||
output_video = os.path.join(temp_dir, "temp_video.mp4")
|
||||
pattern = os.path.join(temp_dir, "frame_%04d.png")
|
||||
cmd = [
|
||||
"ffmpeg",
|
||||
"-framerate", str(fps_for_IMAGES),
|
||||
"-i", pattern,
|
||||
"-c:v", "libx264",
|
||||
"-pix_fmt", "yuv420p",
|
||||
"-y", # Overwrite output file if it exists
|
||||
output_video
|
||||
]
|
||||
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
if not os.path.exists(output_video):
|
||||
raise RuntimeError("Failed to create temporary video from IMAGES.")
|
||||
|
||||
final_video_path = output_video
|
||||
|
||||
# Generate unique destination filename
|
||||
file_hash = hashlib.md5(open(final_video_path, 'rb').read()).hexdigest()[:8]
|
||||
timestamp = int(time.time())
|
||||
base_name = "image_sequence"
|
||||
dest_name = f"{base_name}_{timestamp}_{file_hash}.mp4"
|
||||
dest_path = os.path.join(dest_dir, dest_name)
|
||||
|
||||
# Copy the video to the preview directory
|
||||
if not os.path.exists(dest_path):
|
||||
shutil.copy2(final_video_path, dest_path)
|
||||
|
||||
else:
|
||||
raise ValueError("Either 'video_path' or 'IMAGES' must be provided.")
|
||||
|
||||
# Successful return with video data
|
||||
return {
|
||||
"ui": {
|
||||
"video": [dest_name, "Bjornulf/preview_video"],
|
||||
@@ -54,15 +117,17 @@ class VideoPreview:
|
||||
"width": 512,
|
||||
"height": 512,
|
||||
"autoplay": autoplay,
|
||||
"mute": mute
|
||||
"mute": mute,
|
||||
"loop": loop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
# Error case: return an empty list for "video" to prevent iteration error
|
||||
return {
|
||||
"ui": {
|
||||
"error": str(e),
|
||||
"video": None
|
||||
"video": [], # Changed from None to []
|
||||
"error": str(e)
|
||||
}
|
||||
}
|
||||
@@ -26,4 +26,52 @@ app.registerExtension({
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
app.registerExtension({
|
||||
name: "Bjornulf.LoadCivitAILinks",
|
||||
async nodeCreated(node) {
|
||||
if (node.comfyClass === "Bjornulf_LoadCivitAILinks") {
|
||||
// Add a refresh button widget
|
||||
const refreshButton = node.addWidget(
|
||||
"button",
|
||||
"Refresh File List",
|
||||
null,
|
||||
() => {
|
||||
fetch("/get_civitai_links_files", {
|
||||
method: "POST",
|
||||
})
|
||||
.then((response) => response.json())
|
||||
.then((data) => {
|
||||
if (data.success) {
|
||||
// Update the dropdown with the new file list
|
||||
const dropdownWidget = node.widgets.find(w => w.name === "selected_file");
|
||||
if (dropdownWidget) {
|
||||
dropdownWidget.options.values = ["Not selected", ...data.files];
|
||||
dropdownWidget.value = "Not selected";
|
||||
app.ui.dialog.show(
|
||||
"[LoadCivitAILinks] File list refreshed successfully!"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
app.ui.dialog.show(
|
||||
`[LoadCivitAILinks] Failed to refresh file list: ${
|
||||
data.error || "Unknown error"
|
||||
}`
|
||||
);
|
||||
}
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error(
|
||||
"[LoadCivitAILinks] Error fetching links files:",
|
||||
error
|
||||
);
|
||||
app.ui.dialog.show(
|
||||
"[LoadCivitAILinks] An error occurred while refreshing the file list."
|
||||
);
|
||||
});
|
||||
}
|
||||
);
|
||||
}
|
||||
},
|
||||
});
|
||||
81
web/js/audio_preview.js
Normal file
@@ -0,0 +1,81 @@
|
||||
import { api } from '../../../scripts/api.js';
|
||||
import { app } from "../../../scripts/app.js";
|
||||
|
||||
// Function to display the audio preview
|
||||
function displayAudioPreview(component, filename, category, autoplay, mute, loop) {
|
||||
let audioWidget = component._audioWidget;
|
||||
|
||||
// Create the audio widget if it doesn't exist
|
||||
if (!audioWidget) {
|
||||
const container = document.createElement("div");
|
||||
|
||||
// Add the DOM widget to the component
|
||||
audioWidget = component.addDOMWidget("Bjornulf", "preview", container, {
|
||||
serialize: false,
|
||||
hideOnZoom: false,
|
||||
getValue() {
|
||||
return container.value;
|
||||
},
|
||||
setValue(v) {
|
||||
container.value = v;
|
||||
},
|
||||
});
|
||||
|
||||
// Initialize widget properties
|
||||
audioWidget.value = { hidden: false, paused: false, params: {} };
|
||||
audioWidget.parentElement = document.createElement("div");
|
||||
audioWidget.parentElement.className = "audio_preview";
|
||||
audioWidget.parentElement.style.width = "100%";
|
||||
audioWidget.parentElement.style.marginBottom = "50px";
|
||||
container.appendChild(audioWidget.parentElement);
|
||||
|
||||
// Create the audio element
|
||||
audioWidget.audioElement = document.createElement("audio");
|
||||
audioWidget.audioElement.controls = true;
|
||||
audioWidget.audioElement.style.width = "100%";
|
||||
|
||||
// Hide the audio player on error
|
||||
audioWidget.audioElement.addEventListener("error", () => {
|
||||
audioWidget.parentElement.hidden = true;
|
||||
});
|
||||
|
||||
audioWidget.parentElement.hidden = audioWidget.value.hidden;
|
||||
audioWidget.parentElement.appendChild(audioWidget.audioElement);
|
||||
component._audioWidget = audioWidget; // Store for reuse
|
||||
}
|
||||
|
||||
// Set audio source and properties
|
||||
const params = {
|
||||
"filename": filename,
|
||||
"subfolder": category,
|
||||
"type": "temp",
|
||||
"rand": Math.random().toString().slice(2, 12) // Cache-busting random parameter
|
||||
};
|
||||
|
||||
const urlParams = new URLSearchParams(params);
|
||||
audioWidget.audioElement.src = `api/view?${urlParams.toString()}`;
|
||||
audioWidget.audioElement.autoplay = autoplay && !audioWidget.value.paused && !audioWidget.value.hidden;
|
||||
audioWidget.audioElement.loop = loop;
|
||||
}
|
||||
|
||||
// Register the extension
|
||||
app.registerExtension({
|
||||
name: "Bjornulf.AudioPreview",
|
||||
async beforeRegisterNodeDef(nodeType, nodeData, appInstance) {
|
||||
if (nodeData?.name === "Bjornulf_AudioPreview") {
|
||||
// Define behavior when the node executes
|
||||
nodeType.prototype.onExecuted = function(data) {
|
||||
const autoplay = this.widgets.find(w => w.name === "autoplay")?.value ?? false;
|
||||
const loop = this.widgets.find(w => w.name === "loop")?.value ?? false;
|
||||
displayAudioPreview(this, data.audio[0], data.audio[1], autoplay, loop);
|
||||
};
|
||||
|
||||
// Override computeSize to set a fixed height
|
||||
nodeType.prototype.computeSize = function() {
|
||||
const size = LiteGraph.LGraphNode.prototype.computeSize.call(this);
|
||||
size[1] = 150; // Fixed height of 150px
|
||||
return size;
|
||||
};
|
||||
}
|
||||
}
|
||||
});
|
||||
53
web/js/fix_face.js
Normal file
@@ -0,0 +1,53 @@
|
||||
import { app } from "../../../scripts/app.js";
|
||||
|
||||
app.registerExtension({
|
||||
name: "Bjornulf.FixFace",
|
||||
async nodeCreated(node) {
|
||||
if (node.comfyClass === "Bjornulf_FixFace") {
|
||||
const updateInputs = () => {
|
||||
const initialWidth = node.size[0];
|
||||
const numFacesWidget = node.widgets.find(w => w.name === "number_of_faces");
|
||||
if (!numFacesWidget) return;
|
||||
|
||||
const numFaces = numFacesWidget.value;
|
||||
|
||||
// Initialize node.inputs if it doesn't exist
|
||||
if (!node.inputs) {
|
||||
node.inputs = [];
|
||||
}
|
||||
|
||||
// Filter existing FACE_SETTINGS inputs
|
||||
const existingInputs = node.inputs.filter(input => input.name.startsWith('FACE_SETTINGS_'));
|
||||
|
||||
// Add or remove inputs based on number_of_faces
|
||||
if (existingInputs.length < numFaces) {
|
||||
for (let i = existingInputs.length + 1; i <= numFaces; i++) {
|
||||
const inputName = `FACE_SETTINGS_${i}`;
|
||||
if (!node.inputs.find(input => input.name === inputName)) {
|
||||
node.addInput(inputName, "FACE_SETTINGS");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
node.inputs = node.inputs.filter(input => !input.name.startsWith('FACE_SETTINGS_') ||
|
||||
parseInt(input.name.split('_')[2]) <= numFaces);
|
||||
}
|
||||
|
||||
node.setSize(node.computeSize());
|
||||
node.size[0] = initialWidth; // Keep width fixed
|
||||
};
|
||||
|
||||
// Move number_of_faces widget to the top and set callback
|
||||
const numFacesWidget = node.widgets.find(w => w.name === "number_of_faces");
|
||||
if (numFacesWidget) {
|
||||
node.widgets = [numFacesWidget, ...node.widgets.filter(w => w !== numFacesWidget)];
|
||||
numFacesWidget.callback = () => {
|
||||
updateInputs();
|
||||
app.graph.setDirtyCanvas(true);
|
||||
};
|
||||
}
|
||||
|
||||
// Initial update after node creation
|
||||
setTimeout(updateInputs, 0);
|
||||
}
|
||||
}
|
||||
});
|
||||
@@ -1,122 +1,135 @@
|
||||
import { app } from "../../../scripts/app.js";
|
||||
import { api } from "../../../scripts/api.js";
|
||||
|
||||
// Helper function to construct image URLs for ComfyUI
|
||||
function imageDataToUrl(data) {
|
||||
return api.apiURL(
|
||||
`/view?filename=${encodeURIComponent(data.filename)}&type=${data.type}&subfolder=${data.subfolder}${app.getPreviewFormatParam()}`
|
||||
`/view?filename=${encodeURIComponent(data.filename)}&type=${
|
||||
data.type
|
||||
}&subfolder=${data.subfolder}` + app.getPreviewFormatParam()
|
||||
);
|
||||
}
|
||||
|
||||
app.registerExtension({
|
||||
name: "Bjornulf.FourImageViewer",
|
||||
async nodeCreated(node) {
|
||||
// Ensure this applies only to the specific node type
|
||||
if (node.comfyClass !== "Bjornulf_FourImageViewer") return;
|
||||
|
||||
const marginTop = 90;
|
||||
const verticalOffset = -30;
|
||||
const minSize = 256; // Minimum size for the node
|
||||
const maxSize = 2048; // Maximum size for the node
|
||||
node.size = [512, 512 + marginTop];
|
||||
node.images = new Array(4).fill(null);
|
||||
const padding = 10;
|
||||
// Constants for layout and sizing
|
||||
const marginTop = 90; // Space at the top for node UI elements
|
||||
const verticalOffset = -30; // Adjustment for canvas positioning
|
||||
const minSize = 256; // Minimum size of the node
|
||||
const maxSize = 2048; // Maximum size of the node
|
||||
const padding = 10; // Padding around the image area
|
||||
|
||||
// Add resize handles
|
||||
// Initialize node properties
|
||||
node.size = [512, 512 + marginTop];
|
||||
node.data = {
|
||||
images: new Array(4).fill(null), // Array to hold up to 4 images
|
||||
sliderX: null, // X position of the vertical slider
|
||||
sliderY: null, // Y position of the horizontal slider
|
||||
};
|
||||
|
||||
// Enable resizing
|
||||
node.flags |= LiteGraph.RESIZABLE;
|
||||
|
||||
node.onResize = function(size) {
|
||||
// Ensure square aspect ratio (excluding marginTop)
|
||||
const minDimension = Math.max(minSize, Math.min(size[0], size[1] - marginTop));
|
||||
// Handle resizing to maintain square aspect ratio
|
||||
node.onResize = function (size) {
|
||||
const minDimension = Math.max(
|
||||
minSize,
|
||||
Math.min(size[0], size[1] - marginTop)
|
||||
);
|
||||
const maxDimension = Math.min(maxSize, minDimension);
|
||||
size[0] = maxDimension;
|
||||
size[1] = maxDimension + marginTop;
|
||||
|
||||
// Update slider positions proportionally
|
||||
|
||||
const fullImgWidth = size[0] - 3 * padding;
|
||||
const fullImgHeight = size[1] - padding - marginTop;
|
||||
|
||||
// Only update sliders if they exist (node has been initialized)
|
||||
if (node.hasOwnProperty('sliderX')) {
|
||||
|
||||
// Update slider positions proportionally
|
||||
if (node.data.hasOwnProperty("sliderX")) {
|
||||
const oldWidth = node.size[0] - 3 * padding;
|
||||
const oldHeight = node.size[1] - padding - marginTop;
|
||||
|
||||
// Calculate relative positions (0 to 1)
|
||||
const relativeX = (node.sliderX - padding) / oldWidth;
|
||||
const relativeY = (node.sliderY - marginTop) / oldHeight;
|
||||
|
||||
// Update slider positions
|
||||
node.sliderX = padding + (fullImgWidth * relativeX);
|
||||
node.sliderY = marginTop + (fullImgHeight * relativeY);
|
||||
const relativeX = (node.data.sliderX - padding) / oldWidth;
|
||||
const relativeY = (node.data.sliderY - marginTop) / oldHeight;
|
||||
node.data.sliderX = padding + fullImgWidth * relativeX;
|
||||
node.data.sliderY = marginTop + fullImgHeight * relativeY;
|
||||
} else {
|
||||
// Initial slider positions
|
||||
node.sliderX = padding + fullImgWidth / 2;
|
||||
node.sliderY = marginTop + fullImgHeight / 2;
|
||||
node.data.sliderX = padding + fullImgWidth / 2;
|
||||
node.data.sliderY = marginTop + fullImgHeight / 2;
|
||||
}
|
||||
|
||||
|
||||
node.size = size;
|
||||
return size;
|
||||
};
|
||||
|
||||
// Full area where images get drawn
|
||||
const fullImgWidth = node.size[0] - 3 * padding;
|
||||
const fullImgHeight = node.size[1] - padding - marginTop;
|
||||
node.sliderX = padding + fullImgWidth / 2;
|
||||
node.sliderY = marginTop + fullImgHeight / 2;
|
||||
|
||||
node.onMouseDown = function(e) {
|
||||
// Handle mouse down to move sliders
|
||||
node.onMouseDown = function (e) {
|
||||
const rect = node.getBounding();
|
||||
const [clickX, clickY] = [
|
||||
e.canvasX - rect[0],
|
||||
e.canvasY - rect[1] + verticalOffset
|
||||
e.canvasY - rect[1] + verticalOffset,
|
||||
];
|
||||
|
||||
const imgWidth = rect[2] - 3 * padding;
|
||||
const imgHeight = rect[3] - padding - marginTop;
|
||||
const xStart = padding;
|
||||
const xEnd = xStart + imgWidth;
|
||||
const yStart = marginTop;
|
||||
const yEnd = yStart + imgHeight;
|
||||
|
||||
if (clickX >= xStart && clickX <= xEnd && clickY >= yStart && clickY <= yEnd) {
|
||||
const hasImage2 = node.images[1] !== null;
|
||||
const hasImage3 = node.images[2] !== null;
|
||||
const hasImage4 = node.images[3] !== null;
|
||||
const xStart = padding;
|
||||
const xEnd = xStart + (node.size[0] - 3 * padding);
|
||||
const yStart = marginTop;
|
||||
const yEnd = yStart + (node.size[1] - padding - marginTop);
|
||||
|
||||
if (
|
||||
clickX >= xStart &&
|
||||
clickX <= xEnd &&
|
||||
clickY >= yStart &&
|
||||
clickY <= yEnd
|
||||
) {
|
||||
const hasImage2 = node.data.images[1] !== null;
|
||||
const hasImage3 = node.data.images[2] !== null;
|
||||
const hasImage4 = node.data.images[3] !== null;
|
||||
|
||||
// Lock sliderY to bottom if only two images are present
|
||||
if (hasImage2 && !hasImage3 && !hasImage4) {
|
||||
node.sliderY = yEnd;
|
||||
node.data.sliderY = yEnd;
|
||||
}
|
||||
node.sliderX = Math.max(xStart, Math.min(clickX, xEnd));
|
||||
node.sliderY = hasImage3 || hasImage4
|
||||
? Math.max(yStart, Math.min(clickY, yEnd))
|
||||
: yEnd;
|
||||
|
||||
node.data.sliderX = Math.max(xStart, Math.min(clickX, xEnd));
|
||||
node.data.sliderY =
|
||||
hasImage3 || hasImage4
|
||||
? Math.max(yStart, Math.min(clickY, yEnd))
|
||||
: yEnd;
|
||||
|
||||
app.graph.setDirtyCanvas(true, true);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
};
|
||||
|
||||
node.onExecuted = async function(message) {
|
||||
node.images = new Array(4).fill(null);
|
||||
// Load images when the node is executed
|
||||
node.onExecuted = async function (message) {
|
||||
node.data.images = new Array(4).fill(null);
|
||||
for (let i = 1; i <= 4; i++) {
|
||||
const images = message[`images_${i}`] || [];
|
||||
if (images.length) {
|
||||
const imgData = images[0];
|
||||
const img = new Image();
|
||||
img.src = imageDataToUrl(imgData);
|
||||
await new Promise(resolve => (img.onload = img.onerror = resolve));
|
||||
node.images[i - 1] = img;
|
||||
await new Promise((resolve) => (img.onload = img.onerror = resolve));
|
||||
node.data.images[i - 1] = img;
|
||||
}
|
||||
}
|
||||
app.graph.setDirtyCanvas(true, true);
|
||||
};
|
||||
|
||||
node.onDrawForeground = function(ctx) {
|
||||
const padding = 10;
|
||||
// Render images and sliders
|
||||
node.onDrawForeground = function (ctx) {
|
||||
const xStart = padding;
|
||||
const xEnd = xStart + (node.size[0] - 3 * padding);
|
||||
const yStart = marginTop;
|
||||
const yEnd = yStart + (node.size[1] - padding - marginTop);
|
||||
const fullImgWidth = node.size[0] - 3 * padding;
|
||||
const fullImgHeight = node.size[1] - padding - marginTop;
|
||||
|
||||
|
||||
// Calculate fitted rectangle for image display
|
||||
function getFittedDestRect(dx, dy, dWidth, dHeight, targetRatio) {
|
||||
let newWidth = dWidth;
|
||||
let newHeight = dWidth / targetRatio;
|
||||
@@ -128,18 +141,34 @@ app.registerExtension({
|
||||
const offsetY = dy + (dHeight - newHeight) / 2;
|
||||
return [offsetX, offsetY, newWidth, newHeight];
|
||||
}
|
||||
|
||||
|
||||
// Draw a cropped image within specified bounds
|
||||
function drawCroppedImage(img, dx, dy, dWidth, dHeight) {
|
||||
if (!img) return;
|
||||
let targetRatio = dWidth / dHeight;
|
||||
if (node.images[0] && node.images[0].naturalWidth && node.images[0].naturalHeight) {
|
||||
targetRatio = node.images[0].naturalWidth / node.images[0].naturalHeight;
|
||||
if (
|
||||
node.data.images[0] &&
|
||||
node.data.images[0].naturalWidth &&
|
||||
node.data.images[0].naturalHeight
|
||||
) {
|
||||
targetRatio =
|
||||
node.data.images[0].naturalWidth /
|
||||
node.data.images[0].naturalHeight;
|
||||
}
|
||||
|
||||
const [ndx, ndy, ndWidth, ndHeight] = getFittedDestRect(dx, dy, dWidth, dHeight, targetRatio);
|
||||
|
||||
|
||||
const [ndx, ndy, ndWidth, ndHeight] = getFittedDestRect(
|
||||
dx,
|
||||
dy,
|
||||
dWidth,
|
||||
dHeight,
|
||||
targetRatio
|
||||
);
|
||||
|
||||
const imgRatio = img.naturalWidth / img.naturalHeight;
|
||||
let sx = 0, sy = 0, sWidth = img.naturalWidth, sHeight = img.naturalHeight;
|
||||
let sx = 0,
|
||||
sy = 0,
|
||||
sWidth = img.naturalWidth,
|
||||
sHeight = img.naturalHeight;
|
||||
if (imgRatio > targetRatio) {
|
||||
sWidth = img.naturalHeight * targetRatio;
|
||||
sx = (img.naturalWidth - sWidth) / 2;
|
||||
@@ -147,64 +176,139 @@ app.registerExtension({
|
||||
sHeight = img.naturalWidth / targetRatio;
|
||||
sy = (img.naturalHeight - sHeight) / 2;
|
||||
}
|
||||
ctx.drawImage(img, sx, sy, sWidth, sHeight, ndx, ndy, ndWidth, ndHeight);
|
||||
ctx.drawImage(
|
||||
img,
|
||||
sx,
|
||||
sy,
|
||||
sWidth,
|
||||
sHeight,
|
||||
ndx,
|
||||
ndy,
|
||||
ndWidth,
|
||||
ndHeight
|
||||
);
|
||||
}
|
||||
|
||||
const connectedImages = node.images.slice(1).filter(img => img !== null).length;
|
||||
|
||||
|
||||
const connectedImages = node.data.images
|
||||
.slice(1)
|
||||
.filter((img) => img !== null).length;
|
||||
|
||||
if (connectedImages === 0) {
|
||||
if (node.images[0]) {
|
||||
drawCroppedImage(node.images[0], xStart, yStart, fullImgWidth, fullImgHeight);
|
||||
// Single image display
|
||||
if (node.data.images[0]) {
|
||||
drawCroppedImage(
|
||||
node.data.images[0],
|
||||
xStart,
|
||||
yStart,
|
||||
fullImgWidth,
|
||||
fullImgHeight
|
||||
);
|
||||
}
|
||||
} else if (connectedImages === 1 && node.images[1]) {
|
||||
const splitX = node.sliderX;
|
||||
} else if (connectedImages === 1 && node.data.images[1]) {
|
||||
// Two images with vertical split
|
||||
const splitX = node.data.sliderX;
|
||||
ctx.save();
|
||||
ctx.beginPath();
|
||||
ctx.rect(xStart, yStart, splitX - xStart, fullImgHeight);
|
||||
ctx.clip();
|
||||
drawCroppedImage(node.images[0], xStart, yStart, fullImgWidth, fullImgHeight);
|
||||
drawCroppedImage(
|
||||
node.data.images[0],
|
||||
xStart,
|
||||
yStart,
|
||||
fullImgWidth,
|
||||
fullImgHeight
|
||||
);
|
||||
ctx.restore();
|
||||
|
||||
|
||||
ctx.save();
|
||||
ctx.beginPath();
|
||||
ctx.rect(splitX, yStart, xEnd - splitX, fullImgHeight);
|
||||
ctx.clip();
|
||||
drawCroppedImage(node.images[1], xStart, yStart, fullImgWidth, fullImgHeight);
|
||||
drawCroppedImage(
|
||||
node.data.images[1],
|
||||
xStart,
|
||||
yStart,
|
||||
fullImgWidth,
|
||||
fullImgHeight
|
||||
);
|
||||
ctx.restore();
|
||||
} else {
|
||||
// Three or four images with quadrants
|
||||
const drawQuadrant = (imgIndex, clipX, clipY, clipW, clipH) => {
|
||||
if (!node.images[imgIndex]) return;
|
||||
if (!node.data.images[imgIndex]) return;
|
||||
ctx.save();
|
||||
ctx.beginPath();
|
||||
ctx.rect(clipX, clipY, clipW, clipH);
|
||||
ctx.clip();
|
||||
drawCroppedImage(node.images[imgIndex], xStart, yStart, fullImgWidth, fullImgHeight);
|
||||
drawCroppedImage(
|
||||
node.data.images[imgIndex],
|
||||
xStart,
|
||||
yStart,
|
||||
fullImgWidth,
|
||||
fullImgHeight
|
||||
);
|
||||
ctx.restore();
|
||||
};
|
||||
|
||||
drawQuadrant(0, xStart, yStart, node.sliderX - xStart, node.sliderY - yStart);
|
||||
drawQuadrant(1, node.sliderX, yStart, xEnd - node.sliderX, node.sliderY - yStart);
|
||||
|
||||
if (node.images[3] === null) {
|
||||
drawQuadrant(2, xStart, node.sliderY, xEnd - xStart, yEnd - node.sliderY);
|
||||
|
||||
drawQuadrant(
|
||||
0,
|
||||
xStart,
|
||||
yStart,
|
||||
node.data.sliderX - xStart,
|
||||
node.data.sliderY - yStart
|
||||
);
|
||||
drawQuadrant(
|
||||
1,
|
||||
node.data.sliderX,
|
||||
yStart,
|
||||
xEnd - node.data.sliderX,
|
||||
node.data.sliderY - yStart
|
||||
);
|
||||
|
||||
if (node.data.images[3] === null) {
|
||||
drawQuadrant(
|
||||
2,
|
||||
xStart,
|
||||
node.data.sliderY,
|
||||
xEnd - xStart,
|
||||
yEnd - node.data.sliderY
|
||||
);
|
||||
} else {
|
||||
drawQuadrant(2, xStart, node.sliderY, node.sliderX - xStart, yEnd - node.sliderY);
|
||||
drawQuadrant(3, node.sliderX, node.sliderY, xEnd - node.sliderX, yEnd - node.sliderY);
|
||||
drawQuadrant(
|
||||
2,
|
||||
xStart,
|
||||
node.data.sliderY,
|
||||
node.data.sliderX - xStart,
|
||||
yEnd - node.data.sliderY
|
||||
);
|
||||
drawQuadrant(
|
||||
3,
|
||||
node.data.sliderX,
|
||||
node.data.sliderY,
|
||||
xEnd - node.data.sliderX,
|
||||
yEnd - node.data.sliderY
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Draw sliders
|
||||
ctx.strokeStyle = "#FFF";
|
||||
ctx.lineWidth = 1;
|
||||
if (connectedImages > 0) {
|
||||
ctx.beginPath();
|
||||
ctx.moveTo(node.sliderX, yStart);
|
||||
ctx.lineTo(node.sliderX, yEnd);
|
||||
if (node.data.images[3] === null && node.data.images[2] !== null) {
|
||||
ctx.moveTo(node.data.sliderX, yStart);
|
||||
ctx.lineTo(node.data.sliderX, node.data.sliderY);
|
||||
} else {
|
||||
ctx.moveTo(node.data.sliderX, yStart);
|
||||
ctx.lineTo(node.data.sliderX, yEnd);
|
||||
}
|
||||
if (connectedImages >= 2) {
|
||||
ctx.moveTo(xStart, node.sliderY);
|
||||
ctx.lineTo(xEnd, node.sliderY);
|
||||
ctx.moveTo(xStart, node.data.sliderY);
|
||||
ctx.lineTo(xEnd, node.data.sliderY);
|
||||
}
|
||||
ctx.stroke();
|
||||
}
|
||||
};
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
47
web/js/load_text.js
Normal file
@@ -0,0 +1,47 @@
|
||||
import { app } from "../../../scripts/app.js";
|
||||
|
||||
app.registerExtension({
|
||||
name: "Bjornulf.LoadTextFromFolder",
|
||||
async nodeCreated(node) {
|
||||
if (node.comfyClass === "Bjornulf_LoadTextFromFolder") {
|
||||
// Add a refresh button widget
|
||||
// Assuming this is inside your node's setup function
|
||||
const refreshButton = node.addWidget(
|
||||
"button",
|
||||
"Refresh File List",
|
||||
null,
|
||||
() => {
|
||||
fetch("/get_text_files", {
|
||||
method: "POST",
|
||||
})
|
||||
.then((response) => response.json())
|
||||
.then((data) => {
|
||||
if (data.success) {
|
||||
// Update the dropdown with the new file list
|
||||
node.widgets[0].options.values = data.files; // Assuming the dropdown is the first widget
|
||||
node.widgets[0].value = data.files[0] || ""; // Set default value
|
||||
app.ui.dialog.show(
|
||||
"[LoadTextFromFolder] File list refreshed successfully!"
|
||||
);
|
||||
} else {
|
||||
app.ui.dialog.show(
|
||||
`[LoadTextFromFolder] Failed to refresh file list: ${
|
||||
data.error || "Unknown error"
|
||||
}`
|
||||
);
|
||||
}
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error(
|
||||
"[LoadTextFromFolder] Error fetching text files:",
|
||||
error
|
||||
);
|
||||
app.ui.dialog.show(
|
||||
"[LoadTextFromFolder] An error occurred while refreshing the file list."
|
||||
);
|
||||
});
|
||||
}
|
||||
);
|
||||
}
|
||||
},
|
||||
});
|
||||
58
web/js/math_node.js
Normal file
@@ -0,0 +1,58 @@
|
||||
import { app } from "../../../scripts/app.js";
|
||||
|
||||
app.registerExtension({
|
||||
name: "Bjornulf.MathNode",
|
||||
async nodeCreated(node) {
|
||||
if (node.comfyClass === "Bjornulf_MathNode") {
|
||||
const updateInputs = () => {
|
||||
const initialWidth = node.size[0];
|
||||
const numInputsWidget = node.widgets.find(w => w.name === "num_inputs");
|
||||
if (!numInputsWidget) return;
|
||||
|
||||
const numInputs = numInputsWidget.value;
|
||||
|
||||
// Initialize node.inputs if it doesn't exist
|
||||
if (!node.inputs) {
|
||||
node.inputs = [];
|
||||
}
|
||||
|
||||
// Filter existing value inputs
|
||||
const existingInputs = node.inputs.filter(input => input.name.startsWith("value_"));
|
||||
|
||||
// Add new inputs if needed
|
||||
if (existingInputs.length < numInputs) {
|
||||
for (let i = existingInputs.length + 1; i <= numInputs; i++) {
|
||||
const inputName = `value_${i}`;
|
||||
if (!node.inputs.find(input => input.name === inputName)) {
|
||||
node.addInput(inputName); // Type is defined in Python
|
||||
}
|
||||
}
|
||||
}
|
||||
// Remove excess inputs if too many
|
||||
else {
|
||||
node.inputs = node.inputs.filter(input =>
|
||||
!input.name.startsWith("value_") ||
|
||||
parseInt(input.name.split("_")[1]) <= numInputs
|
||||
);
|
||||
}
|
||||
|
||||
// Adjust node size while preserving width
|
||||
node.setSize(node.computeSize());
|
||||
node.size[0] = initialWidth;
|
||||
};
|
||||
|
||||
// Ensure num_inputs widget is at the top and set its callback
|
||||
const numInputsWidget = node.widgets.find(w => w.name === "num_inputs");
|
||||
if (numInputsWidget) {
|
||||
node.widgets = [numInputsWidget, ...node.widgets.filter(w => w !== numInputsWidget)];
|
||||
numInputsWidget.callback = () => {
|
||||
updateInputs();
|
||||
app.graph.setDirtyCanvas(true);
|
||||
};
|
||||
}
|
||||
|
||||
// Perform initial input update after node creation
|
||||
setTimeout(updateInputs, 0);
|
||||
}
|
||||
}
|
||||
});
|
||||
@@ -1,44 +1,44 @@
|
||||
import { app } from "../../../scripts/app.js";
|
||||
// import { app } from "../../../scripts/app.js";
|
||||
|
||||
app.registerExtension({
|
||||
name: "Bjornulf.ImageNoteLoadImage",
|
||||
async nodeCreated(node) {
|
||||
// Ensure the node is of the specific class
|
||||
if (node.comfyClass !== "Bjornulf_ImageNoteLoadImage") return;
|
||||
console.log("node created");
|
||||
// app.registerExtension({
|
||||
// name: "Bjornulf.ImageNoteLoadImage",
|
||||
// async nodeCreated(node) {
|
||||
// // Ensure the node is of the specific class
|
||||
// if (node.comfyClass !== "Bjornulf_ImageNoteLoadImage") return;
|
||||
// console.log("node created");
|
||||
|
||||
// Store the initial node size
|
||||
let prevSize = [...node.size];
|
||||
let stableCount = 0;
|
||||
const minStableFrames = 3; // Number of frames the size must remain stable
|
||||
// // Store the initial node size
|
||||
// let prevSize = [...node.size];
|
||||
// let stableCount = 0;
|
||||
// const minStableFrames = 3; // Number of frames the size must remain stable
|
||||
|
||||
// Function to check if the node's size has stabilized
|
||||
const checkSizeStable = () => {
|
||||
if (node.size[0] === prevSize[0] && node.size[1] === prevSize[1]) {
|
||||
stableCount++;
|
||||
if (stableCount >= minStableFrames) {
|
||||
// Size has been stable, simulate a resize to trigger layout update
|
||||
const originalSize = [...node.size];
|
||||
node.setSize([originalSize[0] + 1, originalSize[1]]); // Slightly increase width
|
||||
setTimeout(() => {
|
||||
node.setSize(originalSize); // Revert to original size
|
||||
app.graph.setDirtyCanvas(true, true); // Trigger canvas redraw
|
||||
}, 0);
|
||||
} else {
|
||||
// Size is stable but not for enough frames yet, check again
|
||||
requestAnimationFrame(checkSizeStable);
|
||||
}
|
||||
} else {
|
||||
// Size changed, reset counter and update prevSize
|
||||
prevSize = [...node.size];
|
||||
stableCount = 0;
|
||||
requestAnimationFrame(checkSizeStable);
|
||||
}
|
||||
};
|
||||
// // Function to check if the node's size has stabilized
|
||||
// const checkSizeStable = () => {
|
||||
// if (node.size[0] === prevSize[0] && node.size[1] === prevSize[1]) {
|
||||
// stableCount++;
|
||||
// if (stableCount >= minStableFrames) {
|
||||
// // Size has been stable, simulate a resize to trigger layout update
|
||||
// const originalSize = [...node.size];
|
||||
// node.setSize([originalSize[0] + 1, originalSize[1]]); // Slightly increase width
|
||||
// setTimeout(() => {
|
||||
// node.setSize(originalSize); // Revert to original size
|
||||
// app.graph.setDirtyCanvas(true, true); // Trigger canvas redraw
|
||||
// }, 0);
|
||||
// } else {
|
||||
// // Size is stable but not for enough frames yet, check again
|
||||
// requestAnimationFrame(checkSizeStable);
|
||||
// }
|
||||
// } else {
|
||||
// // Size changed, reset counter and update prevSize
|
||||
// prevSize = [...node.size];
|
||||
// stableCount = 0;
|
||||
// requestAnimationFrame(checkSizeStable);
|
||||
// }
|
||||
// };
|
||||
|
||||
// Start checking after a short delay to allow node initialization
|
||||
setTimeout(() => {
|
||||
requestAnimationFrame(checkSizeStable);
|
||||
}, 5000);
|
||||
}
|
||||
});
|
||||
// // Start checking after a short delay to allow node initialization
|
||||
// setTimeout(() => {
|
||||
// requestAnimationFrame(checkSizeStable);
|
||||
// }, 5000);
|
||||
// }
|
||||
// });
|
||||
|
||||
@@ -7,10 +7,10 @@ app.registerExtension({
|
||||
async nodeCreated(node) {
|
||||
if (node.comfyClass === "Bjornulf_OllamaTalk") {
|
||||
// Set seed widget to hidden input
|
||||
const seedWidget = node.widgets.find((w) => w.name === "seed");
|
||||
if (seedWidget) {
|
||||
seedWidget.type = "HIDDEN";
|
||||
}
|
||||
// const seedWidget = node.widgets.find((w) => w.name === "seed");
|
||||
// if (seedWidget) {
|
||||
// seedWidget.type = "HIDDEN";
|
||||
// }
|
||||
|
||||
// Function to update the Reset Button text
|
||||
const updateResetButtonTextNode = () => {
|
||||
|
||||
186
web/js/style_selector.js
Normal file
@@ -0,0 +1,186 @@
|
||||
// // style_selector.js
|
||||
// import { app } from "../../../scripts/app.js";
|
||||
|
||||
// app.registerExtension({
|
||||
// name: "Bjornulf.StyleSelector",
|
||||
// async nodeCreated(node) {
|
||||
// // Only apply to the Bjornulf_StyleSelector node
|
||||
// if (node.comfyClass !== "Bjornulf_StyleSelector") return;
|
||||
|
||||
// // Find the widgets for LOOP_random_LIST and LOOP_style_LIST
|
||||
// const loopRandomWidget = node.widgets.find(w => w.name === "LOOP_random_LIST");
|
||||
// const loopStyleWidget = node.widgets.find(w => w.name === "LOOP_style_LIST");
|
||||
|
||||
// // Check if widgets exist to avoid errors
|
||||
// if (!loopRandomWidget || !loopStyleWidget) {
|
||||
// console.error("LOOP widgets not found in Bjornulf_StyleSelector node.");
|
||||
// return;
|
||||
// }
|
||||
|
||||
// // Function to toggle the other widget off when one is turned on
|
||||
// const toggleExclusive = (widgetToToggle, otherWidget) => {
|
||||
// if (widgetToToggle.value === true) {
|
||||
// otherWidget.value = false;
|
||||
// }
|
||||
// };
|
||||
|
||||
// // Add event listeners to handle toggling
|
||||
// loopRandomWidget.callback = () => {
|
||||
// toggleExclusive(loopRandomWidget, loopStyleWidget);
|
||||
// };
|
||||
|
||||
// loopStyleWidget.callback = () => {
|
||||
// toggleExclusive(loopStyleWidget, loopRandomWidget);
|
||||
// };
|
||||
|
||||
// // Ensure initial state has only one enabled (if both are true, disable one)
|
||||
// if (loopRandomWidget.value && loopStyleWidget.value) {
|
||||
// loopStyleWidget.value = false; // Default to disabling LOOP_style_LIST
|
||||
// }
|
||||
|
||||
// // Find the category and style widgets
|
||||
// const categoryWidget = node.widgets.find(w => w.name === "category");
|
||||
// const styleWidget = node.widgets.find(w => w.name === "style");
|
||||
|
||||
// // Define categories and styles (must match the Python file)
|
||||
// const BRANCHES = {
|
||||
// "Painting": [
|
||||
// "Renaissance", "Baroque", "Rococo", "Neoclassicism",
|
||||
// "Romanticism", "Realism", "Impressionism", "Post-Impressionism",
|
||||
// "Expressionism", "Fauvism", "Cubism", "Futurism", "Dadaism",
|
||||
// "Surrealism", "Abstract Expressionism", "Pop Art", "Op Art",
|
||||
// "Minimalism"
|
||||
// ],
|
||||
// "Photography": [
|
||||
// "Black and White", "Color", "Vintage", "Sepia Tone", "HDR",
|
||||
// "Long Exposure", "Macro", "Portrait", "Landscape", "Street",
|
||||
// "Fashion", "Analog Film", "Cinematic"
|
||||
// ],
|
||||
// "Digital Art": [
|
||||
// "Digital Painting", "Vector Art", "Pixel Art", "Fractal Art",
|
||||
// "Algorithmic Art", "Glitch Art"
|
||||
// ],
|
||||
// "3D Rendering": [
|
||||
// "Low Poly", "Voxel", "Isometric", "Ray Tracing"
|
||||
// ],
|
||||
// "Illustration": [
|
||||
// "Line Art", "Cartoon", "Comic Book", "Manga", "Anime",
|
||||
// "Technical Illustration", "Botanical Illustration",
|
||||
// "Architectural Rendering", "Concept Art", "Storyboard Art"
|
||||
// ],
|
||||
// };
|
||||
|
||||
// // Function to update the style dropdown based on the selected category
|
||||
// const updateStyles = () => {
|
||||
// const selectedCategory = categoryWidget.value;
|
||||
// const styles = BRANCHES[selectedCategory] || [];
|
||||
// styleWidget.options.values = styles;
|
||||
// if (styles.length > 0) {
|
||||
// styleWidget.value = styles[0]; // Set to the first style
|
||||
// } else {
|
||||
// styleWidget.value = ""; // Fallback if no styles
|
||||
// }
|
||||
// node.setDirtyCanvas(true); // Refresh the UI
|
||||
// };
|
||||
|
||||
// // Initialize the style dropdown
|
||||
// updateStyles();
|
||||
|
||||
// // Update the style dropdown whenever the category changes
|
||||
// categoryWidget.callback = updateStyles;
|
||||
// }
|
||||
// });
|
||||
import { app } from "../../../scripts/app.js";
|
||||
|
||||
app.registerExtension({
|
||||
name: "Bjornulf.StyleSelector",
|
||||
async nodeCreated(node) {
|
||||
if (node.comfyClass !== "Bjornulf_StyleSelector") return;
|
||||
|
||||
// Find loop widgets
|
||||
const loopRandomWidget = node.widgets.find(w => w.name === "LOOP_random_LIST");
|
||||
const loopStyleWidget = node.widgets.find(w => w.name === "LOOP_style_LIST");
|
||||
const loopSequentialWidget = node.widgets.find(w => w.name === "LOOP_SEQUENTIAL");
|
||||
|
||||
// Function to toggle the other widget off when one is turned on
|
||||
const toggleExclusive = (widgetToToggle, otherWidget) => {
|
||||
if (widgetToToggle.value === true) {
|
||||
otherWidget.value = false;
|
||||
}
|
||||
};
|
||||
|
||||
// Add event listeners to handle toggling
|
||||
loopRandomWidget.callback = () => {
|
||||
toggleExclusive(loopRandomWidget, loopStyleWidget);
|
||||
};
|
||||
loopStyleWidget.callback = () => {
|
||||
toggleExclusive(loopStyleWidget, loopRandomWidget);
|
||||
};
|
||||
|
||||
// Ensure initial state has only one enabled (if both are true, disable one)
|
||||
if (loopRandomWidget.value && loopStyleWidget.value) {
|
||||
loopStyleWidget.value = false; // Default to disabling LOOP_style_LIST
|
||||
}
|
||||
|
||||
// Add reset button for style list counter
|
||||
const styleResetButton = node.addWidget(
|
||||
"button",
|
||||
"Reset Style Counter",
|
||||
null,
|
||||
async () => {
|
||||
try {
|
||||
const response = await fetch("/reset_style_list_counter", { method: "POST" });
|
||||
const data = await response.json();
|
||||
if (data.success) {
|
||||
app.ui.dialog.show("[Style Selector] Style counter reset successfully.");
|
||||
} else {
|
||||
app.ui.dialog.show("[Style Selector] Failed to reset style counter.");
|
||||
}
|
||||
} catch (error) {
|
||||
app.ui.dialog.show("[Style Selector] Error resetting style counter.");
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
// Add reset button for model list counter
|
||||
const modelResetButton = node.addWidget(
|
||||
"button",
|
||||
"Reset Model Counter",
|
||||
null,
|
||||
async () => {
|
||||
try {
|
||||
const response = await fetch("/reset_model_list_counter", { method: "POST" });
|
||||
const data = await response.json();
|
||||
if (data.success) {
|
||||
app.ui.dialog.show("[Style Selector] Model counter reset successfully.");
|
||||
} else {
|
||||
app.ui.dialog.show("[Style Selector] Failed to reset model counter.");
|
||||
}
|
||||
} catch (error) {
|
||||
app.ui.dialog.show("[Style Selector] Error resetting model counter.");
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
// Function to update visibility of reset buttons
|
||||
const updateButtonVisibility = () => {
|
||||
const sequentialEnabled = loopSequentialWidget.value;
|
||||
styleResetButton.type = sequentialEnabled && loopStyleWidget.value ? "button" : "hidden";
|
||||
modelResetButton.type = sequentialEnabled && loopRandomWidget.value ? "button" : "hidden";
|
||||
};
|
||||
|
||||
// Initial update of button visibility
|
||||
setTimeout(updateButtonVisibility, 0);
|
||||
|
||||
// Update visibility when widgets change
|
||||
loopSequentialWidget.callback = updateButtonVisibility;
|
||||
loopStyleWidget.callback = () => {
|
||||
toggleExclusive(loopStyleWidget, loopRandomWidget);
|
||||
updateButtonVisibility();
|
||||
};
|
||||
loopRandomWidget.callback = () => {
|
||||
toggleExclusive(loopRandomWidget, loopStyleWidget);
|
||||
updateButtonVisibility();
|
||||
};
|
||||
}
|
||||
});
|
||||
@@ -31,7 +31,8 @@ app.registerExtension({
|
||||
// Function to update voices based on selected language
|
||||
const updateVoicesForLanguage = async (selectedLanguage) => {
|
||||
try {
|
||||
const response = await fetch('/bjornulf_xtts_get_voices', {
|
||||
// const response = await fetch('/bjornulf_xtts_get_voices', {
|
||||
const response = await fetch('/bjornulf_TTS_get_voices', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
|
||||
@@ -1,12 +1,16 @@
|
||||
import { api } from '../../../scripts/api.js';
|
||||
import { app } from "../../../scripts/app.js";
|
||||
|
||||
function displayVideoPreview(component, filename, category, autoplay, mute) {
|
||||
// Function to display the video preview
|
||||
function displayVideoPreview(component, filename, category, autoplay, mute, loop) {
|
||||
let videoWidget = component._videoWidget;
|
||||
|
||||
// Create the video widget if it doesn't exist
|
||||
if (!videoWidget) {
|
||||
// Create the widget if it doesn't exist
|
||||
var container = document.createElement("div");
|
||||
const container = document.createElement("div");
|
||||
const currentNode = component;
|
||||
|
||||
// Add the DOM widget to the component
|
||||
videoWidget = component.addDOMWidget("videopreview", "preview", container, {
|
||||
serialize: false,
|
||||
hideOnZoom: false,
|
||||
@@ -17,73 +21,83 @@ function displayVideoPreview(component, filename, category, autoplay, mute) {
|
||||
container.value = v;
|
||||
},
|
||||
});
|
||||
|
||||
// Define how the widget computes its size
|
||||
videoWidget.computeSize = function(width) {
|
||||
if (this.aspectRatio && !this.parentElement.hidden) {
|
||||
let height = (currentNode.size[0] - 20) / this.aspectRatio + 10;
|
||||
if (!(height > 0)) {
|
||||
height = 0;
|
||||
}
|
||||
return [width, height];
|
||||
return [width, height > 0 ? height : 0];
|
||||
}
|
||||
return [width, -4];
|
||||
};
|
||||
|
||||
// Initialize widget properties
|
||||
videoWidget.value = { hidden: false, paused: false, params: {} };
|
||||
videoWidget.parentElement = document.createElement("div");
|
||||
videoWidget.parentElement.className = "video_preview";
|
||||
videoWidget.parentElement.style['width'] = "100%";
|
||||
videoWidget.parentElement.style.width = "100%";
|
||||
container.appendChild(videoWidget.parentElement);
|
||||
|
||||
// Create the video element
|
||||
videoWidget.videoElement = document.createElement("video");
|
||||
videoWidget.videoElement.controls = true;
|
||||
videoWidget.videoElement.loop = false;
|
||||
videoWidget.videoElement.muted = false;
|
||||
videoWidget.videoElement.style['width'] = "100%";
|
||||
videoWidget.videoElement.style.width = "100%";
|
||||
|
||||
// Update aspect ratio when metadata is loaded
|
||||
videoWidget.videoElement.addEventListener("loadedmetadata", () => {
|
||||
videoWidget.aspectRatio = videoWidget.videoElement.videoWidth / videoWidget.videoElement.videoHeight;
|
||||
adjustSize(component);
|
||||
});
|
||||
|
||||
// Hide the video on error
|
||||
videoWidget.videoElement.addEventListener("error", () => {
|
||||
videoWidget.parentElement.hidden = true;
|
||||
adjustSize(component);
|
||||
});
|
||||
|
||||
videoWidget.parentElement.hidden = videoWidget.value.hidden;
|
||||
videoWidget.videoElement.autoplay = !videoWidget.value.paused && !videoWidget.value.hidden;
|
||||
videoWidget.videoElement.hidden = false;
|
||||
videoWidget.parentElement.appendChild(videoWidget.videoElement);
|
||||
component._videoWidget = videoWidget; // Store the widget for future reference
|
||||
component._videoWidget = videoWidget; // Store for reuse
|
||||
}
|
||||
|
||||
// Update the video source
|
||||
let params = {
|
||||
// Set video source and properties
|
||||
const params = {
|
||||
"filename": filename,
|
||||
"subfolder": category,
|
||||
"type": "output",
|
||||
"rand": Math.random().toString().slice(2, 12)
|
||||
"rand": Math.random().toString().slice(2, 12) // Cache-busting random parameter
|
||||
};
|
||||
const urlParams = new URLSearchParams(params);
|
||||
if(mute) videoWidget.videoElement.muted = true;
|
||||
else videoWidget.videoElement.muted = false;
|
||||
if(autoplay) videoWidget.videoElement.autoplay = !videoWidget.value.paused && !videoWidget.value.hidden;
|
||||
else videoWidget.videoElement.autoplay = false;
|
||||
// videoWidget.videoElement.src = `http://localhost:8188/api/view?${urlParams.toString()}`;
|
||||
videoWidget.videoElement.src = `api/view?${urlParams.toString()}`;
|
||||
|
||||
adjustSize(component); // Adjust the component size
|
||||
const urlParams = new URLSearchParams(params);
|
||||
videoWidget.videoElement.src = `api/view?${urlParams.toString()}`;
|
||||
videoWidget.videoElement.muted = mute;
|
||||
videoWidget.videoElement.autoplay = autoplay && !videoWidget.value.paused && !videoWidget.value.hidden;
|
||||
videoWidget.videoElement.loop = loop;
|
||||
|
||||
// Adjust the component size after setting the video
|
||||
adjustSize(component);
|
||||
}
|
||||
|
||||
// Function to adjust the component size
|
||||
function adjustSize(component) {
|
||||
component.setSize([component.size[0], component.computeSize([component.size[0], component.size[1]])[1]]);
|
||||
const newSize = component.computeSize([component.size[0], component.size[1]]);
|
||||
component.setSize([component.size[0], newSize[1]]);
|
||||
component?.graph?.setDirtyCanvas(true);
|
||||
}
|
||||
|
||||
// Register the extension
|
||||
app.registerExtension({
|
||||
name: "Bjornulf.VideoPreview",
|
||||
async beforeRegisterNodeDef(nodeType, nodeData, appInstance) {
|
||||
if (nodeData?.name == "Bjornulf_VideoPreview") {
|
||||
nodeType.prototype.onExecuted = function (data) {
|
||||
if (nodeData?.name === "Bjornulf_VideoPreview") {
|
||||
nodeType.prototype.onExecuted = function(data) {
|
||||
// Retrieve widget values with defaults
|
||||
const autoplay = this.widgets.find(w => w.name === "autoplay")?.value ?? false;
|
||||
const mute = this.widgets.find(w => w.name === "mute")?.value ?? true;
|
||||
displayVideoPreview(this, data.video[0], data.video[1], autoplay, mute);
|
||||
const loop = this.widgets.find(w => w.name === "loop")?.value ?? false;
|
||||
|
||||
// Display the video preview with the retrieved values
|
||||
displayVideoPreview(this, data.video[0], data.video[1], autoplay, mute, loop);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,8 +5,12 @@ import csv
|
||||
from itertools import cycle
|
||||
|
||||
#{red|blue}
|
||||
#{left|right|middle|group=LR}+{left|right|middle|group=LR}+{left|right|middle|group=LR}
|
||||
#{left|right|middle|group=LMR}+{left|right|middle|group=LMR}+{left|right|middle|group=LMR}
|
||||
#{A(80%)|B(15%)|C(5%)}
|
||||
#2 {apple|orange|banana|static_group=FRUIT}s, one {apple|orange|banana|static_group=FRUIT} on the left, one {apple|orange|banana|static_group=FRUIT} on the right
|
||||
#Double layer variable : <<CHAR>>
|
||||
#CHAR = JESSICA
|
||||
#JESSICA = en/jess.wav
|
||||
|
||||
class WriteTextAdvanced:
|
||||
@classmethod
|
||||
@@ -28,6 +32,7 @@ class WriteTextAdvanced:
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def find_variables(self, text):
|
||||
"""Identify nested curly brace sections in the text."""
|
||||
stack = []
|
||||
variables = []
|
||||
for i, char in enumerate(text):
|
||||
@@ -40,10 +45,12 @@ class WriteTextAdvanced:
|
||||
'end': i + 1,
|
||||
'nesting': nesting
|
||||
})
|
||||
# Sort by descending nesting level and position to process inner variables first
|
||||
variables.sort(key=lambda x: (-x['nesting'], -x['end']))
|
||||
return variables
|
||||
|
||||
def parse_option(self, part):
|
||||
"""Parse options within curly braces, handling CSV and weighted choices."""
|
||||
if part.startswith('%csv='):
|
||||
try:
|
||||
filename = part.split('=', 1)[1].strip()
|
||||
@@ -56,49 +63,66 @@ class WriteTextAdvanced:
|
||||
return (option.strip(), float(weight.split('%)')[0]))
|
||||
return part.strip()
|
||||
|
||||
def process_content(self, content, seed):
|
||||
random.seed(seed)
|
||||
parts = []
|
||||
def process_content(self, content, base_seed, position):
|
||||
"""Process content within curly braces, handling groups and random choices."""
|
||||
# Use position to vary the seed for independent random choices
|
||||
random.seed(base_seed + position)
|
||||
parts = [p.strip() for p in content.split('|')]
|
||||
options = []
|
||||
weights = []
|
||||
group_defined = False
|
||||
group_name = None
|
||||
static_group = None
|
||||
cycling_group = None
|
||||
|
||||
for p in content.split('|'):
|
||||
p = p.strip()
|
||||
if p.startswith('group='):
|
||||
group_name = p.split('=', 1)[1].strip()
|
||||
group_defined = True
|
||||
continue
|
||||
|
||||
parsed = self.parse_option(p)
|
||||
if isinstance(parsed, list): # CSV data
|
||||
parts.extend(parsed)
|
||||
weights.extend([1]*len(parsed))
|
||||
elif isinstance(parsed, tuple): # Weighted option
|
||||
parts.append(parsed[0])
|
||||
weights.append(parsed[1])
|
||||
for p in parts:
|
||||
if p.startswith('static_group='):
|
||||
static_group = p.split('=', 1)[1].strip()
|
||||
elif p.startswith('group='):
|
||||
cycling_group = p.split('=', 1)[1].strip()
|
||||
else:
|
||||
parts.append(parsed)
|
||||
weights.append(1)
|
||||
|
||||
if group_defined:
|
||||
return {'type': 'group', 'name': group_name, 'options': parts}
|
||||
|
||||
if any(w != 1 for w in weights):
|
||||
total = sum(weights)
|
||||
if total == 0: weights = [1]*len(parts)
|
||||
return random.choices(parts, weights=[w/total for w in weights])[0]
|
||||
|
||||
return random.choice(parts) if parts else ''
|
||||
parsed = self.parse_option(p)
|
||||
if isinstance(parsed, list): # CSV data
|
||||
options.extend(parsed)
|
||||
weights.extend([1] * len(parsed))
|
||||
elif isinstance(parsed, tuple): # Weighted option
|
||||
options.append(parsed[0])
|
||||
weights.append(parsed[1])
|
||||
else:
|
||||
options.append(parsed)
|
||||
weights.append(1)
|
||||
|
||||
if static_group and cycling_group:
|
||||
raise ValueError("Cannot specify both static_group and group in the same curly brace section.")
|
||||
|
||||
if static_group:
|
||||
return {'type': 'static_group', 'name': static_group, 'options': options, 'weights': weights}
|
||||
elif cycling_group:
|
||||
return {'type': 'cycling_group', 'name': cycling_group, 'options': options, 'weights': weights}
|
||||
else:
|
||||
if options:
|
||||
if any(w != 1 for w in weights):
|
||||
total = sum(weights)
|
||||
if total == 0:
|
||||
weights = [1] * len(options)
|
||||
return random.choices(options, weights=[w / total for w in weights])[0]
|
||||
else:
|
||||
return random.choice(options)
|
||||
return ''
|
||||
|
||||
def write_text_special(self, text, variables="", seed=None):
|
||||
"""Main function to process text with special syntax."""
|
||||
if seed is None or seed == 0:
|
||||
seed = int(time.time() * 1000)
|
||||
random.seed(seed)
|
||||
|
||||
# Handle variables
|
||||
# Process text: remove comments and empty lines
|
||||
text_lines = [line.strip() for line in text.split('\n')
|
||||
if line.strip() and not line.strip().startswith('#')]
|
||||
text = '\n'.join(text_lines)
|
||||
|
||||
# Replace predefined variables
|
||||
var_dict = {}
|
||||
for line in variables.split('\n'):
|
||||
var_lines = [line.strip() for line in variables.split('\n')
|
||||
if line.strip() and not line.strip().startswith('#')]
|
||||
for line in var_lines:
|
||||
if '=' in line:
|
||||
key, value = line.split('=', 1)
|
||||
var_dict[key.strip()] = value.strip()
|
||||
@@ -107,23 +131,36 @@ class WriteTextAdvanced:
|
||||
|
||||
# Process nested variables
|
||||
variables = self.find_variables(text)
|
||||
static_groups = {}
|
||||
cycling_groups = {}
|
||||
substitutions = []
|
||||
groups = {}
|
||||
|
||||
for var in variables:
|
||||
start, end = var['start'], var['end']
|
||||
content = text[start+1:end-1]
|
||||
processed = self.process_content(content, seed)
|
||||
content = text[start + 1:end - 1]
|
||||
# Pass the position (start) to vary the seed for non-group choices
|
||||
processed = self.process_content(content, seed, start)
|
||||
|
||||
if isinstance(processed, dict):
|
||||
if processed['type'] == 'group':
|
||||
group_name = processed['name']
|
||||
if group_name not in groups:
|
||||
groups[group_name] = []
|
||||
groups[group_name].append({
|
||||
if processed['type'] == 'static_group':
|
||||
name = processed['name']
|
||||
if name not in static_groups:
|
||||
static_groups[name] = []
|
||||
static_groups[name].append({
|
||||
'start': start,
|
||||
'end': end,
|
||||
'options': processed['options']
|
||||
'options': processed['options'],
|
||||
'weights': processed['weights']
|
||||
})
|
||||
elif processed['type'] == 'cycling_group':
|
||||
name = processed['name']
|
||||
if name not in cycling_groups:
|
||||
cycling_groups[name] = []
|
||||
cycling_groups[name].append({
|
||||
'start': start,
|
||||
'end': end,
|
||||
'options': processed['options'],
|
||||
'weights': processed['weights']
|
||||
})
|
||||
else:
|
||||
substitutions.append({
|
||||
@@ -132,15 +169,35 @@ class WriteTextAdvanced:
|
||||
'sub': processed
|
||||
})
|
||||
|
||||
# Handle groups
|
||||
for group_name, matches in groups.items():
|
||||
# Handle static groups: choose one value per group name
|
||||
random.seed(seed) # Reset seed for consistent static group behavior
|
||||
for name, matches in static_groups.items():
|
||||
if not matches or not matches[0]['options']:
|
||||
continue
|
||||
options = matches[0]['options']
|
||||
weights = matches[0]['weights']
|
||||
if any(w != 1 for w in weights):
|
||||
total = sum(weights)
|
||||
if total == 0:
|
||||
weights = [1] * len(options)
|
||||
chosen = random.choices(options, weights=[w / total for w in weights])[0]
|
||||
else:
|
||||
chosen = random.choice(options) if options else ''
|
||||
for m in matches:
|
||||
substitutions.append({
|
||||
'start': m['start'],
|
||||
'end': m['end'],
|
||||
'sub': chosen
|
||||
})
|
||||
|
||||
# Handle cycling groups: cycle through shuffled options
|
||||
random.seed(seed) # Reset seed for consistent cycling group behavior
|
||||
for name, matches in cycling_groups.items():
|
||||
if not matches or not matches[0]['options']:
|
||||
continue
|
||||
|
||||
options = matches[0]['options']
|
||||
permuted = random.sample(options, len(options))
|
||||
perm_cycle = cycle(permuted)
|
||||
|
||||
for m in matches:
|
||||
substitutions.append({
|
||||
'start': m['start'],
|
||||
@@ -148,7 +205,7 @@ class WriteTextAdvanced:
|
||||
'sub': next(perm_cycle)
|
||||
})
|
||||
|
||||
# Apply regular substitutions
|
||||
# Apply substitutions in reverse order
|
||||
substitutions.sort(key=lambda x: -x['start'])
|
||||
result_text = text
|
||||
for sub in substitutions:
|
||||
@@ -158,4 +215,5 @@ class WriteTextAdvanced:
|
||||
|
||||
@classmethod
|
||||
def IS_CHANGED(s, text, variables="", seed=None):
|
||||
"""Check if inputs have changed."""
|
||||
return (text, variables, seed)
|
||||