mirror of
https://github.com/tusharbhutt/Endless-Nodes.git
synced 2026-03-21 20:42:12 -03:00
Add files via upload
This commit is contained in:
358
endless_nodes.py
358
endless_nodes.py
@@ -46,8 +46,8 @@ from PIL.PngImagePlugin import PngInfo
|
||||
from colorama import init, Fore, Back, Style
|
||||
from os.path import join
|
||||
from warnings import filterwarnings
|
||||
# import ImageReward as RM
|
||||
# import clip
|
||||
import ImageReward as RM
|
||||
import clip
|
||||
import colorama
|
||||
import datetime
|
||||
import folder_paths
|
||||
@@ -56,7 +56,7 @@ import json
|
||||
import math
|
||||
import numpy as np
|
||||
import os
|
||||
#import pytorch_lightning as pl
|
||||
import pytorch_lightning as pl
|
||||
import re
|
||||
import socket
|
||||
import statistics
|
||||
@@ -64,7 +64,7 @@ import sys
|
||||
import time
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
# import random
|
||||
import random
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))
|
||||
|
||||
@@ -324,6 +324,42 @@ class EndlessNode_XLParameterizer:
|
||||
|
||||
def Parameterizer(self, base_width, base_height, base_crop_w, base_crop_h, base_target_w, base_target_h, refiner_width, refiner_height, refiner_ascore):
    """Pass the SDXL base/refiner sizing parameters straight through.

    Returns the nine inputs unchanged, in the same order, so downstream
    nodes can fan them out from a single parameter box.
    """
    return (
        base_width,
        base_height,
        base_crop_w,
        base_crop_h,
        base_target_w,
        base_target_h,
        refiner_width,
        refiner_height,
        refiner_ascore,
    )
|
||||
|
||||
|
||||
|
||||
|
||||
#----------------------------------------------
|
||||
# CLIP text encode box without prompt (short)
|
||||
|
||||
class EndlessNode_XLGlobalEnvoy:
    """Emit global width/height plus start/switchover/end sampler steps.

    When ``percstep`` is non-zero, the switchover step is derived as a
    fraction of the end step instead of using the explicit ``switchover``
    input.
    """

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "width": ("INT", {"default": 1024, "min": 64, "max": 8192, "step": 16}),
                "height": ("INT", {"default": 1024, "min": 64, "max": 8192, "step": 16}),
                "start": ("INT", {"default": 0, "min": 0, "max": 2048, "step": 1}),
                "switchover": ("INT", {"default": 0, "min": 0, "max": 2048, "step": 1}),
                "stop": ("INT", {"default": 1, "min": 1, "max": 2048, "step": 1}),
                "percstep": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            },
            "display_names": {"percstep": "Switchover Percentage",
            }
        }
    RETURN_TYPES = ("INT","INT","INT","INT","INT",)
    RETURN_NAMES = ("Width","Height","Start Step", "Switchover at Step", "End Step")
    FUNCTION = "global_envoy"

    CATEGORY = "Endless 🌊✨/Parameters"

    def global_envoy(self, width, height, start, switchover, stop, percstep):
        # A non-zero percentage overrides the explicit switchover step.
        if percstep != 0.0:
            switchover = round(stop * percstep)
        # BUGFIX: previously returned (..., stop, switchover), which swapped
        # the last two outputs relative to RETURN_NAMES ("Switchover at Step"
        # is declared BEFORE "End Step").
        return (width, height, start, switchover, stop)
||||
|
||||
#----------------------------------------------
|
||||
# Text Encode Combo Box with prompt
|
||||
@@ -404,91 +440,117 @@ class EndlessNode_ComboXLParameterizer:
|
||||
#----------------------------------------------
|
||||
# Aesthetic Scoring Node
|
||||
|
||||
# folder_paths.folder_names_and_paths["aesthetic"] = ([os.path.join(folder_paths.models_dir,"aesthetic")], folder_paths.supported_pt_extensions)
|
||||
folder_paths.folder_names_and_paths["aesthetic"] = ([os.path.join(folder_paths.models_dir,"aesthetic")], folder_paths.supported_pt_extensions)
|
||||
|
||||
|
||||
# class MLP(pl.LightningModule):
|
||||
# def __init__(self, input_size, xcol='emb', ycol='avg_rating'):
|
||||
# super().__init__()
|
||||
# self.input_size = input_size
|
||||
# self.xcol = xcol
|
||||
# self.ycol = ycol
|
||||
# self.layers = nn.Sequential(
|
||||
# nn.Linear(self.input_size, 1024),
|
||||
# #nn.ReLU(),
|
||||
# nn.Dropout(0.2),
|
||||
# nn.Linear(1024, 128),
|
||||
# #nn.ReLU(),
|
||||
# nn.Dropout(0.2),
|
||||
# nn.Linear(128, 64),
|
||||
# #nn.ReLU(),
|
||||
# nn.Dropout(0.1),
|
||||
# nn.Linear(64, 16),
|
||||
# #nn.ReLU(),
|
||||
# nn.Linear(16, 1)
|
||||
# )
|
||||
# def forward(self, x):
|
||||
# return self.layers(x)
|
||||
# def training_step(self, batch, batch_idx):
|
||||
# x = batch[self.xcol]
|
||||
# y = batch[self.ycol].reshape(-1, 1)
|
||||
# x_hat = self.layers(x)
|
||||
# loss = F.mse_loss(x_hat, y)
|
||||
# return loss
|
||||
# def validation_step(self, batch, batch_idx):
|
||||
# x = batch[self.xcol]
|
||||
# y = batch[self.ycol].reshape(-1, 1)
|
||||
# x_hat = self.layers(x)
|
||||
# loss = F.mse_loss(x_hat, y)
|
||||
# return loss
|
||||
# def configure_optimizers(self):
|
||||
# optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
|
||||
# return optimizer
|
||||
# def normalized(a, axis=-1, order=2):
|
||||
# import numpy as np # pylint: disable=import-outside-toplevel
|
||||
# l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
|
||||
# l2[l2 == 0] = 1
|
||||
# return a / np.expand_dims(l2, axis)
|
||||
class MLP(pl.LightningModule):
    """Small fully connected regression head used for aesthetic scoring.

    Maps a CLIP image embedding (768-dim for ViT-L/14) to a single scalar
    rating; trained/validated with MSE loss against ``avg_rating``.
    """

    def __init__(self, input_size, xcol='emb', ycol='avg_rating'):
        super().__init__()
        self.input_size = input_size  # embedding dimension (768 for CLIP ViT-L/14)
        self.xcol = xcol              # batch key holding the embedding tensor
        self.ycol = ycol              # batch key holding the target rating
        self.layers = nn.Sequential(
            nn.Linear(self.input_size, 1024),
            #nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(1024, 128),
            #nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(128, 64),
            #nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(64, 16),
            #nn.ReLU(),
            nn.Linear(16, 1)
        )

    def forward(self, x):
        return self.layers(x)

    def training_step(self, batch, batch_idx):
        x = batch[self.xcol]
        y = batch[self.ycol].reshape(-1, 1)
        x_hat = self.layers(x)
        # BUGFIX: the original called F.mse_loss, but no
        # `import torch.nn.functional as F` is visible in this file's imports,
        # which would raise NameError at train time. nn.functional.mse_loss is
        # the same function via the alias this file does import.
        loss = nn.functional.mse_loss(x_hat, y)
        return loss

    def validation_step(self, batch, batch_idx):
        x = batch[self.xcol]
        y = batch[self.ycol].reshape(-1, 1)
        x_hat = self.layers(x)
        loss = nn.functional.mse_loss(x_hat, y)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        return optimizer
||||
def normalized(a, axis=-1, order=2):
    """Scale *a* so every vector along *axis* has unit L-``order`` norm.

    Vectors whose norm is zero are returned unchanged (their norm is
    treated as 1 to avoid division by zero).
    """
    import numpy as np  # pylint: disable=import-outside-toplevel
    norms = np.atleast_1d(np.linalg.norm(a, order, axis))
    norms = np.where(norms == 0, 1, norms)
    return a / np.expand_dims(norms, axis)
|
||||
|
||||
|
||||
# class EndlessNode_Scoring:
|
||||
# def __init__(self):
|
||||
# pass
|
||||
class EndlessNode_Scoring:
    """Aesthetic scoring node: rates an image with a CLIP-embedding MLP.

    Loads a "chadscorer"-style checkpoint from the ``aesthetic`` models
    folder, embeds the image with CLIP ViT-L/14, and returns the predicted
    score as NUMBER, FLOAT and STRING outputs.
    """

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model_name": (folder_paths.get_filename_list("aesthetic"), {"multiline": False, "default": "chadscorer.pth"}),
                "image": ("IMAGE",),
            }
        }

    RETURN_TYPES = ("NUMBER","FLOAT","STRING")
    FUNCTION = "calc_score"
    CATEGORY = "Endless 🌊✨/Scoring"

    def calc_score(self, model_name, image):
        # Resolve the scorer checkpoint inside the registered "aesthetic" folder.
        search_paths = folder_paths.folder_names_and_paths["aesthetic"][0]
        checkpoint = os.path.join(search_paths[0], model_name)

        scorer = MLP(768)  # CLIP embedding dim is 768 for CLIP ViT L 14
        state = torch.load(checkpoint)
        scorer.load_state_dict(state)
        scorer.to("cuda")
        scorer.eval()

        device = "cuda"
        clip_model, preprocess = clip.load("ViT-L/14", device=device)  # RN50x64

        # ComfyUI images are batched (B,H,W,C) floats in [0,1]; score the first.
        # NOTE(review): assumes a 3-channel image — confirm against callers.
        tensor_image = image[0]
        pixel_array = (tensor_image * 255).to(torch.uint8).numpy()
        pil_image = Image.fromarray(pixel_array, mode='RGB')
        clip_input = preprocess(pil_image).unsqueeze(0).to(device)

        with torch.no_grad():
            embedding = clip_model.encode_image(clip_input)

        unit_embedding = normalized(embedding.cpu().detach().numpy())
        prediction = scorer(torch.from_numpy(unit_embedding).to(device).type(torch.cuda.FloatTensor))
        final_prediction = round(float(prediction[0]), 2)
        del scorer
        return (final_prediction, final_prediction, str(final_prediction),)
|
||||
|
||||
|
||||
|
||||
## This may help in some way to return the score results to a dialog box.
|
||||
# class OutputString:
|
||||
# @classmethod
|
||||
# def INPUT_TYPES(cls):
|
||||
# return {
|
||||
# "required": {
|
||||
# "string": ("STRING", {}),
|
||||
# }
|
||||
# }
|
||||
|
||||
# RETURN_TYPES = ()
|
||||
# FUNCTION = "output_string"
|
||||
|
||||
# OUTPUT_NODE = True
|
||||
|
||||
# CATEGORY = "utils"
|
||||
|
||||
# def output_string(self, string):
|
||||
# return { "ui": { "string": string } }
|
||||
|
||||
|
||||
# @classmethod
|
||||
# def INPUT_TYPES(cls):
|
||||
# return {
|
||||
# "required": {
|
||||
# "model_name": (folder_paths.get_filename_list("aesthetic"), {"multiline": False, "default": "chadscorer.pth"}),
|
||||
# "image": ("IMAGE",),
|
||||
# }
|
||||
# }
|
||||
|
||||
# RETURN_TYPES = ("NUMBER","IMAGE")
|
||||
# FUNCTION = "calc_score"
|
||||
# CATEGORY = "Endless 🌊✨/Scoring"
|
||||
|
||||
# def calc_score(self, model_name, image):
|
||||
# m_path = folder_paths.folder_names_and_paths["aesthetic"][0]
|
||||
# m_path2 = os.path.join(m_path[0], model_name)
|
||||
# model = MLP(768) # CLIP embedding dim is 768 for CLIP ViT L 14
|
||||
# s = torch.load(m_path2)
|
||||
# model.load_state_dict(s)
|
||||
# model.to("cuda")
|
||||
# model.eval()
|
||||
# device = "cuda"
|
||||
# model2, preprocess = clip.load("ViT-L/14", device=device) # RN50x64
|
||||
# tensor_image = image[0]
|
||||
# img = (tensor_image * 255).to(torch.uint8).numpy()
|
||||
# pil_image = Image.fromarray(img, mode='RGB')
|
||||
# image2 = preprocess(pil_image).unsqueeze(0).to(device)
|
||||
# with torch.no_grad():
|
||||
# image_features = model2.encode_image(image2)
|
||||
# im_emb_arr = normalized(image_features.cpu().detach().numpy())
|
||||
# prediction = model(torch.from_numpy(im_emb_arr).to(device).type(torch.cuda.FloatTensor))
|
||||
# final_prediction = round(float(prediction[0]), 2)
|
||||
# del model
|
||||
# return (final_prediction,)
|
||||
|
||||
# #---------------------------------------------- NOT WORKING, NEED TO LOOK AT IT
|
||||
# # Aesthetic Scoring Node with Scoring passed to image
|
||||
@@ -538,41 +600,41 @@ class EndlessNode_ComboXLParameterizer:
|
||||
#----------------------------------------------
|
||||
# Image Reward Scoring
|
||||
|
||||
# class EndlessNode_ImageReward:
|
||||
# def __init__(self):
|
||||
# self.model = None
|
||||
class EndlessNode_ImageReward:
    """Scores a batch of images against a prompt with the ImageReward model.

    Outputs the mean raw reward (float and string) plus a value rescaled
    through the normal CDF (float and string).
    """

    def __init__(self):
        # Loaded lazily on first call and cached on the instance.
        self.model = None

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model": ("STRING", {"multiline": False, "default": "ImageReward-v1.0"}),
                "prompt": ("STRING", {"multiline": True, "forceInput": True}),
                "images": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("FLOAT", "STRING", "FLOAT", "STRING")
    RETURN_NAMES = ("SCORE_FLOAT", "SCORE_STRING", "VALUE_FLOAT", "VALUE_STRING")

    CATEGORY = "Endless 🌊✨/Scoring"

    FUNCTION = "process_images"

    def process_images(self, model, prompt, images,): #rounded):
        if self.model is None:
            self.model = RM.load(model)

        score = 0.0
        for image in images:
            # Convert the [0,1] float tensor to a PIL image for the scorer.
            pixels = 255.0 * image.cpu().numpy()
            pil_img = Image.fromarray(np.clip(pixels, 0, 255).astype(np.uint8))
            score += self.model.score(prompt, [pil_img])
        # Average across the batch.
        score /= len(images)
        # Map the raw score through the normal CDF (assumes scores follow a
        # standard normal distribution) and scale to roughly a 0-10 range.
        valuescale = 0.5 * (1 + math.erf(score / math.sqrt(2))) * 10
        return (score, str(score), valuescale, str(valuescale),)
|
||||
|
||||
|
||||
# #---------------------------------------------- NOT WORKING, NEED TO LOOK AT
|
||||
@@ -657,13 +719,13 @@ class EndlessNode_ImageSaver:
|
||||
image_folder=None, json_folder=None, prompt=None, extra_pnginfo=None):
|
||||
|
||||
# Replace illegal characters in the filename prefix with dashes
|
||||
filename_prefix = re.sub(r'[<>:"\/\\|?*]', '-', filename_prefix)
|
||||
filename_prefix = re.sub(r'[<>:"/\\|?*]', '-', filename_prefix)
|
||||
|
||||
# Set IMG Extension
|
||||
img_extension = '.png'
|
||||
|
||||
counter = 1
|
||||
|
||||
|
||||
results = list()
|
||||
|
||||
for image in images:
|
||||
@@ -671,6 +733,13 @@ class EndlessNode_ImageSaver:
|
||||
img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
|
||||
|
||||
metadata = PngInfo()
|
||||
|
||||
def encode_emoji(obj):
|
||||
if isinstance(obj, str):
|
||||
return obj.encode('utf-8', 'surrogatepass').decode('utf-8')
|
||||
return obj
|
||||
|
||||
|
||||
if prompt is not None:
|
||||
metadata.add_text("prompt", json.dumps(prompt))
|
||||
if extra_pnginfo is not None:
|
||||
@@ -680,16 +749,16 @@ class EndlessNode_ImageSaver:
|
||||
img_file, json_file = self.generate_filenames(filename_prefix, delimiter, counter,
|
||||
filename_number_padding, filename_number_start,
|
||||
img_extension, image_folder, json_folder)
|
||||
|
||||
|
||||
try:
|
||||
if img_extension == '.png':
|
||||
img.save(img_file, pnginfo=metadata, compress_level=4)
|
||||
elif img_extension == '.jpeg':
|
||||
img.save(img_file, quality=100, optimize=True)
|
||||
|
||||
with open(json_file, 'w', encoding='utf-8', newline='\n') as f:
|
||||
if prompt is not None:
|
||||
f.write(json.dumps(prompt, indent=4))
|
||||
f.write("Prompt:\n" + json.dumps(prompt, indent="\t", default=encode_emoji, ensure_ascii=False))
|
||||
f.write("\nExtra PNG Info:\n" + json.dumps(extra_pnginfo, indent="\t", default=encode_emoji, ensure_ascii=False))
|
||||
|
||||
print(Fore.GREEN + f"+ File(s) saved to: {img_file}")
|
||||
|
||||
@@ -723,18 +792,25 @@ class EndlessNode_ImageSaver:
|
||||
img_file = f"{filename_prefix}{delimiter}{counter:0{filename_number_padding}}{img_extension}"
|
||||
json_file = f"{filename_prefix}{delimiter}{counter:0{filename_number_padding}}.json"
|
||||
|
||||
|
||||
# Apply placeholders for date and time in filenames
|
||||
img_file = self.replace_date_time_placeholders(img_file)
|
||||
json_file = self.replace_date_time_placeholders(json_file)
|
||||
|
||||
# Construct full paths for image and text files based on folders provided
|
||||
if image_folder:
|
||||
img_file = os.path.join(image_folder, img_file)
|
||||
image_folder = self.replace_date_time_placeholders(image_folder)
|
||||
img_folder_path = os.path.join(self.output_dir, image_folder) if not image_folder.startswith(self.output_dir) else image_folder
|
||||
os.makedirs(img_folder_path, exist_ok=True) # Create image folder if it doesn't exist
|
||||
img_file = os.path.join(img_folder_path, img_file)
|
||||
else:
|
||||
img_file = os.path.join(self.output_dir, img_file)
|
||||
|
||||
if json_folder:
|
||||
json_file = os.path.join(json_folder, json_file)
|
||||
json_folder = self.replace_date_time_placeholders(json_folder)
|
||||
json_folder_path = os.path.join(self.output_dir, json_folder) if not json_folder.startswith(self.output_dir) else json_folder
|
||||
os.makedirs(json_folder_path, exist_ok=True) # Create json folder if it doesn't exist
|
||||
json_file = os.path.join(json_folder_path, json_file)
|
||||
else:
|
||||
json_file = os.path.join(os.path.dirname(img_file), json_file)
|
||||
|
||||
@@ -753,34 +829,48 @@ class EndlessNode_ImageSaver:
|
||||
json_file = self.replace_date_time_placeholders(json_file)
|
||||
|
||||
if image_folder:
|
||||
img_file = os.path.join(image_folder, img_file)
|
||||
image_folder = self.replace_date_time_placeholders(image_folder)
|
||||
img_folder_path = os.path.join(self.output_dir, image_folder) if not image_folder.startswith(self.output_dir) else image_folder
|
||||
os.makedirs(img_folder_path, exist_ok=True) # Create image folder if it doesn't exist
|
||||
img_file = os.path.join(img_folder_path, img_file)
|
||||
else:
|
||||
img_file = os.path.join(self.output_dir, img_file)
|
||||
|
||||
if json_folder:
|
||||
json_file = os.path.join(json_folder, json_file)
|
||||
json_folder = self.replace_date_time_placeholders(json_folder)
|
||||
json_folder_path = os.path.join(self.output_dir, json_folder) if not json_folder.startswith(self.output_dir) else json_folder
|
||||
os.makedirs(json_folder_path, exist_ok=True) # Create json folder if it doesn't exist
|
||||
json_file = os.path.join(json_folder_path, json_file)
|
||||
else:
|
||||
json_file = os.path.join(os.path.dirname(img_file), json_file)
|
||||
|
||||
return img_file, json_file
|
||||
|
||||
def replace_date_time_placeholders(self, filename):
    """Expand ``%``-style date/time placeholders in *filename*.

    Every ``%X`` token (one ASCII letter) is formatted with the current
    local time via ``strftime``; tokens ``strftime`` rejects are left
    untouched. Consolidates what was previously two overlapping
    implementations (a hand-rolled placeholder dict followed by this same
    regex pass) into the single regex pass — the dict entries produced
    identical substitutions, so behavior is unchanged.
    """
    # Snapshot the time once so all placeholders in one filename agree.
    now = datetime.datetime.now()

    def replace_match(match):
        placeholder = match.group(0)
        try:
            return now.strftime(placeholder)
        except ValueError:
            # Unknown directive: keep the literal text rather than fail.
            return placeholder

    # Match date and time placeholders such as %Y, %m, %d, %H, %M, %S.
    pattern = r'%[a-zA-Z]'
    return re.sub(pattern, replace_match, filename)
|
||||
|
||||
# def truncate_string(s, length):
|
||||
# if len(s) > length:
|
||||
# return s[:length]
|
||||
# return s
|
||||
|
||||
# ______________________________________________________________________________________________________________________________________________________________
|
||||
# CONVERTER NODES BLOCK #
|
||||
#
|
||||
@@ -1007,7 +1097,7 @@ class EndlessNode_NumtoString:
|
||||
"required": {"NumberValue": ("NUMBER",)},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("STRING")
|
||||
RETURN_TYPES = ("STRING",)
|
||||
FUNCTION = "inputnum"
|
||||
|
||||
def inputnum(self, NumberValue):
|
||||
|
||||
Reference in New Issue
Block a user