mirror of
https://github.com/justUmen/Bjornulf_custom_nodes.git
synced 2026-03-21 20:52:11 -03:00
...
This commit is contained in:
1
.gitignore
vendored
1
.gitignore
vendored
@@ -5,3 +5,4 @@ clear_vram.py
|
||||
web/js/clear_vram.js
|
||||
speakers
|
||||
*.text
|
||||
web/js/*.txt
|
||||
@@ -1,11 +0,0 @@
|
||||
class CustomStringType:
    """Trivial pass-through node: exposes a multiline STRING as CUSTOM_STRING."""

    @classmethod
    def INPUT_TYPES(cls):
        # Single required multiline text field.
        field_spec = ("STRING", {"multiline": True})
        return {"required": {"value": field_spec}}

    RETURN_TYPES = ("CUSTOM_STRING",)
    FUNCTION = "passthrough"
    CATEGORY = "Bjornulf"

    def passthrough(self, value):
        """Return the input unchanged, wrapped in a one-element tuple."""
        return (value,)
|
||||
@@ -1,33 +0,0 @@
|
||||
import torch
|
||||
import gc
|
||||
|
||||
class ClearVRAM:
    """ComfyUI output node that frees cached CUDA memory on demand.

    A one-shot latch (`self.cleared`) prevents repeated clearing while the
    toggle stays on; switching the toggle off re-arms the node.
    """

    def __init__(self):
        # True once we cleared since the toggle was last reset.
        self.cleared = False

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "clear": ("BOOLEAN", {"default": False, "label": "Clear VRAM"})
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "clear_vram"
    CATEGORY = "memory_management"
    OUTPUT_NODE = True

    def clear_vram(self, clear):
        """Free cached VRAM once per toggle activation.

        Args:
            clear: When True, clear VRAM (once) and latch; when False, re-arm.

        Returns:
            A one-element tuple with a human-readable status string.
        """
        if clear and not self.cleared:
            # Release Python-side references first so the CUDA caching
            # allocator has as much to give back as possible.
            gc.collect()
            # Fix: torch.cuda.synchronize() (and empty_cache on some builds)
            # raises on CPU-only installs — guard with is_available().
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
                torch.cuda.synchronize()
            print("VRAM cleared")
            self.cleared = True
            return ("VRAM cleared",)
        elif not clear:
            # Toggle is off: reset the latch so the next True clears again.
            self.cleared = False
            return ("Ready to clear VRAM",)
        else:
            # Toggle still on but we already cleared: do nothing.
            return ("VRAM already cleared",)
|
||||
@@ -1,31 +0,0 @@
|
||||
import random
|
||||
import hashlib
|
||||
|
||||
class TextToStringAndSeed:
    """Derive a deterministic 32-bit pseudo-random seed from input text."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "text": ("STRING", {"forceInput": True}),
            },
        }

    RETURN_NAMES = ("text", "random_seed")
    RETURN_TYPES = ("STRING", "INT")
    FUNCTION = "process"
    CATEGORY = "utils"

    def process(self, text):
        """Return the text unchanged plus a seed deterministically derived from it.

        Args:
            text: Arbitrary input string.

        Returns:
            (text, random_seed) where random_seed is in [0, 2**32 - 1] and is
            always the same for the same text.
        """
        # Hash the text so near-identical strings still yield well-spread seeds.
        text_hash = hashlib.md5(text.encode()).hexdigest()

        # Fix: use a private Random instance instead of seeding/resetting the
        # module-level RNG (random.seed(...) ... random.seed()), which silently
        # clobbered global random state for every other consumer in-process.
        # random.Random(x) seeds exactly like random.seed(x), so the produced
        # value is identical to the previous implementation.
        rng = random.Random(text_hash)
        random_seed = rng.randint(0, 2**32 - 1)

        return (text, random_seed)
|
||||
@@ -1,7 +1,7 @@
|
||||
[project]
|
||||
name = "bjornulf_custom_nodes"
|
||||
description = "Nodes: Ollama, Text to Speech, Combine Texts, Random Texts, Save image for Bjornulf LobeChat, Text with random Seed, Random line from input, Combine images, Image to grayscale (black & white), Remove image Transparency (alpha), Resize Image, ..."
|
||||
version = "0.53"
|
||||
description = "61 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech."
|
||||
version = "0.54"
|
||||
license = {file = "LICENSE"}
|
||||
|
||||
[project.urls]
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
import random
|
||||
|
||||
class RandomCheckpoint:
    """Uniformly pick one (model, clip, vae) checkpoint triple among the wired inputs."""

    @classmethod
    def INPUT_TYPES(cls):
        inputs = {
            "number_of_inputs": ("INT", {"default": 2, "min": 2, "max": 10, "step": 1}),
        }
        # Two checkpoint slots are declared statically; the slot layout is
        # model_N / clip_N / vae_N for N = 1, 2.
        for slot in (1, 2):
            inputs[f"model_{slot}"] = ("MODEL", {"forceInput": True})
            inputs[f"clip_{slot}"] = ("CLIP", {"forceInput": True})
            inputs[f"vae_{slot}"] = ("VAE", {"forceInput": True})
        return {"required": inputs}

    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "random_select"

    def random_select(self, number_of_inputs, **kwargs):
        """Draw a slot index in [1, number_of_inputs] and return its triple."""
        pick = random.randint(1, number_of_inputs)
        return tuple(kwargs[f"{part}_{pick}"] for part in ("model", "clip", "vae"))
|
||||
@@ -1,60 +0,0 @@
|
||||
import os
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
import json
|
||||
from PIL.PngImagePlugin import PngInfo
|
||||
|
||||
class SaveApiImage:
    """Save an IMAGE tensor to ./output/api_XXXXX.png, embedding workflow metadata."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE", {"forceInput": True}),
            },
            # prompt / extra_pnginfo are injected by ComfyUI, not wired by the user.
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
        }

    FUNCTION = "save_api_image"
    RETURN_TYPES = ()
    OUTPUT_NODE = True
    CATEGORY = "Bjornulf"

    def save_api_image(self, image, prompt=None, extra_pnginfo=None):
        """Write the image as the next free ./output/api_NNNNN.png.

        Args:
            image: ComfyUI IMAGE tensor (floats in [0, 1]; assumed batch of 1 —
                np.squeeze drops singleton dims. TODO confirm multi-batch intent).
            prompt: Workflow prompt dict, stored as PNG text metadata.
            extra_pnginfo: Extra key/value dicts, each stored as PNG text metadata.

        Returns:
            ComfyUI UI dict referencing the saved file.
        """
        # Ensure the output directory exists.
        os.makedirs("./output/", exist_ok=True)

        # Convert from ComfyUI float tensor to an 8-bit numpy array.
        i = 255. * image.cpu().numpy()
        if i.ndim > 3:
            # Drop singleton dimensions (typically the batch axis).
            i = np.squeeze(i)
        if i.ndim == 2:
            # Grayscale: add a channel axis so PIL gets H x W x C.
            i = i[:, :, np.newaxis]

        img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))

        # Determine the next available sequential filename.
        counter = 1
        while os.path.exists(f"./output/api_{counter:05d}.png"):
            counter += 1
        filename = f"./output/api_{counter:05d}.png"

        # Embed the workflow so the PNG can be dragged back into ComfyUI.
        metadata = PngInfo()
        if prompt is not None:
            metadata.add_text("prompt", json.dumps(prompt))
        if extra_pnginfo is not None:
            for k, v in extra_pnginfo.items():
                metadata.add_text(k, json.dumps(v))

        img.save(filename, format="PNG", pnginfo=metadata)

        # Record the name the NEXT save will use, with leading zeroes,
        # so external tooling can poll for it.
        with open("./output/api_next_image.txt", "w") as f:
            f.write(f"api_{counter+1:05d}.png")

        # Fix: the log message previously did not include the saved path.
        print(f"Image saved as: {filename}")

        return {"ui": {"images": [{"filename": filename, "type": "output"}]}}
|
||||
Reference in New Issue
Block a user