This commit is contained in:
justumen
2024-11-11 05:37:44 +01:00
parent 4da1a2846e
commit 840e62d00c
3 changed files with 89 additions and 76 deletions

View File

@@ -1,4 +1,4 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v0.54 🔗 # 🔗 Comfyui : Bjornulf_custom_nodes v0.56 🔗
A list of 61 custom nodes for Comfyui : Display, manipulate, and edit text, images, videos, loras and more. A list of 61 custom nodes for Comfyui : Display, manipulate, and edit text, images, videos, loras and more.
You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech. You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech.
@@ -262,8 +262,9 @@ cd /where/you/installed/ComfyUI && python main.py
- **v0.49**: New node : Loop Sequential (Integer) - Loop through a range of integer values. (But once per workflow run), audio sync is smarter and adapt the video duration to the audio duration. - **v0.49**: New node : Loop Sequential (Integer) - Loop through a range of integer values. (But once per workflow run), audio sync is smarter and adapt the video duration to the audio duration.
- **v0.50**: allow audio in Images to Video path (tmp video). Add three new nodes : Concat Videos, combine video/audio and Loop Sequential (input Lines). save text changes to write inside Comfyui folder. Fix random line from input outputing LIST. ❗ Breaking change to audio/video sync node, allowing different types as input. - **v0.50**: allow audio in Images to Video path (tmp video). Add three new nodes : Concat Videos, combine video/audio and Loop Sequential (input Lines). save text changes to write inside Comfyui folder. Fix random line from input outputing LIST. ❗ Breaking change to audio/video sync node, allowing different types as input.
- **v0.51**: Fix some issues with audio/video sync node. Add two new nodes : merge images/videos vertical and horizontal. add requirements.txt and ollama_ip.txt - **v0.51**: Fix some issues with audio/video sync node. Add two new nodes : merge images/videos vertical and horizontal. add requirements.txt and ollama_ip.txt
- **v0.52-53**: Rever name git to Bjornulf_custom_nodes, match registry comfy - **v0.52-53**: Revert name git to Bjornulf_custom_nodes, match registry comfy
- **v0.54-55**: add opencv-python to requirements.txt - **v0.54-55**: add opencv-python to requirements.txt
- **v0.56**: ❗ Breaking changes: Ollama node simplified — no ollama_ip.txt needed; waiting for the collection of Ollama nodes to be ready.
# 📝 Nodes descriptions # 📝 Nodes descriptions

158
ollama.py
View File

@@ -1,51 +1,64 @@
import ollama import ollama
from ollama import Client # pip install ollama from ollama import Client
import logging import logging
import hashlib import hashlib
import os import os
class ollamaLoader: class ollamaLoader:
@classmethod # _available_models = None # Class variable to cache models
def read_host_from_file(cls, filename='ollama_ip.txt'):
try: # @classmethod
# Get the directory where the script is located # def read_host_from_file(cls, filename='ollama_ip.txt'):
script_dir = os.path.dirname(os.path.realpath(__file__)) # try:
file_path = os.path.join(script_dir, filename) # script_dir = os.path.dirname(os.path.realpath(__file__))
# file_path = os.path.join(script_dir, filename)
# print(f"Looking for file at: {file_path}")
# with open(file_path, 'r') as f:
# host = f.read().strip()
# if host:
# logging.info(f"Using host from {file_path}: {host}")
# return host
# else:
# logging.warning(f"{file_path} is empty. Falling back to default hosts.")
# except Exception as e:
# logging.error(f"Failed to read host from {file_path}: {e}")
# return None
# Print the constructed file path for verification # @classmethod
print(f"Looking for file at: {file_path}") # def get_available_models(cls):
# # Return cached models if available
# if cls._available_models is not None:
# return cls._available_models
# models = ["none"] # Default fallback
# host = cls.read_host_from_file()
# def try_connect(host_url):
# try:
# client = Client(host=host_url)
# list_models = client.list()
# return [model['name'] for model in list_models['models']]
# except Exception as e:
# logging.error(f"Error fetching models from {host_url}: {e}")
# return None
with open(file_path, 'r') as f: # # Try user-specified host first
host = f.read().strip() # if host:
if host: # result = try_connect(host)
logging.info(f"Using host from {file_path}: {host}") # if result:
return host # models = result
else:
logging.warning(f"{file_path} is empty. Falling back to default hosts.")
except Exception as e:
logging.error(f"Failed to read host from {file_path}: {e}")
return None # Return None if reading fails
@classmethod # # Try default hosts if necessary
def get_available_models(cls): # if models == ["none"]:
host = cls.read_host_from_file() # for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
if host: # result = try_connect(default_host)
try: # if result:
client = Client(host=host) # models = result
list_models = client.list() # break
return [model['name'] for model in list_models['models']]
except Exception as e: # cls._available_models = models # Cache the results
logging.error(f"Error fetching models from {host}: {e}") # return models
# Fallback to default hosts if reading from file fails
for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
try:
client = Client(host=default_host)
list_models = client.list()
return [model['name'] for model in list_models['models']]
except Exception as e:
logging.error(f"Error fetching models from {default_host}: {e}")
return ["none"] # Return a default model if fetching fails
@classmethod @classmethod
def INPUT_TYPES(cls): def INPUT_TYPES(cls):
@@ -59,10 +72,12 @@ class ollamaLoader:
"yellow pants, orange jacket and black shirt, sunglasses, very long beard, very pale skin, long white hair, " "yellow pants, orange jacket and black shirt, sunglasses, very long beard, very pale skin, long white hair, "
"very large nose." "very large nose."
) )
# Lazy load models only when the node is actually used
return { return {
"required": { "required": {
"user_prompt": ("STRING", {"multiline": True}), "user_prompt": ("STRING", {"multiline": True}),
"selected_model": (cls.get_available_models(),), "selected_model": ("STRING", {"default": "llama3.2:1b"} ), # Default to none, will be populated when node is used
"ollama_url": ("STRING", {"default": "http://0.0.0.0:11434"}),
"system_prompt": ("STRING", { "system_prompt": ("STRING", {
"multiline": True, "multiline": True,
"default": default_system_prompt "default": default_system_prompt
@@ -79,50 +94,47 @@ class ollamaLoader:
def __init__(self):
    """Set up per-node state for change detection across workflow runs."""
    # No prompt has been hashed yet; connect_2_ollama fills this in on
    # its first call and compares against it afterwards.
    self.last_content_hash = None
    # NOTE(review): the v0.56 commit removed the model-list refresh that
    # used to run here:
    # self.__class__._available_models = self.get_available_models()
def connect_2_ollama(self, user_prompt, selected_model, system_prompt, keep_1min_in_vram, ollama_url, seed):
    """Generate a completion from an Ollama server and return it as a 1-tuple.

    Parameters:
        user_prompt: the prompt text sent to the model.
        selected_model: Ollama model name (e.g. "llama3.2:1b").
        system_prompt: system instruction passed to `client.generate`.
        keep_1min_in_vram: if True, ask Ollama to keep the model loaded for
            1 minute after the call ("1m"), otherwise unload immediately ("0m").
        ollama_url: base URL of the Ollama server to contact.
        seed: reset to None when the prompt/model/system content is unchanged
            since the previous call; not otherwise read in this method.
            NOTE(review): presumably consumed by the caller / ComfyUI seed
            widget to control re-execution — confirm.

    Returns:
        (response_text,) on success, or ("Connection to Ollama failed.",)
        when the server cannot be reached.
    """
    # Hash the inputs that define the generation so repeated runs with
    # identical content can be detected.
    content_hash = hashlib.md5((user_prompt + selected_model + system_prompt).encode()).hexdigest()
    if content_hash != self.last_content_hash:
        # Content changed: remember it and keep the provided seed.
        self.last_content_hash = content_hash
    else:
        # Content unchanged: drop the seed to prevent re-randomization.
        seed = None

    keep_alive_minutes = 1 if keep_1min_in_vram else 0

    # v0.56: the host comes straight from the node input instead of
    # ollama_ip.txt (see the commented-out read_host_from_file above).
    host = ollama_url

    def try_generate(host_url):
        """One generation attempt against host_url; returns None on failure."""
        try:
            client = Client(host=host_url)
            response = client.generate(
                model=selected_model,
                system=system_prompt,
                prompt=user_prompt,
                keep_alive=f"{keep_alive_minutes}m"
            )
            logging.info(f"Ollama response ({host_url}): {response['response']}")
            return response['response']
        except Exception as e:
            logging.error(f"Connection to {host_url} failed: {e}")
            return None

    if host:
        result = try_generate(host)
        # BUG FIX: compare against None, not truthiness — try_generate
        # signals failure with None, so an empty-but-successful response
        # ("") was previously misreported as a connection failure.
        if result is not None:
            return (result,)

    logging.error("All connection attempts failed.")
    return ("Connection to Ollama failed.",)

View File

@@ -1,7 +1,7 @@
[project] [project]
name = "bjornulf_custom_nodes" name = "bjornulf_custom_nodes"
description = "61 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech." description = "61 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech."
version = "0.55" version = "0.56"
license = {file = "LICENSE"} license = {file = "LICENSE"}
[project.urls] [project.urls]