mirror of
https://github.com/justUmen/Bjornulf_custom_nodes.git
synced 2026-03-21 12:42:11 -03:00
0.56
README.md
@@ -1,4 +1,4 @@
-# 🔗 Comfyui : Bjornulf_custom_nodes v0.54 🔗
+# 🔗 Comfyui : Bjornulf_custom_nodes v0.56 🔗
 
 A list of 61 custom nodes for Comfyui : Display, manipulate, and edit text, images, videos, loras and more.
 You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech.
@@ -262,8 +262,9 @@ cd /where/you/installed/ComfyUI && python main.py
 - **v0.49**: New node : Loop Sequential (Integer) - Loop through a range of integer values. (But once per workflow run), audio sync is smarter and adapt the video duration to the audio duration.
 - **v0.50**: allow audio in Images to Video path (tmp video). Add three new nodes : Concat Videos, combine video/audio and Loop Sequential (input Lines). save text changes to write inside Comfyui folder. Fix random line from input outputing LIST. ❗ Breaking change to audio/video sync node, allowing different types as input.
 - **v0.51**: Fix some issues with audio/video sync node. Add two new nodes : merge images/videos vertical and horizontal. add requirements.txt and ollama_ip.txt
-- **v0.52-53**: Rever name git to Bjornulf_custom_nodes, match registry comfy
+- **v0.52-53**: Revert name git to Bjornulf_custom_nodes, match registry comfy
 - **v0.54-55**: add opencv-python to requirements.txt
+- **0.56**: ❗Breaking changes : ollama node simplified, no ollama_ip.txt needed, waiting for collection ollama nodes to be ready.
 
 # 📝 Nodes descriptions
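In practice, this breaking change means the Ollama node no longer reads its host from `ollama_ip.txt`: the server URL is typed directly into the node's new `ollama_url` field. A minimal sketch of the equivalent direct call the node now makes with the `ollama` Python client; the URL, model, and prompts below are example values:

```python
# Sketch of the simplified flow: talk to an explicitly supplied Ollama URL.
from ollama import Client  # pip install ollama

client = Client(host="http://0.0.0.0:11434")  # the node's default ollama_url
response = client.generate(
    model="llama3.2:1b",                     # the node's default model string
    system="You are a helpful assistant.",   # example system prompt
    prompt="Describe a fantasy character.",  # example user prompt
    keep_alive="1m",                         # keep the model in VRAM for one minute
)
print(response["response"])
```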
ollama.py
@@ -1,51 +1,64 @@
 import ollama
-from ollama import Client # pip install ollama
+from ollama import Client
 import logging
 import hashlib
 import os
 
 class ollamaLoader:
-    @classmethod
-    def read_host_from_file(cls, filename='ollama_ip.txt'):
-        try:
-            # Get the directory where the script is located
-            script_dir = os.path.dirname(os.path.realpath(__file__))
-            file_path = os.path.join(script_dir, filename)
-
-            # Print the constructed file path for verification
-            print(f"Looking for file at: {file_path}")
-
-            with open(file_path, 'r') as f:
-                host = f.read().strip()
-                if host:
-                    logging.info(f"Using host from {file_path}: {host}")
-                    return host
-                else:
-                    logging.warning(f"{file_path} is empty. Falling back to default hosts.")
-        except Exception as e:
-            logging.error(f"Failed to read host from {file_path}: {e}")
-        return None # Return None if reading fails
+    # _available_models = None # Class variable to cache models
+
+    # @classmethod
+    # def read_host_from_file(cls, filename='ollama_ip.txt'):
+    #     try:
+    #         script_dir = os.path.dirname(os.path.realpath(__file__))
+    #         file_path = os.path.join(script_dir, filename)
+    #         print(f"Looking for file at: {file_path}")
+
+    #         with open(file_path, 'r') as f:
+    #             host = f.read().strip()
+    #             if host:
+    #                 logging.info(f"Using host from {file_path}: {host}")
+    #                 return host
+    #             else:
+    #                 logging.warning(f"{file_path} is empty. Falling back to default hosts.")
+    #     except Exception as e:
+    #         logging.error(f"Failed to read host from {file_path}: {e}")
+    #     return None
 
-    @classmethod
-    def get_available_models(cls):
-        host = cls.read_host_from_file()
-        if host:
-            try:
-                client = Client(host=host)
-                list_models = client.list()
-                return [model['name'] for model in list_models['models']]
-            except Exception as e:
-                logging.error(f"Error fetching models from {host}: {e}")
-
-        # Fallback to default hosts if reading from file fails
-        for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
-            try:
-                client = Client(host=default_host)
-                list_models = client.list()
-                return [model['name'] for model in list_models['models']]
-            except Exception as e:
-                logging.error(f"Error fetching models from {default_host}: {e}")
-        return ["none"] # Return a default model if fetching fails
+    # @classmethod
+    # def get_available_models(cls):
+    #     # Return cached models if available
+    #     if cls._available_models is not None:
+    #         return cls._available_models
+
+    #     models = ["none"] # Default fallback
+    #     host = cls.read_host_from_file()
+
+    #     def try_connect(host_url):
+    #         try:
+    #             client = Client(host=host_url)
+    #             list_models = client.list()
+    #             return [model['name'] for model in list_models['models']]
+    #         except Exception as e:
+    #             logging.error(f"Error fetching models from {host_url}: {e}")
+    #             return None
+
+    #     # Try user-specified host first
+    #     if host:
+    #         result = try_connect(host)
+    #         if result:
+    #             models = result
+
+    #     # Try default hosts if necessary
+    #     if models == ["none"]:
+    #         for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
+    #             result = try_connect(default_host)
+    #             if result:
+    #                 models = result
+    #                 break
+
+    #     cls._available_models = models # Cache the results
+    #     return models
 
     @classmethod
     def INPUT_TYPES(cls):
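Since the dropdown populated by `get_available_models` is gone, the node no longer lists models for you, but the removed helper's core pattern is still useful for checking what a server offers. A standalone sketch; the host default is an example:

```python
import logging
from ollama import Client  # pip install ollama

def list_model_names(host="http://127.0.0.1:11434"):
    """Sketch of the client.list() pattern used by the removed helper."""
    try:
        client = Client(host=host)
        # client.list() returns a mapping with a 'models' list of metadata dicts.
        return [model["name"] for model in client.list()["models"]]
    except Exception as e:
        logging.error(f"Error fetching models from {host}: {e}")
        return ["none"]  # same fallback sentinel the old helper returned
```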
@@ -59,10 +72,12 @@ class ollamaLoader:
             "yellow pants, orange jacket and black shirt, sunglasses, very long beard, very pale skin, long white hair, "
             "very large nose."
         )
+        # Lazy load models only when the node is actually used
         return {
             "required": {
                 "user_prompt": ("STRING", {"multiline": True}),
-                "selected_model": (cls.get_available_models(),),
+                "selected_model": ("STRING", {"default": "llama3.2:1b"}), # Default to none, will be populated when node is used
+                "ollama_url": ("STRING", {"default": "http://0.0.0.0:11434"}),
                 "system_prompt": ("STRING", {
                     "multiline": True,
                     "default": default_system_prompt
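For context on what this hunk changes from a user's point of view: `INPUT_TYPES` declares the widgets ComfyUI draws for a node, and a tuple of strings renders as a dropdown while `"STRING"` renders as a text box, so `selected_model` becomes free-typed text. A rough node skeleton following ComfyUI's usual conventions; the `RETURN_TYPES`, `FUNCTION`, and `CATEGORY` values are illustrative assumptions, not copied from this commit:

```python
class ExampleOllamaNode:
    @classmethod
    def INPUT_TYPES(cls):
        # Maps each input name to (type, options); "STRING" gives a text box.
        return {
            "required": {
                "user_prompt": ("STRING", {"multiline": True}),
                "selected_model": ("STRING", {"default": "llama3.2:1b"}),
                "ollama_url": ("STRING", {"default": "http://0.0.0.0:11434"}),
            }
        }

    RETURN_TYPES = ("STRING",)  # assumed: the node outputs a single string
    FUNCTION = "run"            # name of the method ComfyUI calls
    CATEGORY = "Bjornulf"       # illustrative category label

    def run(self, user_prompt, selected_model, ollama_url):
        # Placeholder body; the real node calls the Ollama client here.
        return (f"{selected_model}@{ollama_url}: {user_prompt}",)
```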
@@ -79,50 +94,47 @@ class ollamaLoader:
 
     def __init__(self):
         self.last_content_hash = None
+        # # Update available models when the node is actually instantiated
+        # self.__class__._available_models = self.get_available_models()
 
-    def connect_2_ollama(self, user_prompt, selected_model, system_prompt, keep_1min_in_vram, seed):
-        # Generate a hash of the current content
+    def connect_2_ollama(self, user_prompt, selected_model, system_prompt, keep_1min_in_vram, ollama_url, seed):
         content_hash = hashlib.md5((user_prompt + selected_model + system_prompt).encode()).hexdigest()
 
         # Check if the content has changed
         if content_hash != self.last_content_hash:
             # Content has changed, use the provided seed
             self.last_content_hash = content_hash
         else:
             # Content hasn't changed, set seed to None to prevent randomization
             seed = None
 
         keep_alive_minutes = 1 if keep_1min_in_vram else 0
-        host = self.read_host_from_file()
+        # host = self.read_host_from_file()
+        host = ollama_url
+
+        def try_generate(host_url):
+            try:
+                client = Client(host=host_url)
+                response = client.generate(
+                    model=selected_model,
+                    system=system_prompt,
+                    prompt=user_prompt,
+                    keep_alive=f"{keep_alive_minutes}m"
+                )
+                logging.info(f"Ollama response ({host_url}): {response['response']}")
+                return response['response']
+            except Exception as e:
+                logging.error(f"Connection to {host_url} failed: {e}")
+                return None
 
         # Try user-specified host first
         if host:
-            try:
-                client = Client(host=host)
-                response = client.generate(
-                    model=selected_model,
-                    system=system_prompt,
-                    prompt=user_prompt,
-                    keep_alive=f"{keep_alive_minutes}m"
-                )
-                logging.info(f"Ollama response ({host}): {response['response']}")
-                return (response['response'],)
-            except Exception as e:
-                logging.error(f"Connection to {host} failed: {e}")
+            result = try_generate(host)
+            if result:
+                return (result,)
 
-        # Fallback to default hosts if reading from file fails
-        for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
-            try:
-                client = Client(host=default_host)
-                response = client.generate(
-                    model=selected_model,
-                    system=system_prompt,
-                    prompt=user_prompt,
-                    keep_alive=f"{keep_alive_minutes}m"
-                )
-                logging.info(f"Ollama response ({default_host}): {response['response']}")
-                return (response['response'],)
-            except Exception as e:
-                logging.error(f"Connection to {default_host} failed: {e}")
+        # Try default hosts
+        # for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
+        #     result = try_generate(default_host)
+        #     if result:
+        #         return (result,)
 
         logging.error("All connection attempts failed.")
         return ("Connection to Ollama failed.",)
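One detail worth pulling out of this hunk: the node hashes its three text inputs and nulls the incoming seed whenever the content is unchanged, mirroring the inline comments about preventing re-randomization. The pattern in isolation, as a sketch; the real node stores the hash on `self.last_content_hash` rather than a module-level variable:

```python
import hashlib

last_content_hash = None  # stand-in for self.last_content_hash

def gate_seed(user_prompt, selected_model, system_prompt, seed):
    """Return the seed only if the combined inputs changed since the last call."""
    global last_content_hash
    content_hash = hashlib.md5(
        (user_prompt + selected_model + system_prompt).encode()
    ).hexdigest()
    if content_hash != last_content_hash:
        last_content_hash = content_hash
        return seed  # content changed: honor the provided seed
    return None      # content unchanged: suppress re-randomization
```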
pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "bjornulf_custom_nodes"
 description = "61 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech."
-version = "0.55"
+version = "0.56"
 license = {file = "LICENSE"}
 
 [project.urls]