This commit is contained in:
justumen
2024-11-11 05:37:44 +01:00
parent 4da1a2846e
commit 840e62d00c
3 changed files with 89 additions and 76 deletions

158
ollama.py
View File

@@ -1,51 +1,64 @@
import ollama
from ollama import Client # pip install ollama
from ollama import Client
import logging
import hashlib
import os
class ollamaLoader:
@classmethod
def read_host_from_file(cls, filename='ollama_ip.txt'):
    """Return the Ollama host URL stored next to this script, or None.

    Reads *filename* from the directory containing this module and returns
    its stripped contents when non-empty. Returns None when the file is
    missing, unreadable, or empty (callers fall back to default hosts).
    """
    # Resolve the path BEFORE the try block so the except handler can never
    # reference an unbound file_path (the original computed it inside try).
    script_dir = os.path.dirname(os.path.realpath(__file__))
    file_path = os.path.join(script_dir, filename)
    try:
        # Narrow to OSError: only file-access failures are expected here.
        with open(file_path, 'r') as f:
            host = f.read().strip()
    except OSError as e:
        logging.error(f"Failed to read host from {file_path}: {e}")
        return None
    if host:
        logging.info(f"Using host from {file_path}: {host}")
        return host
    logging.warning(f"{file_path} is empty. Falling back to default hosts.")
    return None
@classmethod
def get_available_models(cls):
    """Return model names from the first reachable Ollama server.

    Tries the host from ollama_ip.txt first, then the local default hosts.
    Returns ["none"] when every connection attempt fails so the caller
    always gets a non-empty list.
    """
    def fetch(host_url):
        # Single connection attempt; None signals failure so the next
        # candidate host is tried. Broad except is deliberate best-effort:
        # any client/network error just moves on to the fallback host.
        try:
            client = Client(host=host_url)
            return [model['name'] for model in client.list()['models']]
        except Exception as e:
            logging.error(f"Error fetching models from {host_url}: {e}")
            return None

    # File-configured host first, then defaults — same order as before,
    # but without duplicating the connect-and-list code per branch.
    candidates = [
        cls.read_host_from_file(),
        "http://127.0.0.1:11434",
        "http://0.0.0.0:11434",
    ]
    for host in candidates:
        if not host:
            continue  # read_host_from_file may return None
        models = fetch(host)
        if models is not None:
            return models
    return ["none"]  # default fallback when all hosts are unreachable
@classmethod
def INPUT_TYPES(cls):
@@ -59,10 +72,12 @@ class ollamaLoader:
"yellow pants, orange jacket and black shirt, sunglasses, very long beard, very pale skin, long white hair, "
"very large nose."
)
# Lazy load models only when the node is actually used
return {
"required": {
"user_prompt": ("STRING", {"multiline": True}),
"selected_model": (cls.get_available_models(),),
"selected_model": ("STRING", {"default": "llama3.2:1b"} ), # Default to none, will be populated when node is used
"ollama_url": ("STRING", {"default": "http://0.0.0.0:11434"}),
"system_prompt": ("STRING", {
"multiline": True,
"default": default_system_prompt
@@ -79,50 +94,47 @@ class ollamaLoader:
def __init__(self):
    """Initialize per-node state for change detection."""
    # None marks "no generation yet": connect_2_ollama compares each call's
    # input hash against this to decide whether the prompt changed.
    self.last_content_hash = None
def connect_2_ollama(self, user_prompt, selected_model, system_prompt, keep_1min_in_vram, ollama_url, seed):
    """Generate a completion from the Ollama server at *ollama_url*.

    Returns a 1-tuple containing the model's response text, or a 1-tuple
    with an error message when the server cannot be reached.
    """
    # Hash the inputs so an unchanged prompt neutralizes the seed. NOTE:
    # seed is never sent to Ollama — it exists only so the host UI can
    # force a re-run of this node when the user re-randomizes it.
    content_hash = hashlib.md5(
        (user_prompt + selected_model + system_prompt).encode()
    ).hexdigest()
    if content_hash != self.last_content_hash:
        self.last_content_hash = content_hash
    else:
        seed = None  # inputs unchanged: suppress re-randomization

    keep_alive_minutes = 1 if keep_1min_in_vram else 0
    host = ollama_url

    def try_generate(host_url):
        # One generation attempt; None signals connection/generation failure.
        try:
            client = Client(host=host_url)
            response = client.generate(
                model=selected_model,
                system=system_prompt,
                prompt=user_prompt,
                keep_alive=f"{keep_alive_minutes}m",
            )
            logging.info(f"Ollama response ({host_url}): {response['response']}")
            return response['response']
        except Exception as e:
            logging.error(f"Connection to {host_url} failed: {e}")
            return None

    if host:
        result = try_generate(host)
        # Compare against None, not truthiness: an empty-but-successful
        # response must not be misreported as a connection failure.
        if result is not None:
            return (result,)

    logging.error("All connection attempts failed.")
    return ("Connection to Ollama failed.",)