This commit is contained in:
justumen
2024-09-23 15:14:20 +02:00
parent c8dcb96652
commit 423947fc25
6 changed files with 82 additions and 17 deletions

View File

@@ -1,15 +1,26 @@
import ollama
from ollama import Client # pip install ollama
import logging
import hashlib
class ollamaLoader:
    @classmethod
    def get_available_models(cls):
        """Return the names of models available from a local Ollama server.

        Tries http://127.0.0.1:11434 first, then falls back to
        http://0.0.0.0:11434. Returns ["none"] as a sentinel the UI can
        display when neither host responds.
        """
        for host in ("http://127.0.0.1:11434", "http://0.0.0.0:11434"):
            try:
                client = Client(host=host)
                listing = client.list()
                return [model['name'] for model in listing['models']]
            except Exception as exc:
                # Keep trying the next host; log rather than print so the
                # message lands in the same sink as the rest of the node.
                logging.warning("Error fetching models from %s: %s", host, exc)
        # Neither host responded.
        return ["none"]
@classmethod
def INPUT_TYPES(cls):
@@ -22,6 +33,8 @@ class ollamaLoader:
"multiline": True,
"default": default_system_prompt
}),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
"keep_1min_in_vram": ("BOOLEAN", {"default": False})
}
}
@@ -30,14 +43,56 @@ class ollamaLoader:
FUNCTION = "connect_2_ollama"
CATEGORY = "Bjornulf"
def connect_2_ollama(self, user_prompt, selected_model, system_prompt):
def __init__(self):
self.last_content_hash = None
def connect_2_ollama(self, user_prompt, selected_model, system_prompt, keep_1min_in_vram, seed):
# Generate a hash of the current content
content_hash = hashlib.md5((user_prompt + selected_model + system_prompt).encode()).hexdigest()
# Check if the content has changed
if content_hash != self.last_content_hash:
# Content has changed, use the provided seed
self.last_content_hash = content_hash
else:
# Content hasn't changed, set seed to None to prevent randomization
seed = None
keep_alive_minutes = 0
if(keep_1min_in_vram):
keep_alive_minutes = 1
keep_alive = 0
client = Client(host="http://127.0.0.1:11434")
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=str(keep_alive) + "m"
)
print("Ollama response : ", response['response'])
# client = Client(host="http://0.0.0.0:11434")
# response = client.generate(
# model=selected_model,
# system=system_prompt,
# prompt=user_prompt,
# keep_alive=str(keep_alive_minutes) + "m"
# )
try:
# First attempt with 127.0.0.1
client = Client(host="http://127.0.0.1:11434")
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=str(keep_alive_minutes) + "m"
)
logging.info("Ollama response (127.0.0.1): " + response['response'])
except Exception as e:
logging.warning(f"Connection to 127.0.0.1 failed: {e}")
try:
# Fallback to 0.0.0.0 if 127.0.0.1 fails
client = Client(host="http://0.0.0.0:11434")
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=str(keep_alive_minutes) + "m"
)
logging.info("Ollama response (0.0.0.0): " + response['response'])
except Exception as e:
logging.error(f"Connection to 0.0.0.0 also failed: {e}")
logging.info("Ollama response : " + response['response'])
return (response['response'],)