mirror of
https://github.com/justUmen/Bjornulf_custom_nodes.git
synced 2026-03-21 20:52:11 -03:00
0.43
This commit is contained in:
16
README.md
16
README.md
@@ -1,4 +1,4 @@
|
||||
# 🔗 Comfyui : Bjornulf_custom_nodes v0.42 🔗
|
||||
# 🔗 Comfyui : Bjornulf_custom_nodes v0.43 🔗
|
||||
|
||||
# Coffee : ☕☕☕☕☕ 5/5
|
||||
|
||||
@@ -71,6 +71,7 @@
|
||||
|
||||
## 🦙 AI 🦙
|
||||
`19.` [🦙 Ollama](#19----ollama)
|
||||
`31.` [🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language)
|
||||
|
||||
## 🔊 Audio 🔊
|
||||
`31.` [🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language)
|
||||
@@ -211,6 +212,7 @@ cd /where/you/installed/ComfyUI && python main.py
|
||||
- **v0.40**: Add variables management to Loop Advanced Write Text node. Add menu for all nodes to the README.
|
||||
- **v0.41**: Two new nodes : image details and combine images. Also ❗ Big changes to the If-Else node. (+many minor changes)
|
||||
- **v0.42**: Better README with category nodes, changes some node titles
|
||||
- **v0.43**: Add control_after_generate to Ollama and allow keeping the model in VRAM for 1 minute if needed. (For chaining quick generations.) Add fallback to 0.0.0.0
|
||||
|
||||
# 📝 Nodes descriptions
|
||||
|
||||
@@ -367,12 +369,20 @@ Save image in a specific folder : `my_folder/00001.png`, `my_folder/00002.png`,
|
||||
Also allow multiple nested folders, like for example : `animal/dog/small`.
|
||||
|
||||
## 19 - 🦙 Ollama
|
||||

|
||||

|
||||
|
||||
**Description:**
|
||||
Will generate detailed text based on what you give it.
|
||||
I recommend using `mistral-nemo` if you can run it, but it's up to you. (Might have to tweak the system prompt a bit)
|
||||
⚠️ Warning : Having an ollama node that will run for each generation might be a bit heavy on your VRAM. Think about if you really need it or not.
|
||||
|
||||
You also have `control_after_generate` to force the node to rerun for every workflow run. (Even if there is no modification of the node or its inputs.)
|
||||
|
||||
You have the option to keep it in your VRAM for a minute with `keep_1min_in_vram`. (If you plan on generating many times with the same prompt)
|
||||
Each run will be significantly faster, but it will not free your VRAM for something else.
|
||||
|
||||

|
||||
|
||||
⚠️ Warning : Using `keep_1min_in_vram` might be a bit heavy on your VRAM. Think about whether you really need it or not. Most of the time, when using `keep_1min_in_vram`, you don't want to also have an image generation or anything else running at the same time.
|
||||
|
||||
## 20 - 📹 Video Ping Pong
|
||||

|
||||
|
||||
81
ollama.py
81
ollama.py
@@ -1,15 +1,26 @@
|
||||
import ollama
|
||||
from ollama import Client # pip install ollama
|
||||
import logging
|
||||
import hashlib
|
||||
|
||||
class ollamaLoader:
|
||||
@classmethod
|
||||
def get_available_models(cls):
|
||||
try:
|
||||
list_models = ollama.list()
|
||||
# First try with 127.0.0.1
|
||||
client = Client(host="http://127.0.0.1:11434")
|
||||
list_models = client.list() # Assuming list() is part of the Client class
|
||||
return [model['name'] for model in list_models['models']]
|
||||
except Exception as e:
|
||||
print(f"Error fetching models: {e}")
|
||||
return ["dolphin-llama3"] # Return a default model if fetching fails
|
||||
except Exception as e1:
|
||||
print(f"Error fetching models from 127.0.0.1: {e1}")
|
||||
try:
|
||||
# Fallback to 0.0.0.0
|
||||
client = Client(host="http://0.0.0.0:11434")
|
||||
list_models = client.list()
|
||||
return [model['name'] for model in list_models['models']]
|
||||
except Exception as e2:
|
||||
print(f"Error fetching models from 0.0.0.0: {e2}")
|
||||
return ["none"] # Return a default model if fetching fails
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
@@ -22,6 +33,8 @@ class ollamaLoader:
|
||||
"multiline": True,
|
||||
"default": default_system_prompt
|
||||
}),
|
||||
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
|
||||
"keep_1min_in_vram": ("BOOLEAN", {"default": False})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,14 +43,56 @@ class ollamaLoader:
|
||||
FUNCTION = "connect_2_ollama"
|
||||
CATEGORY = "Bjornulf"
|
||||
|
||||
def connect_2_ollama(self, user_prompt, selected_model, system_prompt):
|
||||
def __init__(self):
|
||||
self.last_content_hash = None
|
||||
|
||||
def connect_2_ollama(self, user_prompt, selected_model, system_prompt, keep_1min_in_vram, seed):
|
||||
# Generate a hash of the current content
|
||||
content_hash = hashlib.md5((user_prompt + selected_model + system_prompt).encode()).hexdigest()
|
||||
|
||||
# Check if the content has changed
|
||||
if content_hash != self.last_content_hash:
|
||||
# Content has changed, use the provided seed
|
||||
self.last_content_hash = content_hash
|
||||
else:
|
||||
# Content hasn't changed, set seed to None to prevent randomization
|
||||
seed = None
|
||||
|
||||
keep_alive_minutes = 0
|
||||
if(keep_1min_in_vram):
|
||||
keep_alive_minutes = 1
|
||||
|
||||
keep_alive = 0
|
||||
client = Client(host="http://127.0.0.1:11434")
|
||||
response = client.generate(
|
||||
model=selected_model,
|
||||
system=system_prompt,
|
||||
prompt=user_prompt,
|
||||
keep_alive=str(keep_alive) + "m"
|
||||
)
|
||||
print("Ollama response : ", response['response'])
|
||||
# client = Client(host="http://0.0.0.0:11434")
|
||||
# response = client.generate(
|
||||
# model=selected_model,
|
||||
# system=system_prompt,
|
||||
# prompt=user_prompt,
|
||||
# keep_alive=str(keep_alive_minutes) + "m"
|
||||
# )
|
||||
try:
|
||||
# First attempt with 127.0.0.1
|
||||
client = Client(host="http://127.0.0.1:11434")
|
||||
response = client.generate(
|
||||
model=selected_model,
|
||||
system=system_prompt,
|
||||
prompt=user_prompt,
|
||||
keep_alive=str(keep_alive_minutes) + "m"
|
||||
)
|
||||
logging.info("Ollama response (127.0.0.1): " + response['response'])
|
||||
except Exception as e:
|
||||
logging.warning(f"Connection to 127.0.0.1 failed: {e}")
|
||||
try:
|
||||
# Fallback to 0.0.0.0 if 127.0.0.1 fails
|
||||
client = Client(host="http://0.0.0.0:11434")
|
||||
response = client.generate(
|
||||
model=selected_model,
|
||||
system=system_prompt,
|
||||
prompt=user_prompt,
|
||||
keep_alive=str(keep_alive_minutes) + "m"
|
||||
)
|
||||
logging.info("Ollama response (0.0.0.0): " + response['response'])
|
||||
except Exception as e:
|
||||
logging.error(f"Connection to 0.0.0.0 also failed: {e}")
|
||||
logging.info("Ollama response : " + response['response'])
|
||||
return (response['response'],)
|
||||
@@ -1,7 +1,7 @@
|
||||
[project]
|
||||
name = "bjornulf_custom_nodes"
|
||||
description = "Nodes: Ollama, Text to Speech, Combine Texts, Random Texts, Save image for Bjornulf LobeChat, Text with random Seed, Random line from input, Combine images, Image to grayscale (black & white), Remove image Transparency (alpha), Resize Image, ..."
|
||||
version = "0.42"
|
||||
version = "0.43"
|
||||
license = {file = "LICENSE"}
|
||||
|
||||
[project.urls]
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 355 KiB |
BIN
screenshots/ollama_1.png
Normal file
BIN
screenshots/ollama_1.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 180 KiB |
BIN
screenshots/ollama_2.png
Normal file
BIN
screenshots/ollama_2.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 151 KiB |
Reference in New Issue
Block a user