This commit is contained in:
justumen
2024-09-23 15:14:20 +02:00
parent c8dcb96652
commit 423947fc25
6 changed files with 82 additions and 17 deletions

View File

@@ -1,4 +1,4 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v0.42 🔗 # 🔗 Comfyui : Bjornulf_custom_nodes v0.43 🔗
# Coffee : ☕☕☕☕☕ 5/5 # Coffee : ☕☕☕☕☕ 5/5
@@ -71,6 +71,7 @@
## 🦙 AI 🦙 ## 🦙 AI 🦙
`19.` [🦙 Ollama](#19----ollama) `19.` [🦙 Ollama](#19----ollama)
`31.` [🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language)
## 🔊 Audio 🔊 ## 🔊 Audio 🔊
`31.` [🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language) `31.` [🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language)
@@ -211,6 +212,7 @@ cd /where/you/installed/ComfyUI && python main.py
- **v0.40**: Add variables management to Loop Advanced Write Text node. Add menu for all nodes to the README. - **v0.40**: Add variables management to Loop Advanced Write Text node. Add menu for all nodes to the README.
- **v0.41**: Two new nodes : image details and combine images. Also ❗ Big changes to the If-Else node. (+many minor changes) - **v0.41**: Two new nodes : image details and combine images. Also ❗ Big changes to the If-Else node. (+many minor changes)
- **v0.42**: Better README with category nodes, changes some node titles - **v0.42**: Better README with category nodes, changes some node titles
- **v0.43**: Add control_after_generate to Ollama and allow to keep in VRAM for 1 minute if needed. (For chaining quick generations.) Add fallback to 0.0.0.0
# 📝 Nodes descriptions # 📝 Nodes descriptions
@@ -367,12 +369,20 @@ Save image in a specific folder : `my_folder/00001.png`, `my_folder/00002.png`,
Also allow multiple nested folders, like for example : `animal/dog/small`. Also allow multiple nested folders, like for example : `animal/dog/small`.
## 19 - 🦙 Ollama ## 19 - 🦙 Ollama
![Show Text](screenshots/ollama.png) ![Ollama](screenshots/ollama_1.png)
**Description:** **Description:**
Will generate detailed text based on what you give it. Will generate detailed text based on what you give it.
I recommend using `mistral-nemo` if you can run it, but it's up to you. (Might have to tweak the system prompt a bit) I recommend using `mistral-nemo` if you can run it, but it's up to you. (Might have to tweak the system prompt a bit)
⚠️ Warning : Having an ollama node that will run for each generation might be a bit heavy on your VRAM. Think about if you really need it or not.
You also have `control_after_generate` to force the node to rerun for every workflow run. (Even if there is no modification of the node or its inputs.)
You have the option to keep it in your VRAM for a minute with `keep_1min_in_vram`. (If you plan on generating many times with the same prompt)
Each run will be significantly faster, but it will not free your VRAM for something else.
![Ollama](screenshots/ollama_2.png)
⚠️ Warning : Using `keep_1min_in_vram` might be a bit heavy on your VRAM. Think about whether you really need it or not. Most of the time, when using `keep_1min_in_vram`, you don't want to also have an image generation or anything else running at the same time.
## 20 - 📹 Video Ping Pong ## 20 - 📹 Video Ping Pong
![Video Ping Pong](screenshots/video_pingpong.png) ![Video Ping Pong](screenshots/video_pingpong.png)

View File

@@ -1,15 +1,26 @@
import ollama import ollama
from ollama import Client # pip install ollama from ollama import Client # pip install ollama
import logging
import hashlib
class ollamaLoader: class ollamaLoader:
@classmethod @classmethod
def get_available_models(cls): def get_available_models(cls):
try: try:
list_models = ollama.list() # First try with 127.0.0.1
client = Client(host="http://127.0.0.1:11434")
list_models = client.list() # Assuming list() is part of the Client class
return [model['name'] for model in list_models['models']] return [model['name'] for model in list_models['models']]
except Exception as e: except Exception as e1:
print(f"Error fetching models: {e}") print(f"Error fetching models from 127.0.0.1: {e1}")
return ["dolphin-llama3"] # Return a default model if fetching fails try:
# Fallback to 0.0.0.0
client = Client(host="http://0.0.0.0:11434")
list_models = client.list()
return [model['name'] for model in list_models['models']]
except Exception as e2:
print(f"Error fetching models from 0.0.0.0: {e2}")
return ["none"] # Return a default model if fetching fails
@classmethod @classmethod
def INPUT_TYPES(cls): def INPUT_TYPES(cls):
@@ -22,6 +33,8 @@ class ollamaLoader:
"multiline": True, "multiline": True,
"default": default_system_prompt "default": default_system_prompt
}), }),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
"keep_1min_in_vram": ("BOOLEAN", {"default": False})
} }
} }
@@ -30,14 +43,56 @@ class ollamaLoader:
FUNCTION = "connect_2_ollama" FUNCTION = "connect_2_ollama"
CATEGORY = "Bjornulf" CATEGORY = "Bjornulf"
def connect_2_ollama(self, user_prompt, selected_model, system_prompt): def __init__(self):
self.last_content_hash = None
def connect_2_ollama(self, user_prompt, selected_model, system_prompt, keep_1min_in_vram, seed):
# Generate a hash of the current content
content_hash = hashlib.md5((user_prompt + selected_model + system_prompt).encode()).hexdigest()
# Check if the content has changed
if content_hash != self.last_content_hash:
# Content has changed, use the provided seed
self.last_content_hash = content_hash
else:
# Content hasn't changed, set seed to None to prevent randomization
seed = None
keep_alive_minutes = 0
if(keep_1min_in_vram):
keep_alive_minutes = 1
keep_alive = 0 keep_alive = 0
client = Client(host="http://127.0.0.1:11434") # client = Client(host="http://0.0.0.0:11434")
response = client.generate( # response = client.generate(
model=selected_model, # model=selected_model,
system=system_prompt, # system=system_prompt,
prompt=user_prompt, # prompt=user_prompt,
keep_alive=str(keep_alive) + "m" # keep_alive=str(keep_alive_minutes) + "m"
) # )
print("Ollama response : ", response['response']) try:
# First attempt with 127.0.0.1
client = Client(host="http://127.0.0.1:11434")
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=str(keep_alive_minutes) + "m"
)
logging.info("Ollama response (127.0.0.1): " + response['response'])
except Exception as e:
logging.warning(f"Connection to 127.0.0.1 failed: {e}")
try:
# Fallback to 0.0.0.0 if 127.0.0.1 fails
client = Client(host="http://0.0.0.0:11434")
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=str(keep_alive_minutes) + "m"
)
logging.info("Ollama response (0.0.0.0): " + response['response'])
except Exception as e:
logging.error(f"Connection to 0.0.0.0 also failed: {e}")
logging.info("Ollama response : " + response['response'])
return (response['response'],) return (response['response'],)

View File

@@ -1,7 +1,7 @@
[project] [project]
name = "bjornulf_custom_nodes" name = "bjornulf_custom_nodes"
description = "Nodes: Ollama, Text to Speech, Combine Texts, Random Texts, Save image for Bjornulf LobeChat, Text with random Seed, Random line from input, Combine images, Image to grayscale (black & white), Remove image Transparency (alpha), Resize Image, ..." description = "Nodes: Ollama, Text to Speech, Combine Texts, Random Texts, Save image for Bjornulf LobeChat, Text with random Seed, Random line from input, Combine images, Image to grayscale (black & white), Remove image Transparency (alpha), Resize Image, ..."
version = "0.42" version = "0.43"
license = {file = "LICENSE"} license = {file = "LICENSE"}
[project.urls] [project.urls]

Binary file not shown.

Before

Width:  |  Height:  |  Size: 355 KiB

BIN
screenshots/ollama_1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 180 KiB

BIN
screenshots/ollama_2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 151 KiB