This commit is contained in:
justumen
2024-09-25 10:02:46 +02:00
parent 78ee607a12
commit d48bcd250a
5 changed files with 87 additions and 50 deletions

View File

@@ -1,4 +1,4 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v0.43 🔗
# 🔗 Comfyui : Bjornulf_custom_nodes v0.44 🔗
# Coffee : ☕☕☕☕☕ 5/5
@@ -29,7 +29,7 @@
`27.` [♻ Loop (All Lines from input)](#27----loop-all-lines-from-input)
`33.` [♻ Loop (All Lines from input 🔗 combine by lines)](#33----loop-all-lines-from-input--combine-by-lines)
`38.` [♻🖼 Loop (Images)](#38----loop-images)
`39.` [♻ Loop (✒🗔 Advanced Write Text)](#39----loop--advanced-write-text)
`39.` [♻ Loop (✒🗔 Advanced Write Text + 🅰️ variables)](#39----loop--advanced-write-text)
`42.` [♻ Loop (Model+Clip+Vae) - aka Checkpoint / Model](#42----loop-modelclipvae---aka-checkpoint--model)
## 🎲 Randomization 🎲
@@ -213,6 +213,7 @@ cd /where/you/installed/ComfyUI && python main.py
- **v0.41**: Two new nodes : image details and combine images. Also ❗ Big changes to the If-Else node. (+many minor changes)
- **v0.42**: Better README with category nodes, changes some node titles
- **v0.43**: Add control_after_generate to Ollama and allow to keep in VRAM for 1 minute if needed. (For chaining quick generations.) Add fallback to 0.0.0.0
- **v0.44**: Allow Ollama to use a custom URL from the file `ollama_ip.txt` in the ComfyUI custom nodes folder. Minor changes, add details/updates to README.
# 📝 Nodes descriptions
@@ -384,6 +385,8 @@ Each run will be significantly faster, but not free your VRAM for something else
⚠️ Warning : Using `keep_1min_in_vram` might be a bit heavy on your VRAM. Think about if you really need it or not. Most of the time, when using `keep_1min_in_vram`, you don't want to have also a generation of image or anything else in the same time.
⚠️ You can create a file called `ollama_ip.txt` in this ComfyUI custom node folder if your Ollama server uses a special IP, for example: `http://192.168.1.37:11434`
## 20 - 📹 Video Ping Pong
![Video Ping Pong](screenshots/video_pingpong.png)
@@ -733,9 +736,13 @@ But for example, if you want to use my node `select an image, pick`, you need to
![combine images](screenshots/combine_images_2.png)
You can notice that there is no visible difference when you use `all_in_one` with `preview image` node. (this is why I added the `show text` node, not that show text will make it blue, because it's an image/tensor.)
You can notice that there is no visible difference when you use `all_in_one` with `preview image` node. (this is why I added the `show text` node, note that show text will make it blue, because it's an image/tensor.)
When you use `combine image` node, you can actually also send many images at once, it will combine them all.
Here is an example with `Load images from folder` node, `Image details` node and `Combine images` node. (Of course it can't have `all_in_one` set to True in this situation because the images have different resolutions) :
![combine images](screenshots/combine_images_3.png)
![combine images](screenshots/combine_images_3.png)
Here is another simple example, taking a few selected images from a folder and combining them (for later processing, for example):
![combine images](screenshots/combine_images_4.png)

120
ollama.py
View File

@@ -2,29 +2,63 @@ import ollama
from ollama import Client # pip install ollama
import logging
import hashlib
import os
class ollamaLoader:
    # ComfyUI node that talks to a local Ollama server.  Only the
    # host-discovery helper is defined here; INPUT_TYPES and the generation
    # entry point follow later in the file.

    @classmethod
    def read_host_from_file(cls, filename='ollama_ip.txt'):
        """Return the Ollama host URL stored next to this script, or None.

        Looks for `filename` in the directory containing this module.  The
        override file is opt-in: a missing or empty file just means "use the
        default hosts", so those cases are logged quietly rather than as
        errors.

        Args:
            filename: Name of the override file (default 'ollama_ip.txt').

        Returns:
            The stripped host string (e.g. 'http://192.168.1.37:11434'),
            or None when the file is absent, empty, or unreadable.
        """
        # Resolve relative to the script, not the CWD, so it works no matter
        # where ComfyUI was launched from.  Computed before the try block so
        # the except handlers can always reference it safely.
        file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
        try:
            with open(file_path, 'r') as f:
                host = f.read().strip()
        except FileNotFoundError:
            # Not an error: the override file is optional.
            logging.info(f"No host override file at {file_path}; using default hosts.")
            return None
        except OSError as e:
            logging.error(f"Failed to read host from {file_path}: {e}")
            return None
        if host:
            logging.info(f"Using host from {file_path}: {host}")
            return host
        logging.warning(f"{file_path} is empty. Falling back to default hosts.")
        return None
@classmethod
def get_available_models(cls):
    """Return the model names available on the Ollama server.

    Tries the host from `ollama_ip.txt` first (when the file exists), then
    the default local endpoints, in order.  The first host that answers
    wins.  The original version duplicated the whole client/list/extract
    logic for the file host and the fallback loop; this folds both paths
    into one candidate list with identical behavior.

    Returns:
        list[str]: available model names, or ["none"] when no host is
        reachable, so the UI combo widget always has at least one entry.
    """
    candidate_hosts = []
    file_host = cls.read_host_from_file()
    if file_host:
        candidate_hosts.append(file_host)
    # Default local endpoints, tried after any file override.
    candidate_hosts.extend(["http://127.0.0.1:11434", "http://0.0.0.0:11434"])

    for host in candidate_hosts:
        try:
            client = Client(host=host)
            listing = client.list()
            return [model['name'] for model in listing['models']]
        except Exception as e:
            # Connection/server errors vary by backend; log and try the next host.
            logging.error(f"Error fetching models from {host}: {e}")
    return ["none"]  # default entry so the widget is never empty
@classmethod
def INPUT_TYPES(cls):
default_system_prompt = "Describe a specific example of an object, animal, person, or landscape based on a given general idea. Start with a clear and concise overall description in the first sentence. Then, provide a detailed depiction of its physical features, focusing on colors, size, clothing, eyes, and other distinguishing characteristics. Use commas to separate each detail and avoid listing them. Ensure each description is vivid, precise, and specific to one unique instance of the subject. Refrain from using poetic language and giving it a name.\nExample input: man\n Example output: \nAn overweight old man sitting on a bench, wearing a blue hat, yellow pants, orange jacket and black shirt, sunglasses, very long beard, very pale skin, long white hair, very large nose."
default_system_prompt = (
"Describe a specific example of an object, animal, person, or landscape based on a given general idea. "
"Start with a clear and concise overall description in the first sentence. Then, provide a detailed depiction "
"of its physical features, focusing on colors, size, clothing, eyes, and other distinguishing characteristics. "
"Use commas to separate each detail and avoid listing them. Ensure each description is vivid, precise, and "
"specific to one unique instance of the subject. Refrain from using poetic language and giving it a name.\n"
"Example input: man\n Example output: \nAn overweight old man sitting on a bench, wearing a blue hat, "
"yellow pants, orange jacket and black shirt, sunglasses, very long beard, very pale skin, long white hair, "
"very large nose."
)
return {
"required": {
"user_prompt": ("STRING", {"multiline": True}),
@@ -57,42 +91,38 @@ class ollamaLoader:
else:
# Content hasn't changed, set seed to None to prevent randomization
seed = None
keep_alive_minutes = 0
if(keep_1min_in_vram):
keep_alive_minutes = 1
keep_alive = 0
# client = Client(host="http://0.0.0.0:11434")
# response = client.generate(
# model=selected_model,
# system=system_prompt,
# prompt=user_prompt,
# keep_alive=str(keep_alive_minutes) + "m"
# )
try:
# First attempt with 127.0.0.1
client = Client(host="http://127.0.0.1:11434")
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=str(keep_alive_minutes) + "m"
)
logging.info("Ollama response (127.0.0.1): " + response['response'])
except Exception as e:
logging.warning(f"Connection to 127.0.0.1 failed: {e}")
keep_alive_minutes = 1 if keep_1min_in_vram else 0
host = self.read_host_from_file()
if host:
try:
# Fallback to 0.0.0.0 if 127.0.0.1 fails
client = Client(host="http://0.0.0.0:11434")
client = Client(host=host)
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=str(keep_alive_minutes) + "m"
keep_alive=f"{keep_alive_minutes}m"
)
logging.info("Ollama response (0.0.0.0): " + response['response'])
logging.info(f"Ollama response ({host}): {response['response']}")
return (response['response'],)
except Exception as e:
logging.error(f"Connection to 0.0.0.0 also failed: {e}")
logging.info("Ollama response : " + response['response'])
return (response['response'],)
logging.error(f"Connection to {host} failed: {e}")
# Fallback to default hosts if reading from file fails
for default_host in ["http://127.0.0.1:11434", "http://0.0.0.0:11434"]:
try:
client = Client(host=default_host)
response = client.generate(
model=selected_model,
system=system_prompt,
prompt=user_prompt,
keep_alive=f"{keep_alive_minutes}m"
)
logging.info(f"Ollama response ({default_host}): {response['response']}")
return (response['response'],)
except Exception as e:
logging.error(f"Connection to {default_host} failed: {e}")
logging.error("All connection attempts failed.")
return ("Connection to Ollama failed.",)

View File

@@ -1,7 +1,7 @@
[project]
name = "bjornulf_custom_nodes"
description = "Nodes: Ollama, Text to Speech, Combine Texts, Random Texts, Save image for Bjornulf LobeChat, Text with random Seed, Random line from input, Combine images, Image to grayscale (black & white), Remove image Transparency (alpha), Resize Image, ..."
version = "0.43"
version = "0.44"
license = {file = "LICENSE"}
[project.urls]

Binary file not shown.

After

Width:  |  Height:  |  Size: 553 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 66 KiB

After

Width:  |  Height:  |  Size: 100 KiB