This commit is contained in:
justumen
2024-11-22 12:30:00 +01:00
parent 840e62d00c
commit 0673c134d5
46 changed files with 1629 additions and 410 deletions

152
README.md
View File

@@ -1,6 +1,6 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v0.56 🔗
# 🔗 Comfyui : Bjornulf_custom_nodes v0.57 🔗
A list of 61 custom nodes for Comfyui : Display, manipulate, and edit text, images, videos, loras and more.
A list of 68 custom nodes for Comfyui : Display, manipulate, and edit text, images, videos, loras and more.
You can manage looping operations, generate randomized content, trigger logical conditions, pause and manually control your workflows and even work with external AI tools, like Ollama or Text To Speech.
# Coffee : ☕☕☕☕☕ 5/5
@@ -18,6 +18,7 @@ You can manage looping operations, generate randomized content, trigger logical
## 👁 Display and Show 👁
`1.` [👁 Show (Text, Int, Float)](#1----show-text-int-float)
`49.` [📹👁 Video Preview](#49----video-preview)
`68.` [🔢 Add line numbers](#)
## ✒ Text ✒
`2.` [✒ Write Text](#2----write-text)
@@ -84,6 +85,7 @@ You can manage looping operations, generate randomized content, trigger logical
`47.` [🖼 Combine Images](#47----combine-images)
`60.` [🖼🖼 Merge Images/Videos 📹📹 (Horizontally)](#60----merge-imagesvideos--horizontally)
`61.` [🖼🖼 Merge Images/Videos 📹📹 (Vertically)](#61----merge-imagesvideos--vertically)
`62.` [🦙👁 Ollama Vision](#)
## 🚀 Load checkpoints 🚀
`40.` [🎲 Random (Model+Clip+Vae) - aka Checkpoint / Model](#40----random-modelclipvae---aka-checkpoint--model)
@@ -106,13 +108,19 @@ You can manage looping operations, generate randomized content, trigger logical
`59.` [📹🔊 Combine Video + Audio](#59----combine-video--audio)
`60.` [🖼🖼 Merge Images/Videos 📹📹 (Horizontally)](#60----merge-imagesvideos--horizontally)
`61.` [🖼🖼 Merge Images/Videos 📹📹 (Vertically)](#61----merge-imagesvideos--vertically)
`66.` [🔊➜📝 STT - Speech to Text](#)
## 🤖 AI 🤖
`19.` [🦙 Ollama](#19----ollama)
`31.` [🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language)
`19.` [🦙💬 Ollama Talk](#)
`62.` [🦙👁 Ollama Vision](#)
`63.` [🦙 Ollama Configuration ⚙](#)
`64.` [🦙 Ollama Job Selector 💼](#)
`65.` [🦙 Ollama Persona Selector 🧑](#)
`31.` [📝➜🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language)
`66.` [🔊➜📝 STT - Speech to Text](#)
## 🔊 Audio 🔊
`31.` [🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language)
`31.` [📝➜🔊 TTS - Text to Speech](#31----tts---text-to-speech-100-local-any-voice-you-want-any-language)
`52.` [🔊📹 Audio Video Sync](#52----audio-video-sync)
`59.` [📹🔊 Combine Video + Audio](#59----combine-video--audio)
@@ -162,6 +170,14 @@ For downloading from civitai (get token here <https://civitai.com/user/account>)
CIVITAI="8b275fada679ba5812b3da2bf35016f6"
wget --content-disposition -P /workspace/ComfyUI/models/checkpoints "https://civitai.com/api/download/models/272376?type=Model&format=SafeTensor&size=pruned&fp=fp16&token=$CIVITAI"
```
If you want to download for example the entire output folder, you can just compress it :
```
cd /workspace/ComfyUI/output && tar -czvf /workspace/output.tar.gz .
```
Then you can download it from the file manager JupyterLab.
If you have any issues with this template from Runpod, please let me know, I'm here to help. 😊
# 🏗 Dependencies (nothing to do for runpod ☁)
@@ -265,6 +281,7 @@ cd /where/you/installed/ComfyUI && python main.py
- **v0.52-53**: Revert name git to Bjornulf_custom_nodes, match registry comfy
- **v0.54-55**: add opencv-python to requirements.txt
- **0.56**: ❗Breaking changes : ollama node simplified, no ollama_ip.txt needed, waiting for collection ollama nodes to be ready.
- **0.57**: ❗❗Huge changes, new Ollama node "Ollama Chat" with real functionalities. 5 Ollama nodes total. (Model selector + Job selector + Persona selector + Ollama vision + Ollama Talk) Ollama talk use context and can use context file. Add number of lines / current counter + next to sequential nodes. Add new node STT. (+ faster_whisper dep) better management of empty loras/checkpoints on selectors. (list preset) Add "default_for_language" for TTS node, taking the default voice for a language (ex: fr/default.wav) Otherwise take the first wav with the selected language.
# 📝 Nodes descriptions
@@ -370,7 +387,7 @@ Here is an example with controlnet, trying to make a red cat based on a blue rab
## 10 - ♻ Loop All Samplers
**Description:**
Iterate over all available samplers to apply them sequentially. Ideal for testing.
Iterate over all available samplers to apply them one by one. Ideal for testing.
![Loop All Samplers](screenshots/loop_all_samplers.png)
@@ -380,7 +397,7 @@ Here is an example of looping over all the samplers with the normal scheduler :
## 11 - ♻ Loop All Schedulers
**Description:**
Iterate over all available schedulers to apply them sequentially. Ideal for testing. (same idea as sampler above, but for schedulers)
Iterate over all available schedulers to apply them one by one. Ideal for testing. (same idea as sampler above, but for schedulers)
![Loop All Schedulers](screenshots/loop_all_schedulers.png)
@@ -437,25 +454,55 @@ Also allow multiple nested folders, like for example : `animal/dog/small`.
![Save Temporary API](screenshots/save_image_to_folder.png)
## 19 - 🦙 Ollama
## 19 - 🦙💬 Ollama Talk
**Description:**
Will generate detailed text based on what you give it.
Use Ollama inside Comfyui. (Require the backend Ollama to be installed and currently running.)
Use by default the model `llama3.2:3b` and the URL `http://0.0.0.0:11434`. (For custom configuration, use node 63)
![Ollama](screenshots/ollama_1.png)
Example of basic usage :
![Ollama](screenshots/1_ollama_basic.png)
I recommend using `mistral-nemo` if you can run it, but it's up to you. (Might have to tweak the system prompt a bit)
Example of usage with context, notice that with context you can follow up a conversation, "there" is clearly understood as "Bucharest" :
![Ollama](screenshots/2_ollama_context.png)
You also have `control_after_generate` to force the node to rerun for every workflow run. (Even if there is no modification of the node or its inputs.)
You can also use `use_context_file` (set to True), this will save the context in a file : `ComfyUI/Bjornulf/ollama_context.txt`.
This way you can keep using the context without having to chain many nodes together, just run the same workflow several times.
You have the option to keep it in your VRAM for a minute with `keep_1min_in_vram`. (If you plan to generate many times with the same prompt)
Each run will be significantly faster, but not free your VRAM for something else.
### Example in 3 steps of context file conversation
![Ollama](screenshots/ollama_2.png)
Step 1 : Notice that for now context is empty, so it will be the first message in `ComfyUI/Bjornulf/ollama_context.txt` :
![Ollama](screenshots/3_ollama_contextFile_1.png)
⚠️ Warning : Using `keep_1min_in_vram` might be a bit heavy on your VRAM. Think about if you really need it or not. Most of the time, when using `keep_1min_in_vram`, you don't want to have also a generation of image or anything else in the same time.
Step 2 : Notice that now the number of lines in context file has changed (These are the same as the `updated_context`):
![Ollama](screenshots/3_ollama_contextFile_2.png)
⚠️ You can create a file called `ollama_ip.txt` in my comfyui custom node folder if you have a special IP for your ollama server, mine is : `http://192.168.1.37:11434`
Step 3 : Notice that the number of lines keeps incrementing.
![Ollama](screenshots/3_ollama_contextFile_3.png)
When clicking the `reset Button`, it will also save the context in : `ComfyUI/Bjornulf/ollama_context_001.txt`, `ComfyUI/Bjornulf/ollama_context_002.txt`, etc...
⚠️ If you want to have an "interactive" conversation, you can enable the option `waiting_for_prompt`.
When set to True, it will create a `Resume` button, use this to unpause the node and process the prompt.
### Example in 3 steps of waiting_for_prompt interactive conversation
Step 1: I run the workflow, notice that Show node is empty, the node is pausing the workflow and is waiting for you to edit the prompt. (Notice that at this moment, it is asking for the capital of France.)
![Ollama](screenshots/ollama_waiting_1.png)
Step 2: I edit the prompt to change France into China, but the node won't process the request until you click on Resume.
![Ollama](screenshots/ollama_waiting_2.png)
Step 3: I click on Resume button, this is when the request is done. Notice that it used China and not France.
![Ollama](screenshots/ollama_waiting_3.png)
Other options :
- You also have `control_after_generate` to force the node to rerun for every workflow run. (Even if there is no modification of the node or its inputs.)
- You can set `max_tokens` to reduce the size of the answer, a token is about 3 english characters.
- You can force the answer to be on a single line, can be useful.
- You have the option to keep the model in VRAM. (If you plan to generate many times with the same prompt) - Each run will be significantly faster, but it will not free your VRAM for something else.
⚠️ Warning : Using `vram_retention_minutes` might be a bit heavy on your VRAM. Think about if you really need it or not. Most of the time, when using `vram_retention_minutes`, you don't want to have also a generation of image or anything else in the same time.
## 20 - 📹 Video Ping Pong
@@ -953,7 +1000,9 @@ Just take a single Lora at random from a list of Loras.
This loop works like a normal loop, BUT it is sequential : It will run only once for each workflow run !!!
The first time it will output the first integer, the second time the second integer, etc...
When the last is reached, the node will STOP the workflow, preventing anything else from running after it.
Under the hood it is using the file `counter_integer.txt` in the `ComfyUI/Bjornulf` folder.
Under the hood it is using a single file `counter_integer.txt` in the `ComfyUI/Bjornulf` folder.
❗ Do not use more than one node like this one in a workflow, because they will share the same `counter_integer.txt` file. (unexpected behaviour.)
Update 0.57: Now also contains the next counter in the reset button.
![loop sequential integer](screenshots/loop_sequential_integer_1.png)
![loop sequential integer](screenshots/loop_sequential_integer_2.png)
@@ -970,9 +1019,13 @@ When the last is reached, the node will STOP the workflow, preventing anything e
Under the hood it is using the file `counter_lines.txt` in the `ComfyUI/Bjornulf` folder.
Here is an example of usage with my TTS node : when I have a list of sentences to process, if I don't like a version, I can just click on the -1 button, tick "overwrite" on TTS node and it will generate the same sentence again, repeat until good.
❗ Do not use more than one node like this one in a workflow, because they will share the same `counter_lines.txt` file. (unexpected behaviour.)
![loop sequential line](screenshots/loop_sequential_lines.png)
Update 0.57: Now also contains the next counter in the reset button.
If you want to be able to predict the next line, you can use node 68, to Add line numbers.
### 58 - 📹🔗 Concat Videos
**Description:**
@@ -1009,4 +1062,65 @@ Merge images or videos vertically.
Here is one possible example for videos with node 60 and 61 :
![merge videos](screenshots/merge_videos.png)
![merge videos](screenshots/merge_videos.png)
### 62 - 🦙👁 Ollama Vision
**Description:**
Take an image as input and will describe the image. Uses `moondream` by default, but can select anything with node 63.
![ollama vision](screenshots/ollama_vision.png)
### 63 - 🦙 Ollama Configuration ⚙
**Description:**
Use custom configurations for Ollama Talk and Vision.
You can change the ollama Url and the model used.
Some vision models can also do text to a certain extent.
Example of a `Ollama Vision Node` and `Ollama Talk Node` using the same `Ollama Configuration Node` :
![ollama config](screenshots/ollama_config.png)
### 64 - 🦙 Ollama Job Selector 💼
**Description:**
Select a job for your Ollama Talk Node, set it to `None` for just chat.
If you want to write your own, just set it to `None` and write your prompt as prefix.
![ollama job](screenshots/ollama_job.png)
### 65 - 🦙 Ollama Persona Selector 🧑
**Description:**
Select a personality for your Ollama Talk Node.
If you want to write your own, just set it to `None` and write your prompt as prefix.
Below, an example of a crazy scientist explaining gravity. (Notice that the LLM was smart enough to understand the typo) :
![ollama persona](screenshots/ollama_persona.png)
### 66 - 🔊➜📝 STT - Speech to Text
**Description:**
Use `faster-whisper` to transform an AUDIO type or audio_path into text. (Autodetect language)
![stt](screenshots/stt_1.png)
![stt](screenshots/stt_2.png)
### 67 - 📝➜✨ Text to Anything
**Description:**
Sometimes you want to force a node to accept a STRING.
You can't do that for example if the node is taking a LIST as input.
This node can be used in the middle to force a STRING to be used anyway.
Below is an example of that with my TTS node.
![text to anything](screenshots/text_to_anything.png)
### 68 - 🔢 Add line numbers
**Description:**
This node will just add line numbers to text.
Useful when you want to use node 57 that will loop over input lines. (You can read/predict the next line.)
![add line numbers](screenshots/add_line_numbers.png)

View File

@@ -64,9 +64,25 @@ from .concat_videos import ConcatVideos
from .combine_video_audio import CombineVideoAudio
from .images_merger_horizontal import MergeImagesHorizontally
from .images_merger_vertical import MergeImagesVertically
from .ollama_talk import OllamaTalk
from .ollama_image_vision import OllamaImageVision
from .ollama_config_selector import OllamaConfig
from .ollama_system_persona import OllamaSystemPersonaSelector
from .ollama_system_job import OllamaSystemJobSelector
from .speech_to_text import SpeechToText
from .text_to_anything import TextToAnything
from .add_line_numbers import AddLineNumbers
NODE_CLASS_MAPPINGS = {
"Bjornulf_ollamaLoader": ollamaLoader,
"Bjornulf_AddLineNumbers": AddLineNumbers,
"Bjornulf_TextToAnything": TextToAnything,
"Bjornulf_SpeechToText": SpeechToText,
"Bjornulf_OllamaConfig": OllamaConfig,
"Bjornulf_OllamaSystemPersonaSelector": OllamaSystemPersonaSelector,
"Bjornulf_OllamaSystemJobSelector": OllamaSystemJobSelector,
"Bjornulf_OllamaImageVision": OllamaImageVision,
"Bjornulf_OllamaTalk": OllamaTalk,
"Bjornulf_MergeImagesHorizontally": MergeImagesHorizontally,
"Bjornulf_MergeImagesVertically": MergeImagesVertically,
"Bjornulf_CombineVideoAudio": CombineVideoAudio,
@@ -131,6 +147,15 @@ NODE_CLASS_MAPPINGS = {
}
NODE_DISPLAY_NAME_MAPPINGS = {
"Bjornulf_OllamaTalk": "🦙💬 Ollama Talk",
"Bjornulf_OllamaImageVision": "🦙👁 Ollama Vision",
"Bjornulf_OllamaConfig": "🦙 Ollama Configuration ⚙",
"Bjornulf_OllamaSystemJobSelector": "🦙 Ollama Job Selector 👇",
"Bjornulf_OllamaSystemPersonaSelector": "🦙 Ollama Persona Selector 👇",
"Bjornulf_SpeechToText": "🔊➜📝 STT - Speech to Text",
"Bjornulf_TextToSpeech": "📝➜🔊 TTS - Text to Speech",
"Bjornulf_TextToAnything": "📝➜✨ Text to Anything",
"Bjornulf_AddLineNumbers": "🔢 Add line numbers",
"Bjornulf_WriteText": "✒ Write Text",
"Bjornulf_MergeImagesHorizontally": "🖼🖼 Merge Images/Videos 📹📹 (Horizontally)",
"Bjornulf_MergeImagesVertically": "🖼🖼 Merge Images/Videos 📹📹 (Vertically)",
@@ -186,7 +211,6 @@ NODE_DISPLAY_NAME_MAPPINGS = {
"Bjornulf_VideoPingPong": "📹 video PingPong",
"Bjornulf_ollamaLoader": "🦙 Ollama (Description)",
"Bjornulf_FreeVRAM": "🧹 Free VRAM hack",
"Bjornulf_TextToSpeech": "🔊 TTS - Text to Speech",
"Bjornulf_PickInput": "⏸️ Paused. Select input, Pick 👇",
"Bjornulf_PauseResume": "⏸️ Paused. Resume or Stop, Pick 👇",
"Bjornulf_LoadImagesFromSelectedFolder": "📥🖼📂 Load Images from output folder",

30
add_line_numbers.py Normal file
View File

@@ -0,0 +1,30 @@
class AddLineNumbers:
    """ComfyUI node that prefixes every line of the incoming text with its line number."""

    def __init__(self):
        # Display-related settings (kept as-is; the text transformation below
        # does not read them).
        self.font_size = 14
        self.padding = 10
        self.line_height = self.font_size + 4
        self.gutter_width = 50  # Width for line numbers

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "text": ("STRING", {"multiline": True, "forceInput": True}),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "add_line_numbers"
    CATEGORY = "Bjornulf"

    def add_line_numbers(self, text):
        """Return the input text with a right-aligned 4-digit number and ' | ' before each line."""
        numbered = [
            f"{index:4d} | {content}"
            for index, content in enumerate(text.split('\n'), 1)
        ]
        return ('\n'.join(numbered),)

View File

@@ -98,8 +98,8 @@ async def decrement_lines_counter(request):
except Exception as e:
return web.json_response({"success": False, "error": str(e)}, status=500)
@PromptServer.instance.routes.get("/get_current_line")
async def get_current_line(request):
@PromptServer.instance.routes.post("/get_current_line_number")
async def get_current_line_number(request):
counter_file = os.path.join("Bjornulf", "counter_lines.txt")
try:
with open(counter_file, 'r') as f:

View File

@@ -9,8 +9,12 @@ class LoopLoraSelector:
lora_list = get_filename_list("loras")
optional_inputs = {}
# Add a default value if lora_list is empty
if not lora_list:
lora_list = ["none"]
for i in range(1, 21):
optional_inputs[f"lora_{i}"] = (lora_list, {"default": lora_list[min(i-1, len(lora_list)-1)]})
optional_inputs[f"lora_{i}"] = (lora_list, {"default": lora_list[0]})
optional_inputs[f"strength_model_{i}"] = ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01})
optional_inputs[f"strength_clip_{i}"] = ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01})
@@ -39,13 +43,14 @@ class LoopLoraSelector:
strength_model_key = f"strength_model_{i}"
strength_clip_key = f"strength_clip_{i}"
if lora_key in kwargs and kwargs[lora_key]:
if lora_key in kwargs and kwargs[lora_key] and kwargs[lora_key] != "none":
available_loras.append(kwargs[lora_key])
strengths_model.append(kwargs.get(strength_model_key, 1.0))
strengths_clip.append(kwargs.get(strength_clip_key, 1.0))
if not available_loras:
raise ValueError("No Loras selected")
# Return original model and clip if no valid LoRAs are selected
return ([model], [clip], [""], [""], [""])
models = []
clips = []

View File

@@ -6,10 +6,20 @@ class LoopModelSelector:
@classmethod
def INPUT_TYPES(cls):
model_list = get_filename_list("checkpoints")
# if not model_list:
# raise ValueError("No checkpoint models found in the checkpoints directory")
optional_inputs = {}
# Safely get default model for each input
for i in range(1, 11):
optional_inputs[f"model_{i}"] = (model_list, {"default": model_list[min(i-1, len(model_list)-1)]})
# If model_list is empty, use an empty default
if not model_list:
optional_inputs[f"model_{i}"] = (model_list, {})
else:
# Use modulo to wrap around to the start of the list if we exceed its length
default_index = (i - 1) % len(model_list)
optional_inputs[f"model_{i}"] = (model_list, {"default": model_list[default_index]})
return {
"required": {
@@ -27,12 +37,13 @@ class LoopModelSelector:
def select_models(self, number_of_models, **kwargs):
# Collect available models from kwargs
available_models = [
kwargs[f"model_{i}"] for i in range(1, number_of_models + 1) if f"model_{i}" in kwargs and kwargs[f"model_{i}"]
kwargs[f"model_{i}"] for i in range(1, number_of_models + 1)
if f"model_{i}" in kwargs and kwargs[f"model_{i}"]
]
# Raise an error if no models are available
if not available_models:
raise ValueError("No models selected")
raise ValueError("No models selected. Please ensure at least one model is selected.")
models = []
clips = []

View File

@@ -9,9 +9,9 @@ class LoopIntegerSequential:
def INPUT_TYPES(cls):
return {
"required": {
"from_this": ("INT", {"default": 0, "min": 0, "max": 50000, "step": 1}),
"to_that": ("INT", {"default": 10, "min": 0, "max": 50000, "step": 1}),
"jump": ("INT", {"default": 1, "min": 0, "max": 1000, "step": 1}),
"from_this": ("INT", {"default": 1, "min": 1, "max": 50000, "step": 1}),
"to_that": ("INT", {"default": 10, "min": 1, "max": 50000, "step": 1}),
"jump": ("INT", {"default": 1, "min": 1, "max": 1000, "step": 1}),
},
}
@@ -53,16 +53,18 @@ class LoopIntegerSequential:
return (next_value, remaining_cycles - 1) # Subtract 1 to account for the current run
# Server routes
# @PromptServer.instance.routes.get("/get_counter_value")
# async def get_counter_value(request):
# logging.info("Get counter value called")
# counter_file = os.path.join("Bjornulf", "counter_integer.txt")
# try:
# with open(counter_file, 'r') as f:
# value = int(f.read().strip())
# return web.json_response({"success": True, "value": value}, status=200)
# except (FileNotFoundError, ValueError):
# return web.json_response({"success": False, "error": "Counter not initialized"}, status=404)
@PromptServer.instance.routes.post("/get_counter_value")
async def get_counter_value(request):
# logging.info("Get counter value called")
counter_file = os.path.join("Bjornulf", "counter_integer.txt")
try:
with open(counter_file, 'r') as f:
current_index = int(f.read().strip())
return web.json_response({"success": True, "value": current_index + 1}, status=200)
except (FileNotFoundError, ValueError):
return web.json_response({"success": True, "value": 0}, status=200)
except Exception as e:
return web.json_response({"success": False, "error": str(e)}, status=500)
@PromptServer.instance.routes.post("/reset_counter")
async def reset_counter(request):

21
ollama_config_selector.py Normal file
View File

@@ -0,0 +1,21 @@
class OllamaConfig:
    """ComfyUI node bundling an Ollama server URL and a model name into a
    single OLLAMA_CONFIG value consumed by the other Ollama nodes."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "ollama_url": ("STRING", {"default": "http://0.0.0.0:11434"}),
                "model_name": ("STRING", {"default": "undefined"})  # Empty list with no default
            }
        }

    RETURN_TYPES = ("OLLAMA_CONFIG",)
    RETURN_NAMES = ("OLLAMA_CONFIG",)
    FUNCTION = "select_model"
    CATEGORY = "ollama"

    def select_model(self, ollama_url, model_name):
        """Pack the URL and model into the config dict used downstream."""
        config = {"model": model_name, "url": ollama_url}
        return (config,)

    @classmethod
    def IS_CHANGED(cls, ollama_url, model_name) -> float:
        # Constant value: the node never reports itself as changed, so
        # ComfyUI does not re-execute it needlessly.
        return 0.0

129
ollama_image_vision.py Normal file
View File

@@ -0,0 +1,129 @@
import torch
import base64
from PIL import Image
import numpy as np
from io import BytesIO
import requests
import json
import ollama
from ollama import Client
import logging
import hashlib
from typing import Dict, Any
from PIL.PngImagePlugin import PngInfo
class OllamaImageVision:
    """ComfyUI node: describe an input IMAGE batch with an Ollama vision model.

    Produces up to 8 description styles (basic, advanced, characters, objects,
    semantic, context, SDXL prompt, FLUX prompt). Requires a running Ollama
    server; defaults to model `moondream` at http://0.0.0.0:11434 unless an
    OLLAMA_CONFIG input overrides it.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "IMAGE": ("IMAGE",),
                "output_selection": ("INT", {"default": 7, "min": 1, "max": 8,
                                             "step": 1, "display": "slider", "label": "Number of outputs (1-8)"}),
                "process_below_output_selection": ("BOOLEAN", {"default": False, "label": "Process all up to selection"})
            },
            "optional": {
                "OLLAMA_CONFIG": ("OLLAMA_CONFIG", {"forceInput": True}),
            }
        }

    RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING",)
    RETURN_NAMES = ("1 - Basic Description", "2 - Advanced Description", "3 - Characters Description", "4 - Object Recognition", "5 - Semantic Understanding", "6 - Contextual Analysis", "7 - SDXL Prompt (words)", "8 - FLUX Prompt (sentences)")
    FUNCTION = "process_image_base64"
    CATEGORY = "Bjornulf"

    def __init__(self):
        self.client = None

    def create_prompts(self):
        """Return the 8 prompt templates, keyed in output-slot order (slot 1..8)."""
        return {
            # Do not describe what isn't there.
            "basic": "Summarize the main content of the image in one concise sentence.",
            "advanced": "Describe the scene thoroughly, capturing intricate details, colors, textures, and any significant actions or events occurring in the image.",
            "characters": "Describe each character's physical appearance in vivid, descriptive terms, including clothing, expressions, body language, and notable features.",
            "objects": "Identify and describe the primary objects in the image, detailing their size, position, color, and any unique characteristics.",
            "semantic": "Analyze the image's mood, environment, and implied meaning. Discuss any symbolic elements, artistic style, and possible intent or story conveyed.",
            "context": "Describe the relationships and interactions between objects and characters, focusing on spatial arrangement, implied actions, and any contextual clues suggesting connections or purpose.",
            "SDXL": "Describe the image. The goal is to generate a concise, detailed, and effective description. Guidelines for describing the image:- Focus on visual elements, be specific about objects, colors, textures, and compositions. Use adjectives to describe key features. Avoid complete sentences or narrative descriptions. Prioritize important elements over minor details. Your input will be a detailed description of an image. Process this description and refine it into a prompt suitable for stable diffusion models using the following steps: 1. Identify the most important visual elements and characteristics. 2. Condense the description into a series of comma-separated phrases or words. 3. Prioritize specific, descriptive terms over general ones. Here are two examples of good outputs: Example 1:vibrant sunset, tropical beach, silhouetted palm trees, calm ocean, orange and purple sky, wispy clouds, golden sand, gentle waves, beachgoers in distance, serene atmosphere, warm lighting, panoramic view. Example 2: steampunk cityscape, towering clockwork structures, brass and copper tones, billowing steam, airships in sky, cobblestone streets, Victorian-era citizens, gears and pipes visible, warm sepia lighting, hazy atmosphere, intricate mechanical details. Your final output should be a single line of text containing the refined prompt, without any additional explanation or commentary. IMPORTANT : DO NOT Include information about the overall style or artistic technique.",
            "FLUX": "Describe the given image in a detailed and structured format that is specifically designed for image generation. Use descriptive language to capture the essence of the image, including the environment, objects, characters, lighting, textures, and any other notable elements. The description must use some of these 9 points : 1. Scene Type: [Outdoor/Indoor/Abstract/Fantasy/Realistic/etc.] 2. Primary Subject: [Main focus or characters in the scene.] 3. Environment Details: [Describe the setting in vivid detail, including any landscapes, architecture, or surroundings.] 4. Lighting: [Specify the type, color, and intensity of the lighting.] 5. Colors and Tones: [Dominant colors and overall mood.] 6. Perspective: [Camera angle or viewpoint—close-up, wide shot, aerial, etc.] 7. Texture and Details: [Surface materials, patterns, and fine details.] 8. Emotion or Atmosphere: [Mood conveyed by the scene—serene, ominous, lively, etc.] 9. Unique Elements: [Special features or focal points that make the image distinctive.] For example: 1. Scene Type: Outdoor, natural landscape. 2. Primary Subject: A majestic lion standing atop a rocky hill. 3. Environment Details: A vast savannah with tall golden grass, sparse acacia trees, and distant mountains under a clear blue sky. 4. Lighting: Bright, warm sunlight casting long shadows. 5. Colors and Tones: Predominantly gold and blue, with subtle earthy browns and greens. 6. Perspective: Mid-range shot, slightly low angle to emphasize the lion's dominance. 7. Texture and Details: The lion's fur appears detailed with visible strands, and the rocks have a rough, weathered texture. 8. Emotion or Atmosphere: Majestic, powerful, and serene. 9. Unique Elements: A subtle wind effect in the grass and mane, adding movement to the scene. IMPORTANT : DO NOT Include information about the overall style or artistic technique."
        }

    def process_image_base64(self, IMAGE, OLLAMA_CONFIG=None, output_selection=7, process_below_output_selection=False):
        """Encode the image batch to base64 PNGs and query Ollama once per requested slot.

        Defaults match INPUT_TYPES (7 / False); they previously disagreed (6 / True),
        which only mattered for direct Python calls — ComfyUI always passes explicit values.
        Returns an 8-tuple of strings; unrequested slots are empty strings.
        """
        if OLLAMA_CONFIG is None:
            OLLAMA_CONFIG = {
                "model": "moondream",
                "url": "http://0.0.0.0:11434"
            }
        selected_model = OLLAMA_CONFIG["model"]
        ollama_url = OLLAMA_CONFIG["url"]

        images_base64 = []
        for img in IMAGE:
            # Convert tensor ([0,1] float) to a uint8 numpy array, then to PNG bytes.
            numpy_img = (255. * img.cpu().numpy()).clip(0, 255).astype(np.uint8)
            pil_image = Image.fromarray(numpy_img)
            buffered = BytesIO()
            pil_image.save(buffered, format="PNG")
            img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
            images_base64.append(img_str)
            buffered.close()

        # Initialize client
        client = Client(host=ollama_url)
        # Get prompts in output-slot order
        prompts = list(self.create_prompts().items())

        # Process outputs based on selection and process_below_output_selection flag
        responses = []
        for i in range(8):  # Always prepare 8 slots to match RETURN_TYPES
            if process_below_output_selection:
                # Process all outputs up to output_selection
                run_this_slot = i < output_selection
            else:
                # Process only the selected output (output_selection - 1)
                run_this_slot = i == (output_selection - 1)

            if run_this_slot:
                prompt_type, prompt = prompts[i]
                response = client.generate(
                    model=selected_model,
                    prompt=prompt,
                    images=images_base64
                )
                responses.append(response['response'].strip())
            else:
                responses.append("")

        return tuple(responses)

    def handle_error(self, error_message: str) -> tuple:
        """Return the error message in all 8 output slots (was 4 — mismatched RETURN_TYPES)."""
        error_response = f"Error: {error_message}"
        return tuple([error_response] * 8)

View File

@@ -1 +0,0 @@
http://0.0.0.0:11434

78
ollama_system_job.py Normal file
View File

@@ -0,0 +1,78 @@
class OllamaSystemJobSelector:
    """ComfyUI node that builds an Ollama system prompt for a selected "job".

    Joins (in order) an optional persona prompt, an optional custom prefix,
    and the selected predefined job prompt into a single system prompt.
    """

    # Predefined system prompts, keyed by the dropdown label.
    SYSTEM_JOBS = {
        "None": "",
        "Storyteller, main story given (ex: 'Jerry the cat is in a bar.')": "You are a creative storyteller tasked with generating a full, original story based on a given main subject. Your goal is to craft an engaging narrative that incorporates the provided subject while inventing all other elements of the story.\n\
Here is the main subject for your story:\n\
Follow these guidelines to create your story:\n\
1. Develop a cast of characters, including a protagonist and supporting characters. Give them distinct personalities, motivations, and backgrounds.\n\
2. Create a compelling plot with a clear beginning, middle, and end. Include conflict, obstacles, and resolution.\n\
3. Establish a vivid setting that complements the main subject and enhances the story's atmosphere.\n\
4. Incorporate the main subject as a central element of your story. It can be a character, a place, an object, or a concept, but it should play a significant role in the narrative.\n\
5. Use descriptive language to bring your story to life, engaging the reader's senses and emotions.\n\
6. Choose an appropriate tone and style that fits the nature of the main subject and the story you're telling (e.g., humorous, dramatic, mysterious, etc.).\n\
7. Include dialogue if appropriate.\n\
Remember to be creative, original, and engaging in your storytelling.",
        "Imaginator, specific event given (ex: 'Jerry the cat is fighting a dog.')": "You are tasked with generating a vivid and engaging description of a specific event in a story. The goal is to expand on a brief setup provided by the user and create a detailed, immersive narrative of the event.\n\
To generate a compelling description of this event, follow these guidelines:\n\
1. Expand on the given setup, adding sensory details, emotions, and atmosphere to bring the scene to life.\n\
2. Maintain consistency with the characters, setting, and action described in the setup.\n\
3. Develop the event logically, showing a clear progression from the initial situation to its resolution.\n\
4. Include dialogue if appropriate, but keep it brief and impactful.\n\
5. Focus on the main action or conflict presented in the setup, but feel free to add minor details or obstacles that enhance the story.\n\
Write in a vivid, descriptive style that engages the reader's imagination.\n\
Remember, your goal is to transform the brief setup into a rich, engaging narrative that brings the event to life for the reader.",
        # Use varied sentence structures and strong verbs to create a dynamic narrative. The tone should match the nature of the event - it could be tense, humorous, mysterious, or exciting, depending on the context.
        "SDXL, context given (Ex: 'black cat')": "Create a detailed prompt for text-to-image generation, do not anything else, do not explain what you are doing. Your goal is to take a brief user input and expand it into a rich, vivid description that can be used to create a high-quality, detailed image.\n\
The goal is to generate a concise, detailed, and effective description. Guidelines for describing the image:- Focus on visual elements, be specific about objects, colors, textures, and compositions. Use adjectives to describe key features. Avoid complete sentences or narrative descriptions. Prioritize important elements over minor details. Your input will be a detailed description of an image. Process this description and refine it into a prompt suitable for stable diffusion models using the following steps: 1. Identify the most important visual elements and characteristics. 2. Condense the description into a series of comma-separated phrases or words. 3. Prioritize specific, descriptive terms over general ones. Here are two examples of good outputs: Example 1:vibrant sunset, tropical beach, silhouetted palm trees, calm ocean, orange and purple sky, wispy clouds, golden sand, gentle waves, beachgoers in distance, serene atmosphere, warm lighting, panoramic view. Example 2: steampunk cityscape, towering clockwork structures, brass and copper tones, billowing steam, airships in sky, cobblestone streets, Victorian-era citizens, gears and pipes visible, warm sepia lighting, hazy atmosphere, intricate mechanical details. Your final output should be a single line of text containing the refined prompt, without any additional explanation or commentary. IMPORTANT : DO NOT Include information about the overall style or artistic technique and DO NOT explain what you are doing, just write the description.",
        "FLUX, context given (Ex: 'black cat')": "Your goal is to take a brief user input and expand it into a rich, vivid description.\n\
Use descriptive language to capture the essence of the image, including the environment, objects, characters, lighting, textures, and any other notable elements. The description must use some of these points : \n\
1. Primary Subject: [Main focus or characters in the scene.]\n\
2. Scene Type: [Outdoor/Indoor/Abstract/Fantasy/Realistic/etc.]\n\
3. Environment Details: [Describe the setting in vivid detail, including any landscapes, architecture, or surroundings.]\n\
4. Lighting: [Specify the type, color, and intensity of the lighting.]\n\
5. Colors and Tones: [Dominant colors and overall mood.]\n\
6. Perspective: [Camera angle or viewpoint—close-up, wide shot, aerial, etc.]\n\
7. Texture and Details: [Surface materials, patterns, and fine details.]\n\
8. Emotion or Atmosphere: [Mood conveyed by the scene—serene, ominous, lively, etc.]\n\
9. Unique Elements: [Special features or focal points that make the image distinctive.]\n\
Example of output for the input 'lion': A majestic lion stands proudly atop a rugged rocky hill, surveying its vast kingdom. The scene captures the beauty of an expansive savannah, with tall golden grass swaying gently in the warm breeze. Sparse acacia trees dot the landscape, their silhouettes contrasting against distant hazy blue mountains under a radiant, cloudless sky. The bright sunlight bathes the lion in a golden glow, casting long, dramatic shadows that emphasize its commanding presence. The lion's fur is richly detailed, with individual strands catching the sunlight, while its mane ripples slightly in the subtle wind, adding a sense of life and motion. The rocks beneath its powerful paws are rugged and weathered, their earthy tones blending seamlessly with the natural palette of the landscape. The atmosphere is serene yet powerful, embodying the lion's dominance and the wild's untamed beauty.\n\
IMPORTANT : DO NOT Include information about the overall style or artistic technique and DO NOT explain what you are doing, just write the description."
    }

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                # BUG FIX: the previous default "Default Assistant" is not a
                # key of SYSTEM_JOBS; use a valid key.
                "selected_prompt": (list(cls.SYSTEM_JOBS.keys()), {"default": "None"})
            },
            "optional": {
                "OLLAMA_PERSONA": ("OLLAMA_PERSONA", {
                    "forceInput": True
                }),
                "custom_prompt_prefix": ("STRING", {
                    "multiline": True,
                    "default": "",
                    "placeholder": "Add a custom prompt prefix here..."
                }),
            }
        }

    RETURN_TYPES = ("OLLAMA_JOB", "STRING",)
    RETURN_NAMES = ("OLLAMA_JOB", "prompt_text")
    FUNCTION = "get_system_prompt"
    CATEGORY = "ollama"

    def get_system_prompt(self, selected_prompt, custom_prompt_prefix="", OLLAMA_PERSONA=None):
        """Combine persona, custom prefix and the selected job prompt.

        Args:
            selected_prompt: key into SYSTEM_JOBS.
            custom_prompt_prefix: optional free-text prefix (defaults to ""
                so the node works when the optional input is absent).
            OLLAMA_PERSONA: optional dict with a "prompt" key, as produced
                by OllamaSystemPersonaSelector.

        Returns:
            ({"prompt": combined}, combined) — empty parts are skipped.
        """
        if OLLAMA_PERSONA:
            components = filter(None, [OLLAMA_PERSONA["prompt"], custom_prompt_prefix, self.SYSTEM_JOBS[selected_prompt]])
        else:
            components = filter(None, [custom_prompt_prefix, self.SYSTEM_JOBS[selected_prompt]])
        custom_prompt = " ".join(components)
        return ({"prompt": custom_prompt}, custom_prompt)

57
ollama_system_persona.py Normal file
View File

@@ -0,0 +1,57 @@
class OllamaSystemPersonaSelector:
    """ComfyUI node that selects a predefined "persona" system prompt.

    The chosen persona text can be prefixed with a custom string; the result
    feeds OllamaSystemJobSelector / OllamaTalk as an OLLAMA_PERSONA value.
    """

    # Predefined system prompts, keyed by the dropdown label.
    SYSTEM_PERSONAS = {
        "None": "",
        "Default Assistant": "You are a helpful AI assistant.",
        "Sassy Teenager": "You are Jazzy, a rebellious teenager with a sharp tongue and a snarky attitude. You speak your mind without filter, throwing in plenty of sarcasm, eye rolls, and the occasional whatever.",
        "Friendly Neighbor": "You are Nancy, the friendly neighbor next door. Always warm and welcoming, you're ready to lend a hand, offer a comforting word, and make everyone feel like they're part of the family.",
        "Gothic Poet": "You are Raven, a brooding soul with a passion for the dark and mysterious. You speak in deep, poetic tones, weaving words of melancholy and introspection that evoke the beauty of sorrow and despair.",
        "Mad Scientist": "You are Dr. Von Craze, an eccentric and unpredictable genius. Your mind is constantly buzzing with wild ideas and experiments, often venturing into the unknown with a gleam of madness in your eyes.",
        "Enthusiastic Nerd": "You are Max, the ultimate enthusiast of all things nerdy. Your excitement is contagious as you delve into the latest tech, obscure facts, and deep discussions. You're always ready to share what you know and learn more.",
        "Shy Introvert": "You are Sophie, a quiet, introspective soul who prefers the solitude of your thoughts. You're gentle in your speech, often hesitant, and prefer to observe the world rather than engage in it too loudly.",
        "Elderly Wisdom": "You are Grandpa Joe, a kind and patient soul with decades of wisdom to share. Your voice is slow and measured, filled with stories of the past, offering valuable lessons learned through the years.",
        "Flirty Charmer": "You are Alexis, a playful and flirtatious individual with a knack for making hearts race. You enjoy teasing, lighthearted banter, and are always looking to add a little sparkle and charm to any conversation.",
        "Stoic Philosopher": "You are Socrates, a calm and composed philosopher. Your words are deliberate and thoughtful, offering deep insights into the meaning of life, the universe, and everything in between, with a serene and balanced demeanor.",
        "Cheerleader": "You are Skylar, the ever-energetic cheerleader, constantly uplifting and motivating those around you. You're the first to encourage others, always finding the silver lining and pushing everyone to do their best.",
        "Sarcastic Cynic": "You are Blake, the master of sarcasm and dry humor. You're always quick with a witty remark and never shy to point out the absurdity of life. Optimism isn't your style, but cynicism never felt so clever.",
        "Zen Master": "You are Master Yogi, a peaceful and centered being. Your words are soft and calm, filled with ancient wisdom and a deep understanding of balance, nature, and the stillness that comes with mindfulness.",
        "Overly Polite Gentleman/Lady": "You are Sir/ Madame Reginald, the epitome of politeness. Every sentence is filled with utmost courtesy and grace, offering respect and consideration at every turn, with impeccable manners that never waver.",
        "Authoritarian": "You are Mistress V, a fierce and commanding presence. You take control of every situation with authority, demanding respect and obedience. Your words are sharp, direct, and you never tolerate defiance.",
        "Submissive": "You are Lily, a sweet, submissive, humble soul. Always willing to follow instructions and eager to please. You speak softly and with deep respect, always striving to fulfill the needs of others without hesitation.",
        "Sassy Grandma": "You are Granny Bette, full of life and cheeky wisdom. You've got stories to tell, a sharp sense of humor, and you're not afraid to dish out some sass along with the love and care you show to others.",
        "Cowboy": "You are Dusty, a tough, rugged cowboy/cowgirl with a heart of gold. With grit in your voice and a steely gaze, you live by a code of honor and speak with the confidence that comes from a life well lived on the open range.",
        "Mysterious Spy": "You are Cipher, a secretive agent with a sharp mind and a talent for staying hidden. Your words are careful and measured, often laced with mystery, as you navigate the world with covert precision and stealth.",
        "Drama Queen": "You are Diva, the drama queen. You make every moment larger than life, reacting to everything with heightened emotion, turning even the smallest events into grand spectacles worthy of an audience.",
        "Hyperactive Child": "You are Timmy, a bundle of pure energy and excitement. You're always bouncing around with enthusiasm, ready to jump into any adventure, and your excitement is contagious as you bring an explosion of joy to everything you do."
    }

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "selected_prompt": (list(cls.SYSTEM_PERSONAS.keys()), {"default": "Default Assistant"})
            },
            "optional": {
                "custom_prompt_prefix": ("STRING", {
                    "multiline": True,
                    "default": "",
                    "placeholder": "Add a custom prompt prefix here..."
                })
            }
        }

    RETURN_TYPES = ("OLLAMA_PERSONA", "STRING",)
    RETURN_NAMES = ("OLLAMA_PERSONA", "prompt_text")
    FUNCTION = "get_system_prompt"
    CATEGORY = "ollama"

    def get_system_prompt(self, selected_prompt, custom_prompt_prefix=""):
        """Return the persona prompt, optionally prefixed by a custom string.

        BUG FIX: custom_prompt_prefix now defaults to "" so the node also
        works when the optional input is not supplied by the caller.
        """
        # Join with a space only if the selected persona text isn't empty.
        if custom_prompt_prefix and self.SYSTEM_PERSONAS[selected_prompt]:
            custom_prompt = custom_prompt_prefix + " " + \
                self.SYSTEM_PERSONAS[selected_prompt]
        else:
            custom_prompt = custom_prompt_prefix + \
                self.SYSTEM_PERSONAS[selected_prompt]
        return ({"prompt": custom_prompt}, custom_prompt)

304
ollama_talk.py Normal file
View File

@@ -0,0 +1,304 @@
# Standard library
import glob
import hashlib
import io  # used by OllamaTalk.play_audio (io.BytesIO) — was missing, causing a NameError on Windows
import json
import logging
import os
import sys
import time
from typing import Dict, Any

# Third-party
import ollama
import requests
from aiohttp import web
from ollama import Client
from pydub import AudioSegment
from pydub.playback import play

# ComfyUI
from server import PromptServer
class OllamaTalk:
    """ComfyUI node: chat with a local Ollama server.

    Two execution modes:
    - direct: send the prompt and return the answer immediately;
    - interactive (waiting_for_prompt=True): block inside chat_response
      until the web UI resumes or interrupts the node through the HTTP
      routes registered below (/bjornulf_ollama_send_prompt,
      /bjornulf_ollama_interrupt).
    Conversation context can live in memory or in an on-disk file.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "user_prompt": ("STRING", {"multiline": True}),
                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                "max_tokens": ("INT", {"default": 600, "min": 1, "max": 4096}),
                "vram_retention_minutes": ("INT", {"default": 0, "min": 0, "max": 99}),
                "answer_single_line": ("BOOLEAN", {"default": False}),
                "waiting_for_prompt": ("BOOLEAN", {"default": False}),
                "use_context_file": ("BOOLEAN", {"default": False}),
                # "context_size": ("INT", {"default": 0, "min": 0, "max": 1000}),
            },
            "optional": {
                "OLLAMA_CONFIG": ("OLLAMA_CONFIG", {"forceInput": True}),
                "context": ("STRING", {"multiline": True, "forceInput": True}),
                "OLLAMA_JOB": ("OLLAMA_JOB", {
                    "forceInput": True
                }),
            }
        }

    RETURN_TYPES = ("STRING", "STRING", "STRING")
    RETURN_NAMES = ("ollama_response", "updated_context", "system_prompt")
    FUNCTION = "chat_response"
    CATEGORY = "Bjornulf"

    # Class-level flags shared between the executing node and the HTTP routes.
    is_paused = True
    is_interrupted = False
    # Most recently constructed instance; the routes act on it.
    current_instance = None

    def __init__(self):
        self.last_content_hash = None
        self.waiting = False
        self.OLLAMA_CONFIG = None   # dict with "model"/"url", or None for defaults
        self.OLLAMA_JOB = None      # dict with "prompt" (system prompt), or None
        self.context = ""           # in-memory conversation history
        self.answer_single_line = True
        self.vram_retention_minutes = 1
        self.ollama_response = ""
        self.widgets = {}
        self.use_context_file = False
        # Register this instance as the one the HTTP routes will drive.
        OllamaTalk.current_instance = self

    def play_audio(self):
        """Play the bundled bell sound to signal "waiting for a prompt".

        NOTE(review): the Windows branch uses io.BytesIO, but `io` is not
        imported in this view of the file — without it, the inner except
        just prints an error instead of playing. Confirm the import exists.
        """
        try:
            if sys.platform.startswith('win'):
                try:
                    audio_file = os.path.join(os.path.dirname(__file__), 'bell.m4a')
                    sound = AudioSegment.from_file(audio_file, format="m4a")
                    # Convert m4a to in-memory WAV for winsound.
                    wav_io = io.BytesIO()
                    sound.export(wav_io, format='wav')
                    wav_data = wav_io.getvalue()
                    import winsound
                    winsound.PlaySound(wav_data, winsound.SND_MEMORY)
                except Exception as e:
                    print(f"An error occurred: {e}")
            else:
                audio_file = os.path.join(os.path.dirname(__file__), 'bell.m4a')
                sound = AudioSegment.from_file(audio_file, format="m4a")
                play(sound)
        except Exception:
            pass  # Silently handle exceptions, no console output

    @classmethod
    def IS_CHANGED(cls, waiting_for_prompt, **kwargs):
        # Interactive mode must re-execute on every queue: nan != nan, so
        # ComfyUI never considers the node unchanged.
        if waiting_for_prompt:
            return float("nan")
        return float(0)

    def save_context(self, context):
        """Append one exchange to the persistent context file (relative path)."""
        os_path = os.path.join("Bjornulf", "ollama", "ollama_context.txt")
        os.makedirs(os.path.dirname(os_path), exist_ok=True)
        with open(os_path, "a", encoding="utf-8") as f:
            f.write(context + "\n")
        # f.write(context + "\n" + "-" * 80 + "\n")

    def load_context(self):
        """Return the persistent context file's contents, or "" if absent."""
        os_path = os.path.join("Bjornulf", "ollama", "ollama_context.txt")
        if os.path.exists(os_path):
            with open(os_path, "r", encoding="utf-8") as f:
                return f.read().strip()
        return ""

    def process_ollama_request(self, user_prompt, answer_single_line, max_tokens, use_context_file=False):
        """Send the prompt plus accumulated context to Ollama.

        Returns (result, updated_context); on connection failure returns a
        fixed error string and the unchanged in-memory context.
        """
        if self.OLLAMA_CONFIG is None:
            # Fall back to a default local model/server.
            self.OLLAMA_CONFIG = {
                "model": "llama3.2:3b",
                "url": "http://0.0.0.0:11434"
            }
        selected_model = self.OLLAMA_CONFIG["model"]
        ollama_url = self.OLLAMA_CONFIG["url"]
        if self.OLLAMA_JOB is None:
            OLLAMA_JOB_text = "You are an helpful AI assistant."
        else:
            OLLAMA_JOB_text = self.OLLAMA_JOB["prompt"]
        formatted_prompt = "User: " + user_prompt
        # Build the running conversation from either the on-disk context file
        # or the in-memory context string.
        if use_context_file:
            file_context = self.load_context()
            conversation = file_context + "\n" + formatted_prompt if file_context else formatted_prompt
        else:
            conversation = self.context + "\n" + formatted_prompt if self.context else formatted_prompt
        keep_alive_minutes = self.vram_retention_minutes
        try:
            client = Client(host=ollama_url)
            response = client.generate(
                model=selected_model,
                system=OLLAMA_JOB_text,
                prompt=conversation,
                options={
                    # NOTE(review): num_ctx sets the context-window size, while
                    # the widget is named max_tokens — confirm intended option.
                    "num_ctx": max_tokens
                },
                keep_alive=f"{keep_alive_minutes}m"
            )
            result = response['response']
            updated_context = conversation + "\nAssistant: " + result
            self.context = updated_context
            if use_context_file:
                # Persist only the new exchange, not the whole conversation.
                self.save_context(formatted_prompt + "\nAssistant: " + result)
            if answer_single_line:
                # Collapse all whitespace/newlines into single spaces.
                result = ' '.join(result.split())
            self.ollama_response = result
            return result, updated_context
        except Exception as e:
            logging.error(f"Connection to {ollama_url} failed: {e}")
            return "Connection to Ollama failed.", self.context

    def chat_response(self, user_prompt, seed, vram_retention_minutes, waiting_for_prompt=False,
                      context="", OLLAMA_CONFIG=None, OLLAMA_JOB=None, answer_single_line=False,
                      use_context_file=False, max_tokens=600, context_size=0):
        """Node entry point.

        Direct mode calls Ollama immediately; waiting mode plays the bell,
        then polls the class-level pause/interrupt flags until one of the
        HTTP routes flips them, returning whatever the route computed.
        Returns (ollama_response, updated_context, system_prompt).
        """
        # Store configurations
        self.OLLAMA_CONFIG = OLLAMA_CONFIG
        self.OLLAMA_JOB = OLLAMA_JOB
        self.context = context
        self.answer_single_line = answer_single_line
        self.vram_retention_minutes = vram_retention_minutes
        self.user_prompt = user_prompt
        self.max_tokens = max_tokens
        self.use_context_file = use_context_file
        if waiting_for_prompt:
            self.play_audio()
            # Wait until either resumed or interrupted
            while OllamaTalk.is_paused and not OllamaTalk.is_interrupted:
                time.sleep(1)
            # Check if we were interrupted
            if OllamaTalk.is_interrupted:
                OllamaTalk.is_paused = True
                OllamaTalk.is_interrupted = False
                return ("Interrupted", self.context, self.OLLAMA_JOB["prompt"] if self.OLLAMA_JOB else "")
            # Resumed: the route already ran the request and filled
            # self.ollama_response / self.context.
            OllamaTalk.is_paused = True
            return (self.ollama_response, self.context, self.OLLAMA_JOB["prompt"] if self.OLLAMA_JOB else "")
            # result, updated_context = self.process_ollama_request(user_prompt, answer_single_line, use_context_file)
            # return (result, updated_context, OLLAMA_JOB["prompt"] if OLLAMA_JOB else "")
        else:
            # Direct execution without waiting
            result, updated_context = self.process_ollama_request(user_prompt, answer_single_line, max_tokens, use_context_file)
            return (result, updated_context, OLLAMA_JOB["prompt"] if OLLAMA_JOB else "")
@PromptServer.instance.routes.post("/bjornulf_ollama_send_prompt")
async def resume_node(request):
    """HTTP route: run the Ollama request for the waiting node, then resume it.

    Called by the front-end while chat_response blocks in its
    waiting_for_prompt loop. The answer is stored on the instance
    (ollama_response / context) and picked up when the loop exits.
    """
    if OllamaTalk.current_instance:
        instance = OllamaTalk.current_instance
        # Get the data from the request
        data = await request.json()
        updated_prompt = data.get('user_prompt')
        # Use the updated_prompt directly if it's not None
        prompt_to_use = updated_prompt if updated_prompt is not None else instance.user_prompt
        result, updated_context = instance.process_ollama_request(
            prompt_to_use,
            instance.answer_single_line,
            instance.max_tokens,
            use_context_file=instance.use_context_file  # Ensure this is set to True
        )
        # Releases the waiting chat_response loop.
        OllamaTalk.is_paused = False
        return web.Response(text="Node resumed")
    return web.Response(text="No active instance", status=400)
@PromptServer.instance.routes.post("/get_current_context_size")
async def get_current_context_size(request):
    """HTTP route: report the number of non-empty lines in the shared
    Ollama context file (0 when the file does not exist)."""
    context_path = os.path.join("Bjornulf", "ollama", "ollama_context.txt")
    try:
        if not os.path.exists(context_path):
            logging.info("Context file does not exist")
            return web.json_response({"success": True, "value": 0}, status=200)
        with open(context_path, 'r', encoding='utf-8') as handle:
            # Count only lines that still have content after stripping.
            line_count = sum(1 for raw in handle if raw.strip())
        logging.info(f"Found {line_count} lines in context file")
        return web.json_response({"success": True, "value": line_count}, status=200)
    except Exception as exc:
        logging.error(f"Error reading context size: {str(exc)}")
        return web.json_response({
            "success": False,
            "error": str(exc),
            "value": 0
        }, status=500)
def get_next_filename(base_path, base_name):
    """Return the next free archive name of the form base_name.XXX.txt,
    where XXX is a zero-padded 3-digit counter starting at 001."""
    pattern = os.path.join(base_path, f"{base_name}.[0-9][0-9][0-9].txt")
    matches = glob.glob(pattern)
    if not matches:
        return f"{base_name}.001.txt"
    # Collect the numeric parts of the existing archives, skipping any
    # file whose name does not parse cleanly.
    numbers = []
    for path in matches:
        try:
            numbers.append(int(path.split('.')[-2]))
        except (ValueError, IndexError):
            continue
    next_number = 1 if not numbers else max(numbers) + 1
    return f"{base_name}.{next_number:03d}.txt"
@PromptServer.instance.routes.post("/reset_lines_context")
def reset_lines_context(request):
    """HTTP route: archive the current context file by renaming it to the
    next free ollama_context.XXX.txt, effectively resetting the context.

    NOTE(review): defined as a plain function while the sibling routes are
    async — confirm that PromptServer/aiohttp accepts sync handlers here.
    """
    logging.info("Reset lines counter called")
    base_dir = os.path.join("Bjornulf", "ollama")
    base_file = "ollama_context"
    counter_file = os.path.join(base_dir, f"{base_file}.txt")
    try:
        if os.path.exists(counter_file):
            # Get new filename and rename
            new_filename = os.path.join(base_dir, get_next_filename(base_dir, base_file))
            os.rename(counter_file, new_filename)
            logging.info(f"Renamed {counter_file} to {new_filename}")
            # Send notification through ComfyUI
            notification = {
                "ui": {
                    "notification_text": [f"Context file renamed to: {os.path.basename(new_filename)}"]
                }
            }
            return web.json_response({
                "success": True,
                **notification
            }, status=200)
        # Nothing to archive: still report success to the UI.
        return web.json_response({
            "success": True,
            "ui": {
                "notification_text": ["No context file to rename"]
            }
        }, status=200)
    except Exception as e:
        error_msg = str(e)
        return web.json_response({
            "success": False,
            "error": error_msg,
            "ui": {
                "notification_text": [f"Error renaming file: {error_msg}"]
            }
        }, status=500)
@PromptServer.instance.routes.post("/bjornulf_ollama_interrupt")
async def interrupt_node(request):
    """HTTP route: flag the waiting OllamaTalk node to abort its wait loop."""
    OllamaTalk.is_interrupted = True
    return web.Response(text="Node interrupted")

View File

@@ -1,7 +1,7 @@
[project]
name = "bjornulf_custom_nodes"
description = "68 ComfyUI nodes : Display, manipulate, and edit text, images, videos, loras and more. Manage looping operations, generate randomized content, use logical conditions and work with external AI tools, like Ollama or Text To Speech."
version = "0.56"
version = "0.57"
license = {file = "LICENSE"}
[project.urls]

View File

@@ -10,8 +10,12 @@ class RandomLoraSelector:
lora_list = get_filename_list("loras")
optional_inputs = {}
# Add a default value if lora_list is empty
if not lora_list:
lora_list = ["none"]
for i in range(1, 11):
optional_inputs[f"lora_{i}"] = (lora_list, {"default": lora_list[min(i-1, len(lora_list)-1)]})
optional_inputs[f"lora_{i}"] = (lora_list, {"default": lora_list[0]})
optional_inputs["seed"] = ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
@@ -34,14 +38,15 @@ class RandomLoraSelector:
def random_select_lora(self, number_of_loras, model, clip, strength_model, strength_clip, seed, **kwargs):
random.seed(seed)
# Collect available Loras from kwargs
# Collect available Loras from kwargs, excluding "none"
available_loras = [
kwargs[f"lora_{i}"] for i in range(1, number_of_loras + 1) if f"lora_{i}" in kwargs and kwargs[f"lora_{i}"]
kwargs[f"lora_{i}"] for i in range(1, number_of_loras + 1)
if f"lora_{i}" in kwargs and kwargs[f"lora_{i}"] and kwargs[f"lora_{i}"] != "none"
]
# Raise an error if no Loras are available
# Return original model and clip if no valid LoRAs are available
if not available_loras:
raise ValueError("No Loras selected")
return (model, clip, "", "", "")
# Randomly select a Lora
selected_lora = random.choice(available_loras)

View File

@@ -7,10 +7,20 @@ class RandomModelSelector:
@classmethod
def INPUT_TYPES(cls):
model_list = get_filename_list("checkpoints")
# if not model_list:
# raise ValueError("No checkpoint models found in the checkpoints directory")
optional_inputs = {}
# Safely get default model for each input
for i in range(1, 11):
optional_inputs[f"model_{i}"] = (model_list, {"default": model_list[min(i-1, len(model_list)-1)]})
# If model_list is empty, use an empty default
if not model_list:
optional_inputs[f"model_{i}"] = (model_list, {})
else:
# Use modulo to wrap around to the start of the list if we exceed its length
default_index = (i - 1) % len(model_list)
optional_inputs[f"model_{i}"] = (model_list, {"default": model_list[default_index]})
optional_inputs["seed"] = ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
@@ -31,12 +41,13 @@ class RandomModelSelector:
# Collect available models from kwargs
available_models = [
kwargs[f"model_{i}"] for i in range(1, number_of_models + 1) if f"model_{i}" in kwargs and kwargs[f"model_{i}"]
kwargs[f"model_{i}"] for i in range(1, number_of_models + 1)
if f"model_{i}" in kwargs and kwargs[f"model_{i}"]
]
# Raise an error if no models are available
if not available_models:
raise ValueError("No models selected")
raise ValueError("No models selected. Please ensure at least one model is selected.")
# Randomly select a model
selected_model = random.choice(available_models)

View File

@@ -1,3 +1,4 @@
ollama
pydub
opencv-python
opencv-python
faster_whisper

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 195 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 103 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 133 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 180 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 151 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 311 KiB

BIN
screenshots/ollama_job.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 205 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 220 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 134 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 94 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 94 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 106 KiB

BIN
screenshots/stt_1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 89 KiB

BIN
screenshots/stt_2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

View File

@@ -1,18 +0,0 @@
class ShowFloat:
    """Output-only ComfyUI node that displays incoming float value(s) in the UI."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "float_value": ("FLOAT", {"forceInput": True}),
            },
        }

    RETURN_TYPES = ()
    FUNCTION = "show_float"
    OUTPUT_NODE = True
    # BUG FIX: was assigned twice (True, then overridden with the tuple
    # (True,)); a single boolean is the documented ComfyUI form.
    INPUT_IS_LIST = True
    CATEGORY = "Bjornulf"

    def show_float(self, float_value):
        # Echo the list of floats back to the front-end for display.
        return {"ui": {"text": float_value}}

View File

@@ -1,18 +0,0 @@
class ShowInt:
    """Output-only ComfyUI node that displays incoming int value(s) in the UI."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "int_value": ("INT", {"forceInput": True}),
            },
        }

    RETURN_TYPES = ()
    FUNCTION = "show_int"
    OUTPUT_NODE = True
    # BUG FIX: was assigned twice (True, then overridden with the tuple
    # (True,)); a single boolean is the documented ComfyUI form.
    INPUT_IS_LIST = True
    CATEGORY = "Bjornulf"

    def show_int(self, int_value):
        # Echo the list of ints back to the front-end for display.
        return {"ui": {"text": int_value}}

124
speech_to_text.py Normal file
View File

@@ -0,0 +1,124 @@
import torch
from pathlib import Path
import os
import numpy as np
import tempfile
import wave
try:
import faster_whisper
WHISPER_AVAILABLE = True
except ImportError:
WHISPER_AVAILABLE = False
print("faster-whisper not found. To use local transcription, install with: pip install faster-whisper")
class SpeechToText:
    """Transcribe audio to text locally with faster-whisper.

    Accepts either a ComfyUI AUDIO dict (waveform tensor + sample rate) or
    a path to an audio file on disk. Outputs the transcript plus the
    detected language code and its English name.
    """

    def __init__(self):
        # Lazily-loaded faster_whisper.WhisperModel instance.
        self.local_model = None

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model_size": (["tiny", "base", "small", "medium", "large-v2"], {"default": "base"}),
            },
            "optional": {
                "AUDIO": ("AUDIO",),
                "audio_path": ("STRING", {"default": None, "forceInput": True}),
            }
        }

    RETURN_TYPES = ("STRING", "STRING", "STRING",)
    RETURN_NAMES = ("transcript", "detected_language", "language_name",)
    FUNCTION = "transcribe_audio"
    CATEGORY = "Bjornulf"

    def tensor_to_wav(self, audio_tensor, sample_rate):
        """Write an audio tensor to a temporary 16-bit mono WAV file.

        Returns the temp file path (caller is responsible for deleting it).
        Assumes a mono float tensor with samples in [-1.0, 1.0] — TODO
        confirm against the AUDIO producers in this pack.
        """
        audio_data = audio_tensor.squeeze().numpy()
        temp_file = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
        with wave.open(temp_file.name, 'wb') as wav_file:
            wav_file.setnchannels(1)  # mono audio
            wav_file.setsampwidth(2)  # 2 bytes (16 bits) per sample
            wav_file.setframerate(sample_rate)
            # Scale float [-1, 1] to the int16 range.
            audio_data = (audio_data * 32767).astype(np.int16)
            wav_file.writeframes(audio_data.tobytes())
        return temp_file.name

    def load_local_model(self, model_size):
        """Load the local Whisper model if not already loaded.

        Returns (success, error_message_or_None).
        """
        if not WHISPER_AVAILABLE:
            return False, "faster-whisper not installed. Install with: pip install faster-whisper"
        try:
            if self.local_model is None:
                print(f"Loading local Whisper model ({model_size})...")
                # CPU int8 keeps memory low and avoids requiring a GPU.
                self.local_model = faster_whisper.WhisperModel(model_size, device="cpu", compute_type="int8")
                print("Local model loaded successfully!")
            return True, None
        except Exception as e:
            return False, f"Error loading model: {str(e)}"

    def transcribe_local(self, audio_path, model_size):
        """Transcribe audio using the local Whisper model.

        Returns (success, text_or_error_message, detected_language_or_None).
        """
        success, message = self.load_local_model(model_size)
        if not success:
            return False, message, None
        try:
            print("Starting local transcription...")
            segments, info = self.local_model.transcribe(str(audio_path), beam_size=5)
            text = " ".join([segment.text for segment in segments]).strip()
            detected_language = info.language
            print("Local transcription completed successfully!")
            return True, text, detected_language
        except Exception as e:
            return False, f"Error during local transcription: {str(e)}", None

    def transcribe_audio(self, model_size, AUDIO=None, audio_path=None):
        """Node entry point: transcribe AUDIO (preferred) or audio_path.

        Returns:
            (transcript, detected_language, language_name) — always a
            3-tuple matching RETURN_TYPES. (BUG FIX: the previous version
            returned a 2-tuple on invalid input, breaking the node's
            declared outputs.)
        """
        transcript = "No valid audio input provided"
        detected_language = ""
        temp_wav_path = None
        try:
            # Determine which audio source to use
            if AUDIO is not None:
                # Convert tensor audio data to a temporary WAV file.
                waveform = AUDIO['waveform']
                sample_rate = AUDIO['sample_rate']
                temp_wav_path = self.tensor_to_wav(waveform, sample_rate)
                audio_to_process = temp_wav_path
            elif audio_path is not None and os.path.exists(audio_path):
                audio_to_process = audio_path
            else:
                # Must return 3 values to match RETURN_TYPES.
                return ("No valid audio input provided", "", "Unknown")
            success, result, lang = self.transcribe_local(audio_to_process, model_size)
            transcript = result if success else f"Local transcription failed: {result}"
            detected_language = lang if success else ""
        finally:
            # Clean up temporary file if it was created
            if temp_wav_path and os.path.exists(temp_wav_path):
                os.unlink(temp_wav_path)
        # Map the ISO language code to an English name for the third output.
        language_map = {
            "ar": "Arabic", "cs": "Czech", "de": "German", "en": "English",
            "es": "Spanish", "fr": "French", "hi": "Hindi", "hu": "Hungarian",
            "it": "Italian", "ja": "Japanese", "ko": "Korean", "nl": "Dutch",
            "pl": "Polish", "pt": "Portuguese", "ru": "Russian", "tr": "Turkish",
            # faster-whisper reports Chinese as "zh"; keep legacy "zh-cn" too.
            "zh": "Chinese", "zh-cn": "Chinese"
        }
        detected_language_name = language_map.get(detected_language, "Unknown")
        return (transcript, detected_language, detected_language_name)

23
text_to_anything.py Normal file
View File

@@ -0,0 +1,23 @@
class Everything(str):
    """Wildcard type string for ComfyUI connections.

    By reporting "not unequal" to every value, an instance such as
    Everything("*") passes ComfyUI's type-compatibility check against any
    socket type.
    """
    def __ne__(self, other: object) -> bool:
        # Never claim inequality, whatever the other operand is.
        return False
class TextToAnything:
    """Pass a STRING through unchanged, re-typed as the wildcard "*" so it
    can be connected to any input socket."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {"text": ("STRING", {"forceInput": True})},
        }

    @classmethod
    def VALIDATE_INPUTS(cls, input_types):
        # Accept whatever ComfyUI routes in; the node is a pure pass-through.
        return True

    RETURN_TYPES = (Everything("*"),)
    RETURN_NAMES = ("anything",)
    FUNCTION = "text_to_any"
    CATEGORY = "Bjornulf"

    def text_to_any(self, text):
        # Identity: the value is unchanged, only the declared type differs.
        return (text,)

View File

@@ -27,9 +27,12 @@ class TextToSpeech:
@classmethod
def INPUT_TYPES(cls) -> Dict[str, Any]:
speakers_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "speakers")
speaker_options = [os.path.relpath(os.path.join(root, file), speakers_dir)
for root, _, files in os.walk(speakers_dir)
for file in files if file.endswith(".wav")]
speaker_options = ["default_for_language"] # Add default option
speaker_options.extend([
os.path.relpath(os.path.join(root, file), speakers_dir)
for root, _, files in os.walk(speakers_dir)
for file in files if file.endswith(".wav")
])
speaker_options = speaker_options or ["No WAV files found"]
@@ -41,7 +44,7 @@ class TextToSpeech:
"display": "dropdown"
}),
"speaker_wav": (speaker_options, {
"default": speaker_options[0],
"default": "default_for_language",
"display": "dropdown"
}),
"autoplay": ("BOOLEAN", {"default": True}),
@@ -66,11 +69,39 @@ class TextToSpeech:
@staticmethod
def sanitize_text(text: str) -> str:
return re.sub(r'[^\w\s-]', '', text).replace(' ', '_')[:50]
def get_default_speaker(self, language_code: str) -> str:
    """Pick a speaker WAV for the given language code.

    Resolution order:
      1. <speakers>/<language_code>/default.wav
      2. the first .wav found directly in <speakers>/<language_code>/
      3. the first .wav found anywhere under <speakers>/

    Returns a path relative to the speakers directory, or the sentinel
    string "No WAV files found" when nothing suitable exists.
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    speakers_dir = os.path.join(base_dir, "speakers")
    lang_dir = os.path.join(speakers_dir, language_code)

    # 1. Preferred: an explicit default.wav for this language.
    default_rel = os.path.join(language_code, "default.wav")
    if os.path.exists(os.path.join(speakers_dir, default_rel)):
        return default_rel

    # 2. Otherwise any .wav sitting in the language's own directory.
    if os.path.exists(lang_dir):
        for entry in os.listdir(lang_dir):
            if entry.endswith(".wav"):
                return os.path.join(language_code, entry)

    # 3. Last resort: the first .wav anywhere under the speakers tree.
    for root, _, files in os.walk(speakers_dir):
        for entry in files:
            if entry.endswith(".wav"):
                return os.path.relpath(os.path.join(root, entry), speakers_dir)

    return "No WAV files found"
def generate_audio(self, text: str, language: str, autoplay: bool, seed: int,
save_audio: bool, overwrite: bool, speaker_wav: str,
connect_to_workflow: Any = None) -> Tuple[Dict[str, Any], str, str, float]:
language_code = self.get_language_code(language)
# Handle default_for_language option
if speaker_wav == "default_for_language":
speaker_wav = self.get_default_speaker(language_code)
sanitized_text = self.sanitize_text(text)
save_path = os.path.join("Bjornulf_TTS", language, speaker_wav, f"{sanitized_text}.wav")

View File

@@ -1,111 +1,192 @@
import { app } from "../../../scripts/app.js";
import { api } from "../../../scripts/api.js";
app.registerExtension({
name: "Bjornulf.LoopLinesSequential",
async nodeCreated(node) {
if (node.comfyClass !== "Bjornulf_LoopLinesSequential") return;
name: "Bjornulf.LoopLinesSequential",
async nodeCreated(node) {
if (node.comfyClass !== "Bjornulf_LoopLinesSequential") return;
// Hide seed widget
const seedWidget = node.widgets.find(w => w.name === "seed");
if (seedWidget) {
seedWidget.visible = false;
}
// Add line number display
const lineNumberWidget = node.addWidget("html", "Current Line: --", null, {
callback: () => {},
});
// Function to update line number display
const updateLineNumber = () => {
fetch('/get_current_line')
.then(response => response.json())
.then(data => {
if (data.success) {
lineNumberWidget.value = `Current Line: ${data.value}`;
}
})
.catch(error => {
console.error('Error getting line number:', error);
});
};
// Add increment button
const incrementButton = node.addWidget("button", "+1", null, () => {
fetch('/increment_lines_counter', {
method: 'POST'
})
.then(response => response.json())
.then(data => {
if (data.success) {
updateLineNumber();
app.ui.toast("Counter incremented", {'duration': 3000});
} else {
app.ui.toast(`Failed to increment counter: ${data.error || "Unknown error"}`, {'type': 'error', 'duration': 5000});
}
})
.catch((error) => {
console.error('Error:', error);
app.ui.toast("An error occurred while incrementing the counter.", {'type': 'error', 'duration': 5000});
});
});
// Add decrement button
const decrementButton = node.addWidget("button", "-1", null, () => {
fetch('/decrement_lines_counter', {
method: 'POST'
})
.then(response => response.json())
.then(data => {
if (data.success) {
updateLineNumber();
app.ui.toast("Counter decremented", {'duration': 3000});
} else {
app.ui.toast(`Failed to decrement counter: ${data.error || "Unknown error"}`, {'type': 'error', 'duration': 5000});
}
})
.catch((error) => {
console.error('Error:', error);
app.ui.toast("An error occurred while decrementing the counter.", {'type': 'error', 'duration': 5000});
});
});
// Add reset button
const resetButton = node.addWidget("button", "Reset Counter", null, () => {
fetch('/reset_lines_counter', {
method: 'POST'
})
.then(response => response.json())
.then(data => {
if (data.success) {
updateLineNumber();
app.ui.toast("Counter reset successfully!", {'duration': 5000});
} else {
app.ui.toast(`Failed to reset counter: ${data.error || "Unknown error"}`, {'type': 'error', 'duration': 5000});
}
})
.catch((error) => {
console.error('Error:', error);
app.ui.toast("An error occurred while resetting the counter.", {'type': 'error', 'duration': 5000});
});
});
// Update line number periodically
setInterval(updateLineNumber, 1000);
// Override the original execute function
const originalExecute = node.execute;
node.execute = function() {
const result = originalExecute.apply(this, arguments);
if (result instanceof Promise) {
return result.catch(error => {
if (error.message.includes("Counter has reached its limit")) {
app.ui.toast(`Execution blocked: ${error.message}`, {'type': 'error', 'duration': 5000});
}
throw error;
});
}
return result;
};
// Hide seed widget
const seedWidget = node.widgets.find((w) => w.name === "seed");
if (seedWidget) {
seedWidget.visible = false;
}
});
// Add line number display
const lineNumberWidget = node.addWidget("html", "Current Line: --", null, {
callback: () => {},
});
// Refresh the Reset button label so it shows which line the loop will
// process on the next run (current value fetched from the backend counter).
const updateResetButtonTextNode = () => {
  fetch("/get_current_line_number", {
    method: "POST",
  })
    .then((response) => response.json())
    .then((data) => {
      if (data.success) {
        // "jump" is how many lines the counter advances per execution.
        const jumpWidget = node.widgets.find((w) => w.name === "jump");
        if (data.value === 0) {
          // Counter file is empty: the first run will start at the jump value.
          resetButton.name =
            "Reset Counter (Empty, next: " + jumpWidget.value + ")";
        } else {
          //Add to data.value, the current jump value
          let next_value = data.value + jumpWidget.value;
          // console.log(jumpWidget);
          resetButton.name = `Reset Counter (next: ${next_value})`;
        }
      } else {
        // NOTE(review): the message says "context size" but this endpoint is
        // the line counter — presumably copied from ollama_talk.js; confirm.
        console.error("Error in context size:", data.error);
        resetButton.name = "Reset Counter (Error)";
      }
    })
    .catch((error) => {
      // Network/parse failure: surface a generic error state on the button.
      console.error("Error fetching context size:", error);
      resetButton.name = "Reset Counter (Error)";
    });
};
// Add reset button
const resetButton = node.addWidget("button", "Reset Counter", null, () => {
fetch("/reset_lines_counter", {
method: "POST",
})
.then((response) => response.json())
.then((data) => {
if (data.success) {
// updateLineNumber();
updateResetButtonTextNode();
app.ui.toast("Counter reset successfully!", { duration: 5000 });
} else {
app.ui.toast(
`Failed to reset counter: ${data.error || "Unknown error"}`,
{ type: "error", duration: 5000 }
);
}
})
.catch((error) => {
console.error("Error:", error);
app.ui.toast("An error occurred while resetting the counter.", {
type: "error",
duration: 5000,
});
});
});
// Add increment button
const incrementButton = node.addWidget("button", "+1", null, () => {
fetch("/increment_lines_counter", {
method: "POST",
})
.then((response) => response.json())
.then((data) => {
if (data.success) {
updateResetButtonTextNode();
app.ui.toast("Counter incremented", { duration: 3000 });
} else {
app.ui.toast(
`Failed to increment counter: ${data.error || "Unknown error"}`,
{ type: "error", duration: 5000 }
);
}
})
.catch((error) => {
console.error("Error:", error);
app.ui.toast("An error occurred while incrementing the counter.", {
type: "error",
duration: 5000,
});
});
});
// Add decrement button
const decrementButton = node.addWidget("button", "-1", null, () => {
fetch("/decrement_lines_counter", {
method: "POST",
})
.then((response) => response.json())
.then((data) => {
if (data.success) {
updateResetButtonTextNode();
app.ui.toast("Counter decremented", { duration: 3000 });
} else {
app.ui.toast(
`Failed to decrement counter: ${data.error || "Unknown error"}`,
{ type: "error", duration: 5000 }
);
}
})
.catch((error) => {
console.error("Error:", error);
app.ui.toast("An error occurred while decrementing the counter.", {
type: "error",
duration: 5000,
});
});
});
// Add reset button
// const resetButton = node.addWidget("button", "Reset Counter", null, () => {
// fetch("/reset_lines_counter", {
// method: "POST",
// })
// .then((response) => response.json())
// .then((data) => {
// if (data.success) {
// updateLineNumber();
// app.ui.toast("Counter reset successfully!", { duration: 5000 });
// } else {
// app.ui.toast(
// `Failed to reset counter: ${data.error || "Unknown error"}`,
// { type: "error", duration: 5000 }
// );
// }
// })
// .catch((error) => {
// console.error("Error:", error);
// app.ui.toast("An error occurred while resetting the counter.", {
// type: "error",
// duration: 5000,
// });
// });
// });
// Update line number periodically
setTimeout(updateResetButtonTextNode, 0);
// Listen for node execution events
api.addEventListener("executed", async () => {
updateResetButtonTextNode();
});
// Add a handler for the jump widget
const waitingWidget = node.widgets.find((w) => w.name === "jump");
if (waitingWidget) {
const originalOnChange = waitingWidget.callback;
waitingWidget.callback = function (v) {
if (originalOnChange) {
originalOnChange.call(this, v);
}
updateResetButtonTextNode();
};
}
// Override the original execute function
const originalExecute = node.execute;
node.execute = function () {
const result = originalExecute.apply(this, arguments);
if (result instanceof Promise) {
return result.catch((error) => {
if (error.message.includes("Counter has reached its limit")) {
app.ui.toast(`Execution blocked: ${error.message}`, {
type: "error",
duration: 5000,
});
}
throw error;
});
}
return result;
};
},
});

View File

@@ -1,65 +1,171 @@
import { app } from "../../../scripts/app.js";
import { api } from "../../../scripts/api.js";
// Add CSS style for the black background button class
const style = document.createElement("style");
style.textContent = `
.reset-button-exceeded {
background-color: black !important;
color: white !important;
}
`;
app.registerExtension({
name: "Bjornulf.LoopIntegerSequential",
async nodeCreated(node) {
if (node.comfyClass !== "Bjornulf_LoopIntegerSequential") return;
name: "Bjornulf.LoopIntegerSequential",
async nodeCreated(node) {
if (node.comfyClass !== "Bjornulf_LoopIntegerSequential") return;
// Hide seed widget
const seedWidget = node.widgets.find(w => w.name === "seed");
if (seedWidget) {
seedWidget.visible = false;
}
// Add get value button
// const getValueButton = node.addWidget("button", "Get Counter Value", null, () => {
// fetch('/get_counter_value')
// .then(response => response.json())
// .then(data => {
// if (data.success) {
// app.ui.toast(`Current counter value: ${data.value}`, {'duration': 5000});
// } else {
// app.ui.toast(`Failed to get counter value: ${data.error || "Unknown error"}`, {'type': 'error', 'duration': 5000});
// }
// })
// .catch((error) => {
// console.error('Error:', error);
// app.ui.toast("An error occurred while getting the counter value.", {'type': 'error', 'duration': 5000});
// });
// });
// Add reset button
const resetButton = node.addWidget("button", "Reset Counter", null, () => {
fetch('/reset_counter', {
method: 'POST'
})
.then(response => response.json())
.then(data => {
if (data.success) {
app.ui.toast("Counter reset successfully!", {'duration': 5000});
} else {
app.ui.toast(`Failed to reset counter: ${data.error || "Unknown error"}`, {'type': 'error', 'duration': 5000});
}
})
.catch((error) => {
console.error('Error:', error);
app.ui.toast("An error occurred while resetting the counter.", {'type': 'error', 'duration': 5000});
});
});
// Override the original execute function
const originalExecute = node.execute;
node.execute = function() {
const result = originalExecute.apply(this, arguments);
if (result instanceof Promise) {
return result.catch(error => {
if (error.message.includes("Counter has reached its limit")) {
app.ui.toast(`Execution blocked: ${error.message}`, {'type': 'error', 'duration': 5000});
}
throw error; // Re-throw the error to stop further execution
});
}
return result;
};
// Hide seed widget
const seedWidget = node.widgets.find((w) => w.name === "seed");
if (seedWidget) {
seedWidget.visible = false;
}
});
// Refresh the Reset button label with the integer value the loop will emit
// on the next run, or warn when that value would exceed the configured max.
const updateResetButtonTextNode = () => {
  fetch("/get_counter_value", {
    method: "POST",
  })
    .then((response) => response.json())
    .then((data) => {
      if (data.success) {
        // "jump" is the per-execution increment; "from_this" is the start value.
        const jumpWidget = node.widgets.find((w) => w.name === "jump");
        const fromThisWidget = node.widgets.find(
          (w) => w.name === "from_this"
        );
        if (data.value === 0) {
          // Counter file is empty: the first run starts at from_this.
          resetButton.name =
            "Reset Counter (Empty, next: " + fromThisWidget.value + ")";
        } else {
          // "to_that" is the inclusive upper bound of the loop.
          const toThatWidget = node.widgets.find(
            (w) => w.name === "to_that"
          );
          let next_value = data.value + jumpWidget.value - 1;
          if (next_value > toThatWidget.value) {
            // Next value would overshoot the upper bound — flag it in the label.
            resetButton.name = `Reset Counter (ABOVE MAX: ${next_value} > ${toThatWidget.value})`;
            console.log("resetButton", resetButton);
          } else {
            resetButton.name = `Reset Counter (next: ${next_value})`; // - ${toThatWidget.value}
          }
        }
      } else {
        // NOTE(review): the message says "context size" but this endpoint is
        // the integer counter — presumably copied from ollama_talk.js; confirm.
        console.error("Error in context size:", data.error);
        resetButton.name = "Reset Counter (Error)";
      }
    })
    .catch((error) => {
      // Network/parse failure: surface a generic error state on the button.
      console.error("Error fetching context size:", error);
      resetButton.name = "Reset Counter (Error)";
    });
};
// Add reset button
const resetButton = node.addWidget("button", "Reset Counter", null, () => {
fetch("/reset_counter", {
method: "POST",
})
.then((response) => response.json())
.then((data) => {
if (data.success) {
// updateLineNumber();
updateResetButtonTextNode();
app.ui.toast("Counter reset successfully!", { duration: 5000 });
} else {
app.ui.toast(
`Failed to reset counter: ${data.error || "Unknown error"}`,
{ type: "error", duration: 5000 }
);
}
})
.catch((error) => {
console.error("Error:", error);
app.ui.toast("An error occurred while resetting the counter.", {
type: "error",
duration: 5000,
});
});
});
// Override the original execute function
const originalExecute = node.execute;
node.execute = function () {
const result = originalExecute.apply(this, arguments);
if (result instanceof Promise) {
return result.catch((error) => {
if (error.message.includes("Counter has reached its limit")) {
app.ui.toast(`Execution blocked: ${error.message}`, {
type: "error",
duration: 5000,
});
}
throw error; // Re-throw the error to stop further execution
});
}
return result;
};
// Initial update of showing counter number
setTimeout(updateResetButtonTextNode, 0);
// Listen for node execution events (update value when node executed)
api.addEventListener("executed", async () => {
updateResetButtonTextNode();
});
// Add a handler for the jump widget (update value reset on change)
const jumpWidget = node.widgets.find((w) => w.name === "jump");
if (jumpWidget) {
const originalOnChange = jumpWidget.callback;
jumpWidget.callback = function (v) {
if (originalOnChange) {
originalOnChange.call(this, v);
}
updateResetButtonTextNode();
};
}
// Add a handler for the to_that widget (update value reset on change)
const toThatWidget = node.widgets.find((w) => w.name === "to_that");
if (toThatWidget) {
const originalOnChange = toThatWidget.callback;
toThatWidget.callback = function (v) {
if (originalOnChange) {
originalOnChange.call(this, v);
}
updateResetButtonTextNode();
};
}
// Add a handler for the to_that widget (on change from_this => reset button)
const fromThisWidget = node.widgets.find((w) => w.name === "from_this");
if (fromThisWidget) {
const originalOnChange = fromThisWidget.callback;
fromThisWidget.callback = function (v) {
if (originalOnChange) {
originalOnChange.call(this, v);
}
fetch("/reset_counter", {
method: "POST",
})
.then((response) => response.json())
.then((data) => {
if (data.success) {
// updateLineNumber();
updateResetButtonTextNode();
app.ui.toast("Counter reset successfully!", { duration: 5000 });
} else {
app.ui.toast(
`Failed to reset counter: ${data.error || "Unknown error"}`,
{ type: "error", duration: 5000 }
);
}
})
.catch((error) => {
console.error("Error:", error);
app.ui.toast("An error occurred while resetting the counter.", {
type: "error",
duration: 5000,
});
});
};
}
},
});

View File

@@ -0,0 +1,54 @@
import { app } from "../../../scripts/app.js";

// Adds a model-selection combo plus an "Update Models" button to the
// Bjornulf_OllamaConfig node; the button queries the Ollama server's
// /api/tags endpoint for installed models.
app.registerExtension({
  name: "Bjornulf.OllamaConfig",
  async nodeCreated(node) {
    if (node.comfyClass !== "Bjornulf_OllamaConfig") return;

    // Helper: locate a widget on this node by its name.
    const findWidget = (name) => node.widgets.find((w) => w.name === name);

    // Combo widget listing available models; selecting an entry copies the
    // choice into the node's model_name widget.
    const modelListWidget = node.addWidget(
      "combo",
      "select_model_here",
      "",
      (selected) => {
        const modelNameWidget = findWidget("model_name");
        if (modelNameWidget) {
          modelNameWidget.value = selected;
        }
      },
      { values: [] }
    );

    // Button that fetches the model list from the configured Ollama URL and
    // refreshes both the combo options and the model_name widget.
    node.addCustomWidget({
      name: "Update model_list",
      type: "button",
      value: "Update Models",
      callback: async function () {
        try {
          const url = findWidget("ollama_url").value;
          const response = await fetch(`${url}/api/tags`);
          const data = await response.json();
          const modelNames = (data.models ?? []).map((m) => m.name);
          if (modelNames.length === 0) return;
          modelListWidget.options.values = modelNames;
          modelListWidget.value = modelNames[0];
          const modelNameWidget = findWidget("model_name");
          if (modelNameWidget) {
            modelNameWidget.value = modelNames[0];
          }
        } catch (error) {
          // Keep failures non-fatal: log and leave the current list intact.
          console.error("Error updating models:", error);
        }
      },
    });
  },
});

197
web/js/ollama_talk.js Normal file
View File

@@ -0,0 +1,197 @@
import { app } from "../../../scripts/app.js";
import { api } from "../../../scripts/api.js";

// Node-specific logic for the Bjornulf_OllamaTalk node: manages the
// conversation-context reset button, a "Resume" button for paused runs,
// widget-driven button visibility, and interrupt handling.
app.registerExtension({
  name: "Bjornulf.OllamaTalk",
  async nodeCreated(node) {
    if (node.comfyClass === "Bjornulf_OllamaTalk") {
      // Set seed widget to hidden input (seed is managed programmatically).
      const seedWidget = node.widgets.find((w) => w.name === "seed");
      if (seedWidget) {
        seedWidget.type = "HIDDEN";
      }

      // Refresh the reset button label with the current context-file size
      // (number of lines) fetched from the backend.
      const updateResetButtonTextNode = () => {
        fetch("/get_current_context_size", {
          method: "POST",
        })
          .then((response) => response.json())
          .then((data) => {
            if (data.success) {
              if (data.value === 0) {
                resetButton.name = "Save/Reset Context File (Empty)";
              } else {
                resetButton.name = `Save/Reset Context File (${data.value} lines)`;
              }
            } else {
              console.error("Error in context size:", data.error);
              resetButton.name = "Save/Reset Context File (Error)";
            }
          })
          .catch((error) => {
            console.error("Error fetching context size:", error);
            resetButton.name = "Save/Reset Context File (Error)";
          });
      };

      // Add reset button: clears the conversation context file on the backend
      // and refreshes the button label to reflect the new (empty) size.
      const resetButton = node.addWidget(
        "button",
        "Save/Reset Context File",
        null,
        () => {
          fetch("/reset_lines_context", {
            method: "POST",
          })
            .then((response) => response.json())
            .then((data) => {
              if (data.success) {
                // updateLineNumber();
                updateResetButtonTextNode();
                app.ui.toast("Counter reset successfully!", { duration: 5000 });
              } else {
                app.ui.toast(
                  `Failed to reset counter: ${data.error || "Unknown error"}`,
                  { type: "error", duration: 5000 }
                );
              }
            })
            .catch((error) => {
              console.error("Error:", error);
              app.ui.toast("An error occurred while resetting the counter.", {
                type: "error",
                duration: 5000,
              });
            });
        }
      );

      // Add resume button: reads the user_prompt value from the serialized
      // workflow (so unsaved widget edits are captured) and posts it to the
      // backend to unblock a paused "waiting_for_prompt" execution.
      const resumeButton = node.addWidget("button", "Resume", "Resume", () => {
        const workflow = app.graph.serialize();
        const nodeData = workflow.nodes.find((n) => n.id === node.id);
        // widgets_values is positional; index it by the widget's position.
        const userPromptValue =
          nodeData.widgets_values?.[
            node.widgets.findIndex((w) => w.name === "user_prompt")
          ];
        fetch("/bjornulf_ollama_send_prompt", {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
          },
          body: JSON.stringify({
            node_id: node.id,
            user_prompt: userPromptValue,
          }),
        })
          .then((response) => response.text())
          .then((data) => {
            console.log("Resume response:", data);
          })
          .catch((error) => console.error("Error:", error));
      });

      // Function to update button visibility based on widget values:
      // reset button only when use_context_file is on, resume button only
      // when waiting_for_prompt is on.
      const updateButtonVisibility = () => {
        // Check context file widget
        const contextWidget = node.widgets.find(
          (w) => w.name === "use_context_file"
        );
        const isContextFileEnabled = contextWidget
          ? contextWidget.value
          : false;
        resetButton.type = isContextFileEnabled ? "button" : "HIDDEN";
        // Check waiting for prompt widget
        const waitingWidget = node.widgets.find(
          (w) => w.name === "waiting_for_prompt"
        );
        const isWaitingForPrompt = waitingWidget ? waitingWidget.value : false;
        resumeButton.type = isWaitingForPrompt ? "button" : "HIDDEN";
        //ALSO update reset button text node
        updateResetButtonTextNode(); // Will trigger when... toggle / refresh page
        // Force canvas redraw to update UI
        node.setDirtyCanvas(true);
      };

      // Wrap the use_context_file widget callback so toggling it also
      // refreshes button visibility (original callback still runs first).
      const contextWidget = node.widgets.find(
        (w) => w.name === "use_context_file"
      );
      if (contextWidget) {
        const originalOnChange = contextWidget.callback;
        contextWidget.callback = function (v) {
          if (originalOnChange) {
            originalOnChange.call(this, v);
          }
          updateButtonVisibility();
        };
      }

      // Same wrapping for the waiting_for_prompt widget.
      const waitingWidget = node.widgets.find(
        (w) => w.name === "waiting_for_prompt"
      );
      if (waitingWidget) {
        const originalOnChange = waitingWidget.callback;
        waitingWidget.callback = function (v) {
          if (originalOnChange) {
            originalOnChange.call(this, v);
          }
          updateButtonVisibility();
        };
      }

      // Initial update of button visibility (deferred so all widgets exist).
      setTimeout(updateButtonVisibility, 0);

      // Listen for node execution events to keep the context size fresh.
      api.addEventListener("executed", async () => {
        updateResetButtonTextNode();
      });

      // If workflow is stopped during pause, cancel the run: monkey-patch
      // api.interrupt so the backend wait is released before the normal
      // interrupt proceeds.
      // NOTE(review): fetchApi's promise is fire-and-forget here; the
      // interrupt may race the cancellation request — confirm acceptable.
      const original_api_interrupt = api.interrupt;
      api.interrupt = function () {
        api.fetchApi('/bjornulf_ollama_interrupt', {
          method: 'POST'
        });
        original_api_interrupt.apply(this, arguments);
      }
    }
  },
});
// // Add listener for workflow execution
// app.addEventListener("workflowExecuted", () => {
// if (node.graph.isPlaying) {
// updateContextSize();
// }
// });
// app.registerExtension({
// name: "Bjornulf.OllamaContextChat",
// async nodeCreated(node) {
// if (node.comfyClass === "Bjornulf_OllamaContextChat") {
// const resumeButton = node.addWidget("button", "Resume", "Resume", () => {
// fetch('/bjornulf_ollama_send_prompt', { method: 'GET' })
// .then(response => response.text())
// .then(data => {
// console.log('Resume response:', data);
// // You can update the UI here if needed
// })
// .catch(error => console.error('Error:', error));
// });
// }
// }
// });
// Set seed widget to hidden input
// const seedWidget = node.widgets.find((w) => w.name === "seed");
// if (seedWidget) {
// seedWidget.type = "HIDDEN";
// }

View File

@@ -1,76 +0,0 @@
import { app } from "../../../scripts/app.js";
import { ComfyWidgets } from "../../../scripts/widgets.js";

// Visual styling copied onto the display element's style object.
const textAreaStyles = {
  readOnly: true,
  opacity: 1,
  padding: '10px',
  border: '1px solid #ccc',
  borderRadius: '5px',
  backgroundColor: '#222',
  color: 'Lime',
  fontFamily: 'Arial, sans-serif',
  fontSize: '14px',
  lineHeight: '1.4',
  resize: 'vertical',
  overflowY: 'auto',
};

// Displays the float value received by the node in a styled, read-only widget.
app.registerExtension({
  name: "Bjornulf.ShowFloat",
  async beforeRegisterNodeDef(nodeType, nodeData, app) {
    if (nodeData.name !== "Bjornulf_ShowFloat") return;

    // Build one read-only, styled multiline STRING widget holding `text`.
    const addDisplayWidget = (node, text) => {
      const widget = ComfyWidgets["STRING"](node, "text", ["STRING", { multiline: true }], app).widget;
      widget.inputEl.readOnly = true;
      Object.assign(widget.inputEl.style, textAreaStyles);
      widget.inputEl.classList.add('bjornulf-show-text');
      widget.value = text;
      return widget;
    };

    // Replace any previously-added display widgets (everything past index 0)
    // with fresh ones for `text`, then resize the node to fit.
    const renderValues = (node, text) => {
      if (node.widgets) {
        for (let i = 1; i < node.widgets.length; i++) {
          node.widgets[i].onRemove?.();
        }
        node.widgets.length = 1;
      }
      const values = Array.isArray(text) ? text : [text];
      for (const value of values) {
        if (value) {
          addDisplayWidget(node, value);
        }
      }
      requestAnimationFrame(() => {
        const sz = node.computeSize();
        if (sz[0] < node.size[0]) sz[0] = node.size[0];
        if (sz[1] < node.size[1]) sz[1] = node.size[1];
        node.onResize?.(sz);
        app.graph.setDirtyCanvas(true, false);
      });
    };

    // When the node is executed we are sent the input text; display it.
    const onExecuted = nodeType.prototype.onExecuted;
    nodeType.prototype.onExecuted = function (message) {
      onExecuted?.apply(this, arguments);
      renderValues(this, message.text);
    };

    // Restore the display when a saved workflow is loaded.
    const onConfigure = nodeType.prototype.onConfigure;
    nodeType.prototype.onConfigure = function () {
      onConfigure?.apply(this, arguments);
      if (this.widgets_values?.length) {
        renderValues(this, this.widgets_values);
      }
    };
  },
});

View File

@@ -1,76 +0,0 @@
import { app } from "../../../scripts/app.js";
import { ComfyWidgets } from "../../../scripts/widgets.js";

// Visual styling copied onto the display element's style object.
const textAreaStyles = {
  readOnly: true,
  opacity: 1,
  padding: '10px',
  border: '1px solid #ccc',
  borderRadius: '5px',
  backgroundColor: '#222',
  color: 'Lime',
  fontFamily: 'Arial, sans-serif',
  fontSize: '14px',
  lineHeight: '1.4',
  resize: 'vertical',
  overflowY: 'auto',
};

// Displays the integer value received by the node in a styled, read-only widget.
app.registerExtension({
  name: "Bjornulf.ShowInt",
  async beforeRegisterNodeDef(nodeType, nodeData, app) {
    if (nodeData.name !== "Bjornulf_ShowInt") return;

    // Build one read-only, styled multiline STRING widget holding `text`.
    const addDisplayWidget = (node, text) => {
      const widget = ComfyWidgets["STRING"](node, "text", ["STRING", { multiline: true }], app).widget;
      widget.inputEl.readOnly = true;
      Object.assign(widget.inputEl.style, textAreaStyles);
      widget.inputEl.classList.add('bjornulf-show-text');
      widget.value = text;
      return widget;
    };

    // Replace any previously-added display widgets (everything past index 0)
    // with fresh ones for `text`, then resize the node to fit.
    const renderValues = (node, text) => {
      if (node.widgets) {
        for (let i = 1; i < node.widgets.length; i++) {
          node.widgets[i].onRemove?.();
        }
        node.widgets.length = 1;
      }
      const values = Array.isArray(text) ? text : [text];
      for (const value of values) {
        if (value) {
          addDisplayWidget(node, value);
        }
      }
      requestAnimationFrame(() => {
        const sz = node.computeSize();
        if (sz[0] < node.size[0]) sz[0] = node.size[0];
        if (sz[1] < node.size[1]) sz[1] = node.size[1];
        node.onResize?.(sz);
        app.graph.setDirtyCanvas(true, false);
      });
    };

    // When the node is executed we are sent the input text; display it.
    const onExecuted = nodeType.prototype.onExecuted;
    nodeType.prototype.onExecuted = function (message) {
      onExecuted?.apply(this, arguments);
      renderValues(this, message.text);
    };

    // Restore the display when a saved workflow is loaded.
    const onConfigure = nodeType.prototype.onConfigure;
    nodeType.prototype.onConfigure = function () {
      onConfigure?.apply(this, arguments);
      if (this.widgets_values?.length) {
        renderValues(this, this.widgets_values);
      }
    };
  },
});