mirror of
https://github.com/justUmen/Bjornulf_custom_nodes.git
synced 2026-03-26 06:45:44 -03:00
0.18
This commit is contained in:
15
README.md
15
README.md
@@ -1,4 +1,4 @@
|
|||||||
# 🔗 Comfyui : Bjornulf_custom_nodes v0.17 🔗
|
# 🔗 Comfyui : Bjornulf_custom_nodes v0.18 🔗
|
||||||
|
|
||||||
# Dependencies
|
# Dependencies
|
||||||
|
|
||||||
@@ -27,6 +27,7 @@
|
|||||||
- **v0.15**: Add two new nodes: TTS - Text to Speech and Character Description Generator
|
- **v0.15**: Add two new nodes: TTS - Text to Speech and Character Description Generator
|
||||||
- **v0.16**: Big changes on Character Description Generator
|
- **v0.16**: Big changes on Character Description Generator
|
||||||
- **v0.17**: New loop node, combine by lines.
|
- **v0.17**: New loop node, combine by lines.
|
||||||
|
- **v0.18**: New loop node, Free VRAM hack
|
||||||
|
|
||||||
# 📝 Nodes descriptions
|
# 📝 Nodes descriptions
|
||||||
|
|
||||||
@@ -273,3 +274,15 @@ Some details are unusable for some checkpoints, very much a work in progress, th
|
|||||||
**Description:**
|
**Description:**
|
||||||
Sometimes you want to loop over several inputs but you also want to separate different lines of your output.
|
Sometimes you want to loop over several inputs but you also want to separate different lines of your output.
|
||||||
So with this node, you can have the number of inputs and outputs you want. See example for usage.
|
So with this node, you can have the number of inputs and outputs you want. See example for usage.
|
||||||
|
|
||||||
|
### 34 - 🧹 Free VRAM hack
|
||||||
|

|
||||||
|

|
||||||
|
|
||||||
|
**Description:**
|
||||||
|
|
This is my attempt at freeing up VRAM after usage; I will try to improve it over time.
|
||||||
|
|
For me, ComfyUI uses 180MB of VRAM right after launch; after running my clean-up VRAM node, usage can go back down to 376MB.
|
||||||
|
I don't think there is a clean way to do that, so I'm using a hacky way.
|
||||||
|
So, not perfect but better than being stuck at 6GB of VRAM used if I know I won't be using it again...
|
||||||
|
|
Just connect this node to your workflow: it takes an image as input and returns the same image without any changes.
|
||||||
|
|
❗ ComfyUI uses caching to run faster (e.g. not reloading checkpoints), so only use this free VRAM node when you actually need it.
|
||||||
@@ -38,6 +38,7 @@ from .image_mask_cutter import ImageMaskCutter
|
|||||||
from .character_description import CharacterDescriptionGenerator
|
from .character_description import CharacterDescriptionGenerator
|
||||||
from .text_to_speech import TextToSpeech
|
from .text_to_speech import TextToSpeech
|
||||||
from .loop_combine_texts_by_lines import CombineTextsByLines
|
from .loop_combine_texts_by_lines import CombineTextsByLines
|
||||||
|
from .free_vram_hack import FreeVRAMNode
|
||||||
# from .check_black_image import CheckBlackImage
|
# from .check_black_image import CheckBlackImage
|
||||||
# from .clear_vram import ClearVRAM
|
# from .clear_vram import ClearVRAM
|
||||||
|
|
||||||
@@ -46,6 +47,7 @@ from .loop_combine_texts_by_lines import CombineTextsByLines
|
|||||||
NODE_CLASS_MAPPINGS = {
|
NODE_CLASS_MAPPINGS = {
|
||||||
# "Bjornulf_CustomStringType": CustomStringType,
|
# "Bjornulf_CustomStringType": CustomStringType,
|
||||||
"Bjornulf_ollamaLoader": ollamaLoader,
|
"Bjornulf_ollamaLoader": ollamaLoader,
|
||||||
|
"Bjornulf_FreeVRAM": FreeVRAMNode,
|
||||||
"Bjornulf_CombineTextsByLines": CombineTextsByLines,
|
"Bjornulf_CombineTextsByLines": CombineTextsByLines,
|
||||||
"Bjornulf_TextToSpeech": TextToSpeech,
|
"Bjornulf_TextToSpeech": TextToSpeech,
|
||||||
"Bjornulf_CharacterDescriptionGenerator": CharacterDescriptionGenerator,
|
"Bjornulf_CharacterDescriptionGenerator": CharacterDescriptionGenerator,
|
||||||
@@ -92,6 +94,7 @@ NODE_CLASS_MAPPINGS = {
|
|||||||
NODE_DISPLAY_NAME_MAPPINGS = {
|
NODE_DISPLAY_NAME_MAPPINGS = {
|
||||||
# "Bjornulf_CustomStringType": "!!! CUSTOM STRING TYPE !!!",
|
# "Bjornulf_CustomStringType": "!!! CUSTOM STRING TYPE !!!",
|
||||||
"Bjornulf_ollamaLoader": "🦙 Ollama (Description)",
|
"Bjornulf_ollamaLoader": "🦙 Ollama (Description)",
|
||||||
|
"Bjornulf_FreeVRAM": "🧹 Free VRAM hack",
|
||||||
"Bjornulf_CombineTextsByLines": "♻ Loop (All Lines from input 🔗 combine by lines)",
|
"Bjornulf_CombineTextsByLines": "♻ Loop (All Lines from input 🔗 combine by lines)",
|
||||||
"Bjornulf_TextToSpeech": "🔊 TTS - Text to Speech",
|
"Bjornulf_TextToSpeech": "🔊 TTS - Text to Speech",
|
||||||
"Bjornulf_CharacterDescriptionGenerator": "🧑📝 Character Description Generator",
|
"Bjornulf_CharacterDescriptionGenerator": "🧑📝 Character Description Generator",
|
||||||
|
|||||||
56
free_vram_hack.py
Normal file
56
free_vram_hack.py
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
import torch
|
||||||
|
import gc
|
||||||
|
import requests
|
||||||
|
import json
|
||||||
|
|
||||||
|
class FreeVRAMNode:
    """Best-effort VRAM cleanup node for ComfyUI.

    Runs Python garbage collection, empties the CUDA allocator cache, then
    queues a tiny dummy prompt against the local ComfyUI API so the server
    itself releases cached models (the "hack" part). The input image is
    passed through unchanged, so the node can be spliced into any workflow.
    """

    @classmethod
    def INPUT_TYPES(cls):
        # Single required image input; the node is a pure pass-through.
        return {"required": {"image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "free_vram"
    CATEGORY = "memory_management"

    def free_vram(self, image):
        """Attempt to free VRAM, then return *image* untouched.

        Args:
            image: ComfyUI IMAGE tensor; forwarded as-is.

        Returns:
            A one-tuple containing the unmodified input image.
        """
        print("Attempting to free VRAM...")

        # Collect Python garbage FIRST so dangling tensor references are
        # dropped before the CUDA allocator is asked to release its cache —
        # otherwise memory held by collectable tensors stays in the cache.
        collected = gc.collect()
        print(f"Garbage collector: collected {collected} objects.")

        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            print("CUDA cache cleared.")

        # Ask the ComfyUI server to run a trivial prompt, nudging it to
        # drop cached models (hacky, best-effort).
        self.trigger_http_request()

        # Pass-through: the node never modifies the image.
        return (image,)

    def trigger_http_request(self):
        """POST a minimal dummy workflow to the local ComfyUI API.

        Best-effort: any request failure is logged and swallowed so the
        main workflow keeps running even when the API is unreachable.
        """
        url = "http://localhost:8188/prompt"
        payload = {
            "prompt": {
                "3": {
                    "inputs": {"text": "free VRAM hack"},
                    "class_type": "Bjornulf_WriteText",
                    "_meta": {"title": "✒ Write Text"},
                },
                "4": {
                    "inputs": {"text_value": ["3", 0], "text": "free VRAM hack"},
                    "class_type": "Bjornulf_ShowText",
                    "_meta": {"title": "👁 Show (Text)"},
                },
            }
        }
        try:
            # json= serializes the payload and sets the Content-Type header
            # for us; timeout= keeps a silent server from hanging the
            # workflow forever (the original call had no timeout at all).
            response = requests.post(url, json=payload, timeout=10)
            response.raise_for_status()
            print("HTTP request triggered successfully")
        except requests.exceptions.RequestException as e:
            print(f"Failed to trigger HTTP request: {e}")
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
[project]
|
[project]
|
||||||
name = "bjornulf_custom_nodes"
|
name = "bjornulf_custom_nodes"
|
||||||
description = "Nodes: Ollama, Text to Speech, Save image for Bjornulf LobeChat, Text with random Seed, Random line from input, Combine images (Background+Overlay alpha), Image to grayscale (black & white), Remove image Transparency (alpha), Resize Image, ..."
|
description = "Nodes: Ollama, Text to Speech, Save image for Bjornulf LobeChat, Text with random Seed, Random line from input, Combine images (Background+Overlay alpha), Image to grayscale (black & white), Remove image Transparency (alpha), Resize Image, ..."
|
||||||
version = "0.17"
|
version = "0.18"
|
||||||
license = {file = "LICENSE"}
|
license = {file = "LICENSE"}
|
||||||
|
|
||||||
[project.urls]
|
[project.urls]
|
||||||
|
|||||||
BIN
screenshots/free_vram_hack1.png
Normal file
BIN
screenshots/free_vram_hack1.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 394 KiB |
BIN
screenshots/free_vram_hack2.png
Normal file
BIN
screenshots/free_vram_hack2.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 400 KiB |
Reference in New Issue
Block a user