diff --git a/README.md b/README.md index f85fd91..e6b778e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# ๐Ÿ”— Comfyui : Bjornulf_custom_nodes v0.17 ๐Ÿ”— +# ๐Ÿ”— Comfyui : Bjornulf_custom_nodes v0.18 ๐Ÿ”— # Dependencies @@ -27,6 +27,7 @@ - **v0.15**: Add two new nodes: TTS - Text to Speech and Character Description Generator - **v0.16**: Big changes on Character Description Generator - **v0.17**: New loop node, combine by lines. +- **v0.18**: New loop node, Free VRAM hack # ๐Ÿ“ Nodes descriptions @@ -272,4 +273,16 @@ Some details are unusable for some checkpoints, very much a work in progress, th **Description:** Sometimes you want to loop over several inputs but you also want to separate different lines of your output. -So with this node, you can have the number of inputs and outputs you want. See example for usage. \ No newline at end of file +So with this node, you can have the number of inputs and outputs you want. See example for usage. + +### 34 - ๐Ÿงน Free VRAM hack +![free vram](screenshots/free_vram_hack1.png) +![free vram](screenshots/free_vram_hack2.png) + +**Description:** +So this is my attempt at freeing up VRAM after usage, I will try to improve that. +For me, on launch ComfyUI is using 180MB of VRAM, after my clean up VRAM node it can go back down to 376MB. +I don't think there is a clean way to do that, so I'm using a hacky way. +So, not perfect but better than being stuck at 6GB of VRAM used if I know I won't be using it again... +Just connect this node with your workflow, it takes an image as input and return the same image without any changes. +โ— Comfyui is using cache to run faster (like not reloading checkpoints), so only use this free VRAM node when you need it. 
\ No newline at end of file
diff --git a/__init__.py b/__init__.py
index 520e21f..1ef840a 100644
--- a/__init__.py
+++ b/__init__.py
@@ -38,6 +38,7 @@ from .image_mask_cutter import ImageMaskCutter
 from .character_description import CharacterDescriptionGenerator
 from .text_to_speech import TextToSpeech
 from .loop_combine_texts_by_lines import CombineTextsByLines
+from .free_vram_hack import FreeVRAMNode
 # from .check_black_image import CheckBlackImage
 # from .clear_vram import ClearVRAM
 
@@ -46,6 +47,7 @@ from .loop_combine_texts_by_lines import CombineTextsByLines
 NODE_CLASS_MAPPINGS = {
     # "Bjornulf_CustomStringType": CustomStringType,
     "Bjornulf_ollamaLoader": ollamaLoader,
+    "Bjornulf_FreeVRAM": FreeVRAMNode,
     "Bjornulf_CombineTextsByLines": CombineTextsByLines,
     "Bjornulf_TextToSpeech": TextToSpeech,
     "Bjornulf_CharacterDescriptionGenerator": CharacterDescriptionGenerator,
@@ -92,6 +94,7 @@ NODE_CLASS_MAPPINGS = {
 NODE_DISPLAY_NAME_MAPPINGS = {
     # "Bjornulf_CustomStringType": "!!! CUSTOM STRING TYPE !!!",
     "Bjornulf_ollamaLoader": "๐Ÿฆ™ Ollama (Description)",
+    "Bjornulf_FreeVRAM": "๐Ÿงน Free VRAM hack",
     "Bjornulf_CombineTextsByLines": "โ™ป Loop (All Lines from input ๐Ÿ”— combine by lines)",
     "Bjornulf_TextToSpeech": "๐Ÿ”Š TTS - Text to Speech",
     "Bjornulf_CharacterDescriptionGenerator": "๐Ÿง‘๐Ÿ“ Character Description Generator",
diff --git a/free_vram_hack.py b/free_vram_hack.py
new file mode 100644
index 0000000..0307667
--- /dev/null
+++ b/free_vram_hack.py
@@ -0,0 +1,57 @@
+import torch
+import gc
+import requests
+import json
+
+class FreeVRAMNode:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"image": ("IMAGE",)}}
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "free_vram"
+    CATEGORY = "memory_management"
+
+    def free_vram(self, image):
+        print("Attempting to free VRAM...")
+
+        # Clear CUDA cache
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            print("CUDA cache cleared.")
+
+        # Run garbage collection
+        collected = gc.collect()
+        print(f"Garbage collector: collected {collected} objects.")
+
+        # Trigger the HTTP request
+        self.trigger_http_request()
+
+        # Return the input image unchanged
+        return (image,)
+
+    def trigger_http_request(self):
+        url = "http://localhost:8188/prompt"
+        headers = {"Content-Type": "application/json"}
+        payload = {
+            "prompt": {
+                "3": {
+                    "inputs": {"text": "free VRAM hack"},
+                    "class_type": "Bjornulf_WriteText",
+                    "_meta": {"title": "โœ’ Write Text"}
+                },
+                "4": {
+                    "inputs": {"text_value": ["3", 0], "text": "free VRAM hack"},
+                    "class_type": "Bjornulf_ShowText",
+                    "_meta": {"title": "๐Ÿ‘ Show (Text)"}
+                }
+            }
+        }
+
+        try:
+            # A finite timeout keeps this node from hanging ComfyUI if the local server is unresponsive.
+            response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=10)
+            response.raise_for_status()
+            print("HTTP request triggered successfully")
+        except requests.exceptions.RequestException as e:
+            print(f"Failed to trigger HTTP request: {e}")
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 5c992c4..320ddbb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
 [project]
 name = "bjornulf_custom_nodes"
 description = "Nodes: Ollama, Text to Speech, Save image for Bjornulf LobeChat, Text with random Seed, Random line from input, Combine images (Background+Overlay alpha), Image to grayscale (black & white), Remove image Transparency (alpha), Resize Image, ..."
-version = "0.17"
+version = "0.18"
 license = {file = "LICENSE"}
 
 [project.urls]
diff --git a/screenshots/free_vram_hack1.png b/screenshots/free_vram_hack1.png
new file mode 100644
index 0000000..944a966
Binary files /dev/null and b/screenshots/free_vram_hack1.png differ
diff --git a/screenshots/free_vram_hack2.png b/screenshots/free_vram_hack2.png
new file mode 100644
index 0000000..b50e901
Binary files /dev/null and b/screenshots/free_vram_hack2.png differ