add requirements.txt

This commit is contained in:
justumen
2024-09-12 12:57:49 +02:00
parent 98b322956d
commit a65d78f1b7
4 changed files with 24 additions and 5 deletions

View File

@@ -1,5 +1,21 @@
# 🔗 Comfyui : Bjornulf_custom_nodes v0.21 🔗
# ☁ Usage in cloud :
If you want to use my nodes and comfyui in the cloud, I'm managing an optimized template on runpod : <https://runpod.io/console/deploy?template=r32dtr35u1&ref=tkowk7g5>
Template name : `bjornulf-comfyui-allin-workspace`, can be operational in ~3 minutes. (Depending on your pod)
⚠️ You need to open a terminal in browser (After clicking on `connect` from your pod) and use this to launch ComfyUI : `cd /workspace/ComfyUI && python main.py --listen 0.0.0.0 --port 3000` (Much better to control it with a terminal, check logs, etc...)
After that you can just click on the `Connect to port 3000` button.
If you have any issues with it, please let me know.
You need to create and select a network volume; the size is up to you. I have 50 GB of storage because I use the cloud only for Flux or LoRA training on a 4090. (~$0.70/hour)
It will manage everything in Runpod network storage (`/workspace/ComfyUI`), so you can stop and start the cloud GPU without losing anything, change GPU or whatever.
Zone : I recommend `EU-RO-1`, but up to you.
Top up your Runpod account with a minimum of $10 to start.
⚠️ Warning: you will pay by the minute, so this is not recommended for testing or learning ComfyUI. Do that locally!
Run cloud GPU only when you already have your workflow ready to run.
Advice: take a cheap GPU for testing, downloading models, and so on.
# Dependencies
- `pip install ollama` (you can also install the Ollama application if you want: https://ollama.com/download). The application itself is only needed if you want to use my Ollama node — but you need to run `pip install ollama` either way.

View File

@@ -39,7 +39,8 @@ from .image_mask_cutter import ImageMaskCutter
from .character_description import CharacterDescriptionGenerator
from .text_to_speech import TextToSpeech
from .loop_combine_texts_by_lines import CombineTextsByLines
from .free_vram_hack import FreeVRAMNode
from .free_vram_hack import FreeVRAM
# from .pause_resume import PauseResume
# from .check_black_image import CheckBlackImage
# from .clear_vram import ClearVRAM
@@ -48,7 +49,8 @@ from .free_vram_hack import FreeVRAMNode
NODE_CLASS_MAPPINGS = {
# "Bjornulf_CustomStringType": CustomStringType,
"Bjornulf_ollamaLoader": ollamaLoader,
"Bjornulf_FreeVRAM": FreeVRAMNode,
# "Bjornulf_PauseResume": PauseResume,
"Bjornulf_FreeVRAM": FreeVRAM,
"Bjornulf_CombineTextsByLines": CombineTextsByLines,
"Bjornulf_TextToSpeech": TextToSpeech,
"Bjornulf_CharacterDescriptionGenerator": CharacterDescriptionGenerator,
@@ -96,6 +98,7 @@ NODE_CLASS_MAPPINGS = {
NODE_DISPLAY_NAME_MAPPINGS = {
# "Bjornulf_CustomStringType": "!!! CUSTOM STRING TYPE !!!",
"Bjornulf_ollamaLoader": "🦙 Ollama (Description)",
# "Bjornulf_PauseResume": "⏸️ Pause/Resume",
"Bjornulf_FreeVRAM": "🧹 Free VRAM hack",
"Bjornulf_CombineTextsByLines": "♻ Loop (All Lines from input 🔗 combine by lines)",
"Bjornulf_TextToSpeech": "🔊 TTS - Text to Speech",

View File

@@ -3,14 +3,14 @@ import gc
import requests
import json
class FreeVRAMNode:
class FreeVRAM:
@classmethod
def INPUT_TYPES(s):
return {"required": {"image": ("IMAGE",)}}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "free_vram"
CATEGORY = "memory_management"
CATEGORY = "Bjornulf"
def free_vram(self, image):
print("Attempting to free VRAM...")