From 9c45d9db6c7b82a94d59abdc71e8dfdbfc870406 Mon Sep 17 00:00:00 2001
From: Will Miao <13051207myq@gmail.com>
Date: Mon, 18 Aug 2025 15:05:57 +0800
Subject: [PATCH] feat: Enhance WanVideoLoraSelect with improved low_mem_load
 and merge_loras options for better LORA management, see #363

---
 py/nodes/wanvideo_lora_select.py    |  9 +++++++--
 web/comfyui/wanvideo_lora_select.js | 10 +++++-----
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/py/nodes/wanvideo_lora_select.py b/py/nodes/wanvideo_lora_select.py
index 2bc2cb43..0d0188e2 100644
--- a/py/nodes/wanvideo_lora_select.py
+++ b/py/nodes/wanvideo_lora_select.py
@@ -14,7 +14,8 @@ class WanVideoLoraSelect:
     def INPUT_TYPES(cls):
         return {
             "required": {
-                "low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load the LORA model with less VRAM usage, slower loading"}),
+                "low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}),
+                "merge_loras": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}),
                 "text": (IO.STRING, {
                     "multiline": True,
                     "dynamicPrompts": True,
@@ -29,7 +30,7 @@ class WanVideoLoraSelect:
     RETURN_NAMES = ("lora", "trigger_words", "active_loras")
     FUNCTION = "process_loras"

-    def process_loras(self, text, low_mem_load=False, **kwargs):
+    def process_loras(self, text, low_mem_load=False, merge_loras=True, **kwargs):
         loras_list = []
         all_trigger_words = []
         active_loras = []
@@ -38,6 +39,9 @@ class WanVideoLoraSelect:
         prev_lora = kwargs.get('prev_lora', None)
         if prev_lora is not None:
             loras_list.extend(prev_lora)
+
+        if not merge_loras:
+            low_mem_load = False  # Unmerged LoRAs don't need low_mem_load

         # Get blocks if available
         blocks = kwargs.get('blocks', {})
@@ -65,6 +69,7 @@ class WanVideoLoraSelect:
                 "blocks": selected_blocks,
                 "layer_filter": layer_filter,
                 "low_mem_load": low_mem_load,
+                "merge_loras": merge_loras,
             }

             # Add to list and collect active loras
diff --git a/web/comfyui/wanvideo_lora_select.js b/web/comfyui/wanvideo_lora_select.js
index 744788a9..5c5237b3 100644
--- a/web/comfyui/wanvideo_lora_select.js
+++ b/web/comfyui/wanvideo_lora_select.js
@@ -30,12 +30,12 @@ app.registerExtension({
         // Restore saved value if exists
         let existingLoras = [];
         if (this.widgets_values && this.widgets_values.length > 0) {
-          // 0 for low_mem_load, 1 for text widget, 2 for loras widget
-          const savedValue = this.widgets_values[2];
+          // 0 for low_mem_load, 1 for merge_loras, 2 for text widget, 3 for loras widget
+          const savedValue = this.widgets_values[3];
           existingLoras = savedValue || [];
         }
         // Merge the loras data
-        const mergedLoras = mergeLoras(this.widgets[1].value, existingLoras);
+        const mergedLoras = mergeLoras(this.widgets[2].value, existingLoras);

         // Add flag to prevent callback loops
         let isUpdating = false;
@@ -49,7 +49,7 @@ app.registerExtension({

           try {
             // Remove loras that are not in the value array
-            const inputWidget = this.widgets[1];
+            const inputWidget = this.widgets[2];
             const currentLoras = value.map(l => l.name);

             // Use the constant pattern here as well
@@ -79,7 +79,7 @@ app.registerExtension({
          this.lorasWidget = result.widget;

          // Update input widget callback
-          const inputWidget = this.widgets[1];
+          const inputWidget = this.widgets[2];
          inputWidget.options.getMaxHeight = () => 100;
          this.inputWidget = inputWidget;
          // Wrap the callback with autocomplete setup