From fc98c752dca2a8bf5c2ba23e01fc306ff83fa397 Mon Sep 17 00:00:00 2001
From: Will Miao
Date: Tue, 24 Feb 2026 23:58:32 +0800
Subject: [PATCH] Fix Windows FileNotFoundError when loading LoRAs from
 lora_stack

lora_stack stores relative paths (e.g., 'Illustrious/style/file.safetensors'),
but comfy.utils.load_torch_file requires absolute paths. Previously, when
loading LoRAs from lora_stack, the relative path was passed directly to the
low-level API, causing FileNotFoundError on Windows.

This fix extracts the lora name from the relative path and uses
get_lora_info_absolute() to resolve the full absolute path before passing it
to load_torch_file(). This maintains compatibility with the lora_stack format
while ensuring correct file loading across all platforms.

Fixes: FileNotFoundError for relative paths in LoraLoaderLM and
LoraTextLoaderLM when processing lora_stack input.
---
 py/nodes/lora_loader.py | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/py/nodes/lora_loader.py b/py/nodes/lora_loader.py
index bfebdf29..e6a5830f 100644
--- a/py/nodes/lora_loader.py
+++ b/py/nodes/lora_loader.py
@@ -53,6 +53,11 @@ class LoraLoaderLM:
         # First process lora_stack if available
         if lora_stack:
             for lora_path, model_strength, clip_strength in lora_stack:
+                # Extract lora name and convert to absolute path
+                # lora_stack stores relative paths, but load_torch_file needs absolute paths
+                lora_name = extract_lora_name(lora_path)
+                absolute_lora_path, trigger_words = get_lora_info_absolute(lora_name)
+
                 # Apply the LoRA using the appropriate loader
                 if is_nunchaku_model:
                     # Use our custom function for Flux models
@@ -60,13 +65,9 @@ class LoraLoaderLM:
                     # clip remains unchanged for Nunchaku models
                 else:
                     # Use lower-level API to load LoRA directly without folder_paths validation
-                    lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+                    lora = comfy.utils.load_torch_file(absolute_lora_path, safe_load=True)
                     model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
-                # Extract lora name for trigger words lookup
-                lora_name = extract_lora_name(lora_path)
-                _, trigger_words = get_lora_info_absolute(lora_name)
-
                 all_trigger_words.extend(trigger_words)
 
                 # Add clip strength to output if different from model strength (except for Nunchaku models)
                 if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
@@ -196,6 +197,11 @@ class LoraTextLoaderLM:
         # First process lora_stack if available
         if lora_stack:
             for lora_path, model_strength, clip_strength in lora_stack:
+                # Extract lora name and convert to absolute path
+                # lora_stack stores relative paths, but load_torch_file needs absolute paths
+                lora_name = extract_lora_name(lora_path)
+                absolute_lora_path, trigger_words = get_lora_info_absolute(lora_name)
+
                 # Apply the LoRA using the appropriate loader
                 if is_nunchaku_model:
                     # Use our custom function for Flux models
@@ -203,13 +209,9 @@ class LoraTextLoaderLM:
                     # clip remains unchanged for Nunchaku models
                 else:
                     # Use lower-level API to load LoRA directly without folder_paths validation
-                    lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+                    lora = comfy.utils.load_torch_file(absolute_lora_path, safe_load=True)
                     model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
-                # Extract lora name for trigger words lookup
-                lora_name = extract_lora_name(lora_path)
-                _, trigger_words = get_lora_info_absolute(lora_name)
-
                 all_trigger_words.extend(trigger_words)
 
                 # Add clip strength to output if different from model strength (except for Nunchaku models)
                 if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001: