Fix Windows FileNotFoundError when loading LoRAs from lora_stack

lora_stack stores relative paths (e.g., 'Illustrious/style/file.safetensors'),
but comfy.utils.load_torch_file requires absolute paths. Previously, when
loading LoRAs from lora_stack, the relative path was passed directly to the
low-level API, causing FileNotFoundError on Windows.

This fix extracts the lora name from the relative path and uses
get_lora_info_absolute() to resolve the full absolute path before passing
it to load_torch_file(). This maintains compatibility with the lora_stack
format while ensuring correct file loading across all platforms.

Fixes: FileNotFoundError for relative paths in LoraLoaderLM and LoraTextLoaderLM
when processing lora_stack input.
Commit: fc98c752dc (parent c2754ea937)
Author: Will Miao
Date: 2026-02-24 23:58:32 +08:00

@@ -53,6 +53,11 @@ class LoraLoaderLM:
         # First process lora_stack if available
         if lora_stack:
             for lora_path, model_strength, clip_strength in lora_stack:
+                # Extract lora name and convert to absolute path
+                # lora_stack stores relative paths, but load_torch_file needs absolute paths
+                lora_name = extract_lora_name(lora_path)
+                absolute_lora_path, trigger_words = get_lora_info_absolute(lora_name)
                 # Apply the LoRA using the appropriate loader
                 if is_nunchaku_model:
                     # Use our custom function for Flux models
@@ -60,13 +65,9 @@ class LoraLoaderLM:
                     # clip remains unchanged for Nunchaku models
                 else:
                     # Use lower-level API to load LoRA directly without folder_paths validation
-                    lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+                    lora = comfy.utils.load_torch_file(absolute_lora_path, safe_load=True)
                     model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
-                # Extract lora name for trigger words lookup
-                lora_name = extract_lora_name(lora_path)
-                _, trigger_words = get_lora_info_absolute(lora_name)
                 all_trigger_words.extend(trigger_words)
             # Add clip strength to output if different from model strength (except for Nunchaku models)
             if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
@@ -196,6 +197,11 @@ class LoraTextLoaderLM:
         # First process lora_stack if available
         if lora_stack:
             for lora_path, model_strength, clip_strength in lora_stack:
+                # Extract lora name and convert to absolute path
+                # lora_stack stores relative paths, but load_torch_file needs absolute paths
+                lora_name = extract_lora_name(lora_path)
+                absolute_lora_path, trigger_words = get_lora_info_absolute(lora_name)
                 # Apply the LoRA using the appropriate loader
                 if is_nunchaku_model:
                     # Use our custom function for Flux models
@@ -203,13 +209,9 @@ class LoraTextLoaderLM:
                     # clip remains unchanged for Nunchaku models
                 else:
                     # Use lower-level API to load LoRA directly without folder_paths validation
-                    lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+                    lora = comfy.utils.load_torch_file(absolute_lora_path, safe_load=True)
                     model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
-                # Extract lora name for trigger words lookup
-                lora_name = extract_lora_name(lora_path)
-                _, trigger_words = get_lora_info_absolute(lora_name)
                 all_trigger_words.extend(trigger_words)
             # Add clip strength to output if different from model strength (except for Nunchaku models)
             if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001: