Mirror of https://github.com/willmiao/ComfyUI-Lora-Manager.git (synced 2026-03-21 21:22:11 -03:00)
feat(lora-loader): Load LoRAs using lower-level API to bypass folder_paths validation
- Add get_lora_info_absolute() function to return absolute file paths
- Replace LoraLoader().load_lora() with comfy.utils.load_torch_file() + comfy.sd.load_lora_for_models() to enable loading LoRAs from any path
- This allows LoRA Manager to load LoRAs from non-standard paths (multi-library support)
- Fixes #805
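For context, below is a minimal sketch of the lower-level loading path this commit switches to. It is not the node code itself: the helper name apply_lora_from_path is illustrative, and it assumes a running ComfyUI environment where model and clip come from an upstream loader node and lora_path may point anywhere on disk, including outside the registered loras folders.

import comfy.utils
import comfy.sd

def apply_lora_from_path(model, clip, lora_path, model_strength=1.0, clip_strength=1.0):
    # Read the LoRA state dict straight from disk; safe_load avoids executing
    # pickled code when the file is not a .safetensors file.
    lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
    # Patch the model and CLIP weights directly; no folder_paths lookup is involved,
    # so the path is not restricted to ComfyUI's configured loras directories.
    return comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)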
@@ -1,7 +1,8 @@
 import logging
 import re
-from nodes import LoraLoader
-from ..utils.utils import get_lora_info
+import comfy.utils  # type: ignore
+import comfy.sd  # type: ignore
+from ..utils.utils import get_lora_info_absolute
 from .utils import FlexibleOptionalInputType, any_type, extract_lora_name, get_loras_list, nunchaku_load_lora
 
 logger = logging.getLogger(__name__)
@@ -58,12 +59,13 @@ class LoraLoaderLM:
                 model = nunchaku_load_lora(model, lora_path, model_strength)
                 # clip remains unchanged for Nunchaku models
             else:
-                # Use default loader for standard models
-                model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)
+                # Use lower-level API to load LoRA directly without folder_paths validation
+                lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+                model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
 
             # Extract lora name for trigger words lookup
             lora_name = extract_lora_name(lora_path)
-            _, trigger_words = get_lora_info(lora_name)
+            _, trigger_words = get_lora_info_absolute(lora_name)
 
             all_trigger_words.extend(trigger_words)
             # Add clip strength to output if different from model strength (except for Nunchaku models)
@@ -84,7 +86,7 @@ class LoraLoaderLM:
             clip_strength = float(lora.get('clipStrength', model_strength))
 
             # Get lora path and trigger words
-            lora_path, trigger_words = get_lora_info(lora_name)
+            lora_path, trigger_words = get_lora_info_absolute(lora_name)
 
             # Apply the LoRA using the appropriate loader
             if is_nunchaku_model:
@@ -92,8 +94,9 @@ class LoraLoaderLM:
                 model = nunchaku_load_lora(model, lora_path, model_strength)
                 # clip remains unchanged
             else:
-                # Use default loader for standard models
-                model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)
+                # Use lower-level API to load LoRA directly without folder_paths validation
+                lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+                model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
 
             # Include clip strength in output if different from model strength and not a Nunchaku model
             if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
@@ -199,12 +202,13 @@ class LoraTextLoaderLM:
                 model = nunchaku_load_lora(model, lora_path, model_strength)
                 # clip remains unchanged for Nunchaku models
             else:
-                # Use default loader for standard models
-                model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)
+                # Use lower-level API to load LoRA directly without folder_paths validation
+                lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+                model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
 
             # Extract lora name for trigger words lookup
             lora_name = extract_lora_name(lora_path)
-            _, trigger_words = get_lora_info(lora_name)
+            _, trigger_words = get_lora_info_absolute(lora_name)
 
             all_trigger_words.extend(trigger_words)
             # Add clip strength to output if different from model strength (except for Nunchaku models)
@@ -221,7 +225,7 @@ class LoraTextLoaderLM:
             clip_strength = lora['clip_strength']
 
             # Get lora path and trigger words
-            lora_path, trigger_words = get_lora_info(lora_name)
+            lora_path, trigger_words = get_lora_info_absolute(lora_name)
 
             # Apply the LoRA using the appropriate loader
             if is_nunchaku_model:
@@ -229,8 +233,9 @@ class LoraTextLoaderLM:
                 model = nunchaku_load_lora(model, lora_path, model_strength)
                 # clip remains unchanged
             else:
-                # Use default loader for standard models
-                model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)
+                # Use lower-level API to load LoRA directly without folder_paths validation
+                lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+                model, clip = comfy.sd.load_lora_for_models(model, clip, lora, model_strength, clip_strength)
 
             # Include clip strength in output if different from model strength and not a Nunchaku model
             if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
@@ -50,6 +50,52 @@ def get_lora_info(lora_name):
         # No event loop is running, we can use asyncio.run()
         return asyncio.run(_get_lora_info_async())
 
+
+def get_lora_info_absolute(lora_name):
+    """Get the absolute lora path and trigger words from cache
+
+    Returns:
+        tuple: (absolute_path, trigger_words) where absolute_path is the full
+        file system path to the LoRA file, or original lora_name if not found
+    """
+    async def _get_lora_info_absolute_async():
+        scanner = await ServiceRegistry.get_lora_scanner()
+        cache = await scanner.get_cached_data()
+
+        for item in cache.raw_data:
+            if item.get('file_name') == lora_name:
+                file_path = item.get('file_path')
+                if file_path:
+                    # Return absolute path directly
+                    # Get trigger words from civitai metadata
+                    civitai = item.get('civitai', {})
+                    trigger_words = civitai.get('trainedWords', []) if civitai else []
+                    return file_path, trigger_words
+        return lora_name, []
+
+    try:
+        # Check if we're already in an event loop
+        loop = asyncio.get_running_loop()
+        # If we're in a running loop, we need to use a different approach
+        # Create a new thread to run the async code
+        import concurrent.futures
+
+        def run_in_thread():
+            new_loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(new_loop)
+            try:
+                return new_loop.run_until_complete(_get_lora_info_absolute_async())
+            finally:
+                new_loop.close()
+
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            future = executor.submit(run_in_thread)
+            return future.result()
+
+    except RuntimeError:
+        # No event loop is running, we can use asyncio.run()
+        return asyncio.run(_get_lora_info_absolute_async())
+
 def fuzzy_match(text: str, pattern: str, threshold: float = 0.85) -> bool:
     """
     Check if text matches pattern using fuzzy matching.
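The new get_lora_info_absolute() helper reuses the same sync-to-async bridge as the existing get_lora_info(): call asyncio.run() when no event loop is active, otherwise run the coroutine on a fresh loop in a worker thread so the caller's loop is never blocked. A stripped-down sketch of that pattern follows; the _lookup coroutine is a placeholder standing in for the scanner cache query.

import asyncio
import concurrent.futures

async def _lookup():
    # Placeholder coroutine; the real helper awaits the LoRA scanner's cached data here.
    return "result"

def run_coro_from_sync(coro_factory):
    try:
        # Detect whether an event loop is already running in this thread...
        asyncio.get_running_loop()
    except RuntimeError:
        # ...no loop is running, so asyncio.run() is enough.
        return asyncio.run(coro_factory())

    # ...otherwise run the coroutine on its own loop in a worker thread and wait for it.
    def run_in_thread():
        new_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(new_loop)
        try:
            return new_loop.run_until_complete(coro_factory())
        finally:
            new_loop.close()

    with concurrent.futures.ThreadPoolExecutor() as executor:
        return executor.submit(run_in_thread).result()

print(run_coro_from_sync(_lookup))  # prints: result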