Mirror of https://github.com/willmiao/ComfyUI-Lora-Manager.git, synced 2026-03-25 07:05:43 -03:00

Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager
37
py/config.py
@@ -17,6 +17,7 @@ class Config:
        # Static route mapping dictionary (target to route mapping)
        self._route_mappings = {}
        self.loras_roots = self._init_lora_paths()
        self.checkpoints_roots = self._init_checkpoint_paths()
        self.temp_directory = folder_paths.get_temp_directory()
        # Scan symbolic links at initialization time
        self._scan_symbolic_links()
@@ -39,9 +40,12 @@ class Config:
        return False

    def _scan_symbolic_links(self):
        """Scan symbolic links in all LoRA root directories"""
        """Scan symbolic links in all LoRA and Checkpoint root directories"""
        for root in self.loras_roots:
            self._scan_directory_links(root)

        for root in self.checkpoints_roots:
            self._scan_directory_links(root)

    def _scan_directory_links(self, root: str):
        """Recursively scan a directory for symbolic links"""
@@ -73,7 +77,7 @@ class Config:
        """Add a static route mapping"""
        normalized_path = os.path.normpath(path).replace(os.sep, '/')
        self._route_mappings[normalized_path] = route
        logger.info(f"Added route mapping: {normalized_path} -> {route}")
        # logger.info(f"Added route mapping: {normalized_path} -> {route}")

    def map_path_to_link(self, path: str) -> str:
        """Map a target path back to its symbolic-link path"""
@@ -123,6 +127,35 @@ class Config:
        return unique_paths

    def _init_checkpoint_paths(self) -> List[str]:
        """Initialize and validate checkpoint paths from ComfyUI settings"""
        # Get checkpoint paths from folder_paths
        checkpoint_paths = folder_paths.get_folder_paths("checkpoints")
        diffusion_paths = folder_paths.get_folder_paths("diffusers")
        unet_paths = folder_paths.get_folder_paths("unet")

        # Combine all checkpoint-related paths
        all_paths = checkpoint_paths + diffusion_paths + unet_paths

        # Filter and normalize paths
        paths = sorted(set(path.replace(os.sep, "/")
                           for path in all_paths
                           if os.path.exists(path)), key=lambda p: p.lower())

        print("Found checkpoint roots:", paths)

        if not paths:
            logger.warning("No valid checkpoint folders found in ComfyUI configuration")
            return []

        # Initialize path mappings, handled the same way as the LoRA paths
        for path in paths:
            real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
            if real_path != path:
                self.add_path_mapping(path, real_path)

        return paths

    def get_preview_static_url(self, preview_path: str) -> str:
        """Convert local preview path to static URL"""
        if not preview_path:
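For reference, the path mapping registered above depends on resolving each configured folder to its real, symlink-free location. A minimal standalone sketch of that normalization, with a hypothetical path, looks like this:

import os

def resolve_real_path(configured_path: str) -> str:
    # Follow symlinks and normalize separators to forward slashes, as _init_checkpoint_paths does
    return os.path.normpath(os.path.realpath(configured_path)).replace(os.sep, '/')

configured = "/ComfyUI/models/checkpoints"   # hypothetical configured folder
real = resolve_real_path(configured)         # e.g. "/mnt/storage/checkpoints" if it is a symlink
if real != configured:
    print(f"would call add_path_mapping({configured!r}, {real!r})")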
@@ -1,16 +1,11 @@
import asyncio
import os
from server import PromptServer # type: ignore
from .config import config
from .routes.lora_routes import LoraRoutes
from .routes.api_routes import ApiRoutes
from .routes.recipe_routes import RecipeRoutes
from .routes.checkpoints_routes import CheckpointsRoutes
from .services.lora_scanner import LoraScanner
from .services.recipe_scanner import RecipeScanner
from .services.file_monitor import LoraFileMonitor
from .services.lora_cache import LoraCache
from .services.recipe_cache import RecipeCache
from .services.service_registry import ServiceRegistry
import logging

logger = logging.getLogger(__name__)
@@ -23,7 +18,7 @@ class LoraManager:
        """Initialize and register all routes"""
        app = PromptServer.instance.app

        added_targets = set()  # Track already added target paths

        # Add static routes for each lora root
        for idx, root in enumerate(config.loras_roots, start=1):
@@ -35,102 +30,141 @@ class LoraManager:
                    if link == root:
                        real_root = target
                        break
            # Add static route for original path
            app.router.add_static(preview_path, real_root)
            logger.info(f"Added static route {preview_path} -> {real_root}")

            # Record route mapping
            config.add_route_mapping(real_root, preview_path)
            added_targets.add(real_root)

        # Add extra static routes for the symlink target paths
        link_idx = 1
        # Add static routes for each checkpoint root
        for idx, root in enumerate(config.checkpoints_roots, start=1):
            preview_path = f'/checkpoints_static/root{idx}/preview'

            real_root = root
            if root in config._path_mappings.values():
                for target, link in config._path_mappings.items():
                    if link == root:
                        real_root = target
                        break
            # Add static route for original path
            app.router.add_static(preview_path, real_root)
            logger.info(f"Added static route {preview_path} -> {real_root}")

            # Record route mapping
            config.add_route_mapping(real_root, preview_path)
            added_targets.add(real_root)

        # Add static routes for symlink target paths
        link_idx = {
            'lora': 1,
            'checkpoint': 1
        }

        for target_path, link_path in config._path_mappings.items():
            if target_path not in added_targets:
                route_path = f'/loras_static/link_{link_idx}/preview'
                # Determine if this is a checkpoint or lora link based on path
                is_checkpoint = any(cp_root in link_path for cp_root in config.checkpoints_roots)
                is_checkpoint = is_checkpoint or any(cp_root in target_path for cp_root in config.checkpoints_roots)

                if is_checkpoint:
                    route_path = f'/checkpoints_static/link_{link_idx["checkpoint"]}/preview'
                    link_idx["checkpoint"] += 1
                else:
                    route_path = f'/loras_static/link_{link_idx["lora"]}/preview'
                    link_idx["lora"] += 1

                app.router.add_static(route_path, target_path)
                logger.info(f"Added static route for link target {route_path} -> {target_path}")
                config.add_route_mapping(target_path, route_path)
                added_targets.add(target_path)
                link_idx += 1

        # Add static route for plugin assets
        app.router.add_static('/loras_static', config.static_path)

        # Setup feature routes
        routes = LoraRoutes()
        lora_routes = LoraRoutes()
        checkpoints_routes = CheckpointsRoutes()

        # Setup file monitoring
        monitor = LoraFileMonitor(routes.scanner, config.loras_roots)
        monitor.start()

        routes.setup_routes(app)
        # Initialize routes
        lora_routes.setup_routes(app)
        checkpoints_routes.setup_routes(app)
        ApiRoutes.setup_routes(app, monitor)
        ApiRoutes.setup_routes(app)
        RecipeRoutes.setup_routes(app)

        # Store monitor in app for cleanup
        app['lora_monitor'] = monitor

        # Schedule cache initialization using the application's startup handler
        app.on_startup.append(lambda app: cls._schedule_cache_init(routes.scanner, routes.recipe_scanner))
        # Schedule service initialization
        app.on_startup.append(lambda app: cls._initialize_services())

        # Add cleanup
        app.on_shutdown.append(cls._cleanup)
        app.on_shutdown.append(ApiRoutes.cleanup)

    @classmethod
    async def _schedule_cache_init(cls, scanner: LoraScanner, recipe_scanner: RecipeScanner):
        """Schedule cache initialization in the running event loop"""
    async def _initialize_services(cls):
        """Initialize all services using the ServiceRegistry"""
        try:
            # Create low-priority initialization tasks
            lora_task = asyncio.create_task(cls._initialize_lora_cache(scanner), name='lora_cache_init')
            # Initialize CivitaiClient first to ensure it's ready for other services
            civitai_client = await ServiceRegistry.get_civitai_client()

            # Schedule recipe cache initialization with a delay to let lora scanner initialize first
            recipe_task = asyncio.create_task(cls._initialize_recipe_cache(recipe_scanner, delay=2), name='recipe_cache_init')
            # Get file monitors through ServiceRegistry
            lora_monitor = await ServiceRegistry.get_lora_monitor()
            checkpoint_monitor = await ServiceRegistry.get_checkpoint_monitor()

            # Start monitors
            lora_monitor.start()
            logger.debug("Lora monitor started")

            # Make sure checkpoint monitor has paths before starting
            await checkpoint_monitor.initialize_paths()
            checkpoint_monitor.start()
            logger.debug("Checkpoint monitor started")

            # Register DownloadManager with ServiceRegistry
            download_manager = await ServiceRegistry.get_download_manager()

            # Initialize WebSocket manager
            ws_manager = await ServiceRegistry.get_websocket_manager()

            # Initialize scanners in background
            lora_scanner = await ServiceRegistry.get_lora_scanner()
            checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()

            # Initialize recipe scanner if needed
            recipe_scanner = await ServiceRegistry.get_recipe_scanner()

            # Create low-priority initialization tasks
            asyncio.create_task(lora_scanner.initialize_in_background(), name='lora_cache_init')
            asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init')
            asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')

            logger.info("LoRA Manager: All services initialized and background tasks scheduled")

        except Exception as e:
            logger.error(f"LoRA Manager: Error scheduling cache initialization: {e}")

    @classmethod
    async def _initialize_lora_cache(cls, scanner: LoraScanner):
        """Initialize lora cache in background"""
        try:
            # Set an initial cache placeholder
            scanner._cache = LoraCache(
                raw_data=[],
                sorted_by_name=[],
                sorted_by_date=[],
                folders=[]
            )

            # Load the cache in stages
            await scanner.get_cached_data(force_refresh=True)
        except Exception as e:
            logger.error(f"LoRA Manager: Error initializing lora cache: {e}")

    @classmethod
    async def _initialize_recipe_cache(cls, scanner: RecipeScanner, delay: float = 2.0):
        """Initialize recipe cache in background with a delay"""
        try:
            # Wait for the specified delay to let lora scanner initialize first
            await asyncio.sleep(delay)

            # Set initial empty cache
            scanner._cache = RecipeCache(
                raw_data=[],
                sorted_by_name=[],
                sorted_by_date=[]
            )

            # Force refresh to load the actual data
            await scanner.get_cached_data(force_refresh=True)
        except Exception as e:
            logger.error(f"LoRA Manager: Error initializing recipe cache: {e}")
            logger.error(f"LoRA Manager: Error initializing services: {e}", exc_info=True)

    @classmethod
    async def _cleanup(cls, app):
        """Cleanup resources"""
        if 'lora_monitor' in app:
            app['lora_monitor'].stop()
        """Cleanup resources using ServiceRegistry"""
        try:
            logger.info("LoRA Manager: Cleaning up services")

            # Get monitors from ServiceRegistry
            lora_monitor = await ServiceRegistry.get_service("lora_monitor")
            if lora_monitor:
                lora_monitor.stop()
                logger.info("Stopped LoRA monitor")

            checkpoint_monitor = await ServiceRegistry.get_service("checkpoint_monitor")
            if checkpoint_monitor:
                checkpoint_monitor.stop()
                logger.info("Stopped checkpoint monitor")

            # Close CivitaiClient gracefully
            civitai_client = await ServiceRegistry.get_service("civitai_client")
            if civitai_client:
                await civitai_client.close()
                logger.info("Closed CivitaiClient connection")

        except Exception as e:
            logger.error(f"Error during cleanup: {e}", exc_info=True)

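As background for the route registration above, here is a minimal, self-contained aiohttp sketch (hypothetical paths, not the plugin's actual setup) of the same pattern: add a static route, record the mapping, and schedule async initialization on startup.

import asyncio
from aiohttp import web

route_mappings = {}  # target directory -> URL prefix, analogous to config.add_route_mapping

async def init_services(app):
    # Stand-in for the background initialization scheduled via app.on_startup
    asyncio.create_task(asyncio.sleep(0), name='example_init')

app = web.Application()
app.router.add_static('/loras_static/root1/preview', '/tmp')  # hypothetical preview directory
route_mappings['/tmp'] = '/loras_static/root1/preview'
app.on_startup.append(init_services)
# web.run_app(app)  # ComfyUI's PromptServer owns the real app; shown only to make the sketch runnable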
18
py/metadata_collector/__init__.py
Normal file
@@ -0,0 +1,18 @@
import os
import importlib
from .metadata_hook import MetadataHook
from .metadata_registry import MetadataRegistry

def init():
    # Install hooks to collect metadata during execution
    MetadataHook.install()

    # Initialize registry
    registry = MetadataRegistry()

    print("ComfyUI Metadata Collector initialized")

def get_metadata(prompt_id=None):
    """Helper function to get metadata from the registry"""
    registry = MetadataRegistry()
    return registry.get_metadata(prompt_id)
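A short sketch of how the helper above is consumed later in this commit: inside a node's FUNCTION, the metadata collected for the currently executing prompt can be fetched and serialized (relative imports assume the caller sits one package above metadata_collector, as the SaveImage and DebugMetadata nodes do):

# Hypothetical node method; it mirrors the pattern used by DebugMetadata further below
from ..metadata_collector import get_metadata
from ..metadata_collector.metadata_processor import MetadataProcessor

def process(self, images):
    metadata = get_metadata()  # data recorded for the current prompt_id
    return (MetadataProcessor.to_json(metadata),)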
12
py/metadata_collector/constants.py
Normal file
@@ -0,0 +1,12 @@
"""Constants used by the metadata collector"""

# Individual category constants
MODELS = "models"
PROMPTS = "prompts"
SAMPLING = "sampling"
LORAS = "loras"
SIZE = "size"
IMAGES = "images"  # Added new category for image results

# Collection of categories for iteration
METADATA_CATEGORIES = [MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES]  # Added IMAGES to categories
123
py/metadata_collector/metadata_hook.py
Normal file
@@ -0,0 +1,123 @@
import sys
import inspect
from .metadata_registry import MetadataRegistry

class MetadataHook:
    """Install hooks for metadata collection"""

    @staticmethod
    def install():
        """Install hooks to collect metadata during execution"""
        try:
            # Import ComfyUI's execution module
            execution = None
            try:
                # Try direct import first
                import execution # type: ignore
            except ImportError:
                # Try to locate from system modules
                for module_name in sys.modules:
                    if module_name.endswith('.execution'):
                        execution = sys.modules[module_name]
                        break

            # If we can't find the execution module, we can't install hooks
            if execution is None:
                print("Could not locate ComfyUI execution module, metadata collection disabled")
                return

            # Store the original _map_node_over_list function
            original_map_node_over_list = execution._map_node_over_list

            # Define the wrapped _map_node_over_list function
            def map_node_over_list_with_metadata(obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None):
                # Only collect metadata when calling the main function of nodes
                if func == obj.FUNCTION and hasattr(obj, '__class__'):
                    try:
                        # Get the current prompt_id from the registry
                        registry = MetadataRegistry()
                        prompt_id = registry.current_prompt_id

                        if prompt_id is not None:
                            # Get node class type
                            class_type = obj.__class__.__name__

                            # Unique ID might be available through the obj if it has a unique_id field
                            node_id = getattr(obj, 'unique_id', None)
                            if node_id is None and pre_execute_cb:
                                # Try to extract node_id through reflection on GraphBuilder.set_default_prefix
                                frame = inspect.currentframe()
                                while frame:
                                    if 'unique_id' in frame.f_locals:
                                        node_id = frame.f_locals['unique_id']
                                        break
                                    frame = frame.f_back

                            # Record inputs before execution
                            if node_id is not None:
                                registry.record_node_execution(node_id, class_type, input_data_all, None)
                    except Exception as e:
                        print(f"Error collecting metadata (pre-execution): {str(e)}")

                # Execute the original function
                results = original_map_node_over_list(obj, input_data_all, func, allow_interrupt, execution_block_cb, pre_execute_cb)

                # After execution, collect outputs for relevant nodes
                if func == obj.FUNCTION and hasattr(obj, '__class__'):
                    try:
                        # Get the current prompt_id from the registry
                        registry = MetadataRegistry()
                        prompt_id = registry.current_prompt_id

                        if prompt_id is not None:
                            # Get node class type
                            class_type = obj.__class__.__name__

                            # Unique ID might be available through the obj if it has a unique_id field
                            node_id = getattr(obj, 'unique_id', None)
                            if node_id is None and pre_execute_cb:
                                # Try to extract node_id through reflection
                                frame = inspect.currentframe()
                                while frame:
                                    if 'unique_id' in frame.f_locals:
                                        node_id = frame.f_locals['unique_id']
                                        break
                                    frame = frame.f_back

                            # Record outputs after execution
                            if node_id is not None:
                                registry.update_node_execution(node_id, class_type, results)
                    except Exception as e:
                        print(f"Error collecting metadata (post-execution): {str(e)}")

                return results

            # Also hook the execute function to track the current prompt_id
            original_execute = execution.execute

            def execute_with_prompt_tracking(*args, **kwargs):
                if len(args) >= 7: # Check if we have enough arguments
                    server, prompt, caches, node_id, extra_data, executed, prompt_id = args[:7]
                    registry = MetadataRegistry()

                    # Start collection if this is a new prompt
                    if not registry.current_prompt_id or registry.current_prompt_id != prompt_id:
                        registry.start_collection(prompt_id)

                    # Store the dynprompt reference for node lookups
                    if hasattr(prompt, 'original_prompt'):
                        registry.set_current_prompt(prompt)

                # Execute the original function
                return original_execute(*args, **kwargs)

            # Replace the functions
            execution._map_node_over_list = map_node_over_list_with_metadata
            execution.execute = execute_with_prompt_tracking
            # Make map_node_over_list public to avoid it being hidden by hooks
            execution.map_node_over_list = original_map_node_over_list

            print("Metadata collection hooks installed for runtime values")

        except Exception as e:
            print(f"Error installing metadata hooks: {str(e)}")
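The hook above is plain module-level monkey-patching: keep a reference to the original function, wrap it, and assign the wrapper back. A minimal standalone sketch of that wrap-and-restore pattern (toy module and function, not ComfyUI's execution module):

import types

toy = types.SimpleNamespace(run=lambda x: x * 2)  # stand-in for the module being patched
original_run = toy.run

def run_with_logging(x):
    print(f"before run({x})")        # pre-execution hook
    result = original_run(x)         # call the original implementation
    print(f"after run -> {result}")  # post-execution hook
    return result

toy.run = run_with_logging  # install the wrapper
print(toy.run(3))           # runs both hooks, then prints 6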
245
py/metadata_collector/metadata_processor.py
Normal file
@@ -0,0 +1,245 @@
|
||||
import json
|
||||
|
||||
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE
|
||||
|
||||
class MetadataProcessor:
|
||||
"""Process and format collected metadata"""
|
||||
|
||||
@staticmethod
|
||||
def find_primary_sampler(metadata):
|
||||
"""Find the primary KSampler node (with denoise=1)"""
|
||||
primary_sampler = None
|
||||
primary_sampler_id = None
|
||||
|
||||
# First, check for KSamplerAdvanced with add_noise="enable"
|
||||
for node_id, sampler_info in metadata.get(SAMPLING, {}).items():
|
||||
parameters = sampler_info.get("parameters", {})
|
||||
add_noise = parameters.get("add_noise")
|
||||
|
||||
# If add_noise is "enable", this is likely the primary sampler for KSamplerAdvanced
|
||||
if add_noise == "enable":
|
||||
primary_sampler = sampler_info
|
||||
primary_sampler_id = node_id
|
||||
break
|
||||
|
||||
# If no KSamplerAdvanced found, fall back to traditional KSampler with denoise=1
|
||||
if primary_sampler is None:
|
||||
for node_id, sampler_info in metadata.get(SAMPLING, {}).items():
|
||||
parameters = sampler_info.get("parameters", {})
|
||||
denoise = parameters.get("denoise")
|
||||
|
||||
# If denoise is 1.0, this is likely the primary sampler
|
||||
if denoise == 1.0 or denoise == 1:
|
||||
primary_sampler = sampler_info
|
||||
primary_sampler_id = node_id
|
||||
break
|
||||
|
||||
return primary_sampler_id, primary_sampler
|
||||
|
||||
@staticmethod
|
||||
def trace_node_input(prompt, node_id, input_name, target_class=None, max_depth=10):
|
||||
"""
|
||||
Trace an input connection from a node to find the source node
|
||||
|
||||
Parameters:
|
||||
- prompt: The prompt object containing node connections
|
||||
- node_id: ID of the starting node
|
||||
- input_name: Name of the input to trace
|
||||
- target_class: Optional class name to search for (e.g., "CLIPTextEncode")
|
||||
- max_depth: Maximum depth to follow the node chain to prevent infinite loops
|
||||
|
||||
Returns:
|
||||
- node_id of the found node, or None if not found
|
||||
"""
|
||||
if not prompt or not prompt.original_prompt or node_id not in prompt.original_prompt:
|
||||
return None
|
||||
|
||||
# For depth tracking
|
||||
current_depth = 0
|
||||
|
||||
current_node_id = node_id
|
||||
current_input = input_name
|
||||
|
||||
while current_depth < max_depth:
|
||||
if current_node_id not in prompt.original_prompt:
|
||||
return None
|
||||
|
||||
node_inputs = prompt.original_prompt[current_node_id].get("inputs", {})
|
||||
if current_input not in node_inputs:
|
||||
return None
|
||||
|
||||
input_value = node_inputs[current_input]
|
||||
# Input connections are formatted as [node_id, output_index]
|
||||
if isinstance(input_value, list) and len(input_value) >= 2:
|
||||
found_node_id = input_value[0] # Connected node_id
|
||||
|
||||
# If we're looking for a specific node class
|
||||
if target_class and prompt.original_prompt[found_node_id].get("class_type") == target_class:
|
||||
return found_node_id
|
||||
|
||||
# If we're not looking for a specific class or haven't found it yet
|
||||
if not target_class:
|
||||
return found_node_id
|
||||
|
||||
# Continue tracing through intermediate nodes
|
||||
current_node_id = found_node_id
|
||||
# For most conditioning nodes, the input we want to follow is named "conditioning"
|
||||
if "conditioning" in prompt.original_prompt[current_node_id].get("inputs", {}):
|
||||
current_input = "conditioning"
|
||||
else:
|
||||
# If there's no "conditioning" input, we can't trace further
|
||||
return found_node_id if not target_class else None
|
||||
else:
|
||||
# We've reached a node with no further connections
|
||||
return None
|
||||
|
||||
current_depth += 1
|
||||
|
||||
# If we've reached max depth without finding target_class
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def find_primary_checkpoint(metadata):
|
||||
"""Find the primary checkpoint model in the workflow"""
|
||||
if not metadata.get(MODELS):
|
||||
return None
|
||||
|
||||
# In most workflows, there's only one checkpoint, so we can just take the first one
|
||||
for node_id, model_info in metadata.get(MODELS, {}).items():
|
||||
if model_info.get("type") == "checkpoint":
|
||||
return model_info.get("name")
|
||||
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def extract_generation_params(metadata):
|
||||
"""Extract generation parameters from metadata using node relationships"""
|
||||
params = {
|
||||
"prompt": "",
|
||||
"negative_prompt": "",
|
||||
"seed": None,
|
||||
"steps": None,
|
||||
"cfg_scale": None,
|
||||
"guidance": None, # Add guidance parameter
|
||||
"sampler": None,
|
||||
"scheduler": None,
|
||||
"checkpoint": None,
|
||||
"loras": "",
|
||||
"size": None,
|
||||
"clip_skip": None
|
||||
}
|
||||
|
||||
# Get the prompt object for node relationship tracing
|
||||
prompt = metadata.get("current_prompt")
|
||||
|
||||
# Find the primary KSampler node
|
||||
primary_sampler_id, primary_sampler = MetadataProcessor.find_primary_sampler(metadata)
|
||||
|
||||
# Directly get checkpoint from metadata instead of tracing
|
||||
checkpoint = MetadataProcessor.find_primary_checkpoint(metadata)
|
||||
if checkpoint:
|
||||
params["checkpoint"] = checkpoint
|
||||
|
||||
if primary_sampler:
|
||||
# Extract sampling parameters
|
||||
sampling_params = primary_sampler.get("parameters", {})
|
||||
# Handle both seed and noise_seed
|
||||
params["seed"] = sampling_params.get("seed") if sampling_params.get("seed") is not None else sampling_params.get("noise_seed")
|
||||
params["steps"] = sampling_params.get("steps")
|
||||
params["cfg_scale"] = sampling_params.get("cfg")
|
||||
params["sampler"] = sampling_params.get("sampler_name")
|
||||
params["scheduler"] = sampling_params.get("scheduler")
|
||||
|
||||
# Trace connections from the primary sampler
|
||||
if prompt and primary_sampler_id:
|
||||
# Trace positive prompt - look specifically for CLIPTextEncode
|
||||
positive_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "positive", "CLIPTextEncode", max_depth=10)
|
||||
if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
|
||||
params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")
|
||||
|
||||
# Find any FluxGuidance nodes in the positive conditioning path
|
||||
flux_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "positive", "FluxGuidance", max_depth=5)
|
||||
if flux_node_id and flux_node_id in metadata.get(SAMPLING, {}):
|
||||
flux_params = metadata[SAMPLING][flux_node_id].get("parameters", {})
|
||||
params["guidance"] = flux_params.get("guidance")
|
||||
|
||||
# Trace negative prompt - look specifically for CLIPTextEncode
|
||||
negative_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "negative", "CLIPTextEncode", max_depth=10)
|
||||
if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
|
||||
params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")
|
||||
|
||||
# Check if the sampler itself has size information (from latent_image)
|
||||
if primary_sampler_id in metadata.get(SIZE, {}):
|
||||
width = metadata[SIZE][primary_sampler_id].get("width")
|
||||
height = metadata[SIZE][primary_sampler_id].get("height")
|
||||
if width and height:
|
||||
params["size"] = f"{width}x{height}"
|
||||
else:
|
||||
# Fallback to the previous trace method if needed
|
||||
latent_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "latent_image")
|
||||
if latent_node_id:
|
||||
# Follow chain to find EmptyLatentImage node
|
||||
size_found = False
|
||||
current_node_id = latent_node_id
|
||||
|
||||
# Limit depth to avoid infinite loops in complex workflows
|
||||
max_depth = 10
|
||||
for _ in range(max_depth):
|
||||
if current_node_id in metadata.get(SIZE, {}):
|
||||
width = metadata[SIZE][current_node_id].get("width")
|
||||
height = metadata[SIZE][current_node_id].get("height")
|
||||
if width and height:
|
||||
params["size"] = f"{width}x{height}"
|
||||
size_found = True
|
||||
break
|
||||
|
||||
# Try to follow the chain
|
||||
if prompt and prompt.original_prompt and current_node_id in prompt.original_prompt:
|
||||
node_info = prompt.original_prompt[current_node_id]
|
||||
if "inputs" in node_info:
|
||||
# Look for a connection that might lead to size information
|
||||
for input_name, input_value in node_info["inputs"].items():
|
||||
if isinstance(input_value, list) and len(input_value) >= 2:
|
||||
current_node_id = input_value[0]
|
||||
break
|
||||
else:
|
||||
break # No connections to follow
|
||||
else:
|
||||
break # No inputs to follow
|
||||
else:
|
||||
break # Can't follow further
|
||||
|
||||
# Extract LoRAs using the standardized format
|
||||
lora_parts = []
|
||||
for node_id, lora_info in metadata.get(LORAS, {}).items():
|
||||
# Access the lora_list from the standardized format
|
||||
lora_list = lora_info.get("lora_list", [])
|
||||
for lora in lora_list:
|
||||
name = lora.get("name", "unknown")
|
||||
strength = lora.get("strength", 1.0)
|
||||
lora_parts.append(f"<lora:{name}:{strength}>")
|
||||
|
||||
params["loras"] = " ".join(lora_parts)
|
||||
|
||||
# Set default clip_skip value
|
||||
params["clip_skip"] = "1" # Common default
|
||||
|
||||
return params
|
||||
|
||||
@staticmethod
|
||||
def to_dict(metadata):
|
||||
"""Convert extracted metadata to the ComfyUI output.json format"""
|
||||
params = MetadataProcessor.extract_generation_params(metadata)
|
||||
|
||||
# Convert all values to strings to match output.json format
|
||||
for key in params:
|
||||
if params[key] is not None:
|
||||
params[key] = str(params[key])
|
||||
|
||||
return params
|
||||
|
||||
@staticmethod
|
||||
def to_json(metadata):
|
||||
"""Convert metadata to JSON string"""
|
||||
params = MetadataProcessor.to_dict(metadata)
|
||||
return json.dumps(params, indent=4)
|
||||
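To see the metadata shape MetadataProcessor consumes, here is a small hand-built example (hypothetical node ids and values) run through to_dict; with no current_prompt object attached, the prompt-tracing branches are simply skipped:

# Category keys mirror constants.py; field names mirror what the extractors record
fake_metadata = {
    "models":   {"4": {"name": "sd_xl_base_1.0.safetensors", "type": "checkpoint", "node_id": "4"}},
    "prompts":  {},
    "sampling": {"3": {"parameters": {"seed": 42, "steps": 30, "cfg": 7.0,
                                      "sampler_name": "euler", "scheduler": "normal",
                                      "denoise": 1.0}, "node_id": "3"}},
    "loras":    {"7": {"lora_list": [{"name": "detail_tweaker", "strength": 0.8}], "node_id": "7"}},
    "size":     {"3": {"width": 1024, "height": 1024, "node_id": "3"}},
    "images":   {},
}

print(MetadataProcessor.to_dict(fake_metadata))
# -> checkpoint, seed, steps, cfg_scale, sampler, scheduler, size and loras filled in; prompts stay empty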
275
py/metadata_collector/metadata_registry.py
Normal file
@@ -0,0 +1,275 @@
|
||||
import time
|
||||
from nodes import NODE_CLASS_MAPPINGS
|
||||
from .node_extractors import NODE_EXTRACTORS, GenericNodeExtractor
|
||||
from .constants import METADATA_CATEGORIES, IMAGES
|
||||
|
||||
class MetadataRegistry:
|
||||
"""A singleton registry to store and retrieve workflow metadata"""
|
||||
_instance = None
|
||||
|
||||
def __new__(cls):
|
||||
if cls._instance is None:
|
||||
cls._instance = super().__new__(cls)
|
||||
cls._instance._reset()
|
||||
return cls._instance
|
||||
|
||||
def _reset(self):
|
||||
self.current_prompt_id = None
|
||||
self.current_prompt = None
|
||||
self.metadata = {}
|
||||
self.prompt_metadata = {}
|
||||
self.executed_nodes = set()
|
||||
|
||||
# Node-level cache for metadata
|
||||
self.node_cache = {}
|
||||
|
||||
# Limit the number of stored prompts
|
||||
self.max_prompt_history = 3
|
||||
|
||||
# Categories we want to track and retrieve from cache
|
||||
self.metadata_categories = METADATA_CATEGORIES
|
||||
|
||||
def _clean_old_prompts(self):
|
||||
"""Clean up old prompt metadata, keeping only recent ones"""
|
||||
if len(self.prompt_metadata) <= self.max_prompt_history:
|
||||
return
|
||||
|
||||
# Sort all prompt_ids by timestamp
|
||||
sorted_prompts = sorted(
|
||||
self.prompt_metadata.keys(),
|
||||
key=lambda pid: self.prompt_metadata[pid].get("timestamp", 0)
|
||||
)
|
||||
|
||||
# Remove oldest records
|
||||
prompts_to_remove = sorted_prompts[:len(sorted_prompts) - self.max_prompt_history]
|
||||
for pid in prompts_to_remove:
|
||||
del self.prompt_metadata[pid]
|
||||
|
||||
def start_collection(self, prompt_id):
|
||||
"""Begin metadata collection for a new prompt"""
|
||||
self.current_prompt_id = prompt_id
|
||||
self.executed_nodes = set()
|
||||
self.prompt_metadata[prompt_id] = {
|
||||
category: {} for category in METADATA_CATEGORIES
|
||||
}
|
||||
# Add additional metadata fields
|
||||
self.prompt_metadata[prompt_id].update({
|
||||
"execution_order": [],
|
||||
"current_prompt": None, # Will store the prompt object
|
||||
"timestamp": time.time()
|
||||
})
|
||||
|
||||
# Clean up old prompt data
|
||||
self._clean_old_prompts()
|
||||
|
||||
def set_current_prompt(self, prompt):
|
||||
"""Set the current prompt object reference"""
|
||||
self.current_prompt = prompt
|
||||
if self.current_prompt_id and self.current_prompt_id in self.prompt_metadata:
|
||||
# Store the prompt in the metadata for later relationship tracing
|
||||
self.prompt_metadata[self.current_prompt_id]["current_prompt"] = prompt
|
||||
|
||||
def get_metadata(self, prompt_id=None):
|
||||
"""Get collected metadata for a prompt"""
|
||||
key = prompt_id if prompt_id is not None else self.current_prompt_id
|
||||
if key not in self.prompt_metadata:
|
||||
return {}
|
||||
|
||||
metadata = self.prompt_metadata[key]
|
||||
|
||||
# If we have a current prompt object, check for non-executed nodes
|
||||
prompt_obj = metadata.get("current_prompt")
|
||||
if prompt_obj and hasattr(prompt_obj, "original_prompt"):
|
||||
original_prompt = prompt_obj.original_prompt
|
||||
|
||||
# Fill in missing metadata from cache for nodes that weren't executed
|
||||
self._fill_missing_metadata(key, original_prompt)
|
||||
|
||||
return self.prompt_metadata.get(key, {})
|
||||
|
||||
def _fill_missing_metadata(self, prompt_id, original_prompt):
|
||||
"""Fill missing metadata from cache for non-executed nodes"""
|
||||
if not original_prompt:
|
||||
return
|
||||
|
||||
executed_nodes = self.executed_nodes
|
||||
metadata = self.prompt_metadata[prompt_id]
|
||||
|
||||
# Iterate through nodes in the original prompt
|
||||
for node_id, node_data in original_prompt.items():
|
||||
# Skip if already executed in this run
|
||||
if node_id in executed_nodes:
|
||||
continue
|
||||
|
||||
# Get the node type from the prompt (this is the key in NODE_CLASS_MAPPINGS)
|
||||
prompt_class_type = node_data.get("class_type")
|
||||
if not prompt_class_type:
|
||||
continue
|
||||
|
||||
# Convert to actual class name (which is what we use in our cache)
|
||||
class_type = prompt_class_type
|
||||
if prompt_class_type in NODE_CLASS_MAPPINGS:
|
||||
class_obj = NODE_CLASS_MAPPINGS[prompt_class_type]
|
||||
class_type = class_obj.__name__
|
||||
|
||||
# Create cache key using the actual class name
|
||||
cache_key = f"{node_id}:{class_type}"
|
||||
|
||||
# Check if this node type is relevant for metadata collection
|
||||
if class_type in NODE_EXTRACTORS:
|
||||
# Check if we have cached metadata for this node
|
||||
if cache_key in self.node_cache:
|
||||
cached_data = self.node_cache[cache_key]
|
||||
|
||||
# Apply cached metadata to the current metadata
|
||||
for category in self.metadata_categories:
|
||||
if category in cached_data and node_id in cached_data[category]:
|
||||
if node_id not in metadata[category]:
|
||||
metadata[category][node_id] = cached_data[category][node_id]
|
||||
|
||||
def record_node_execution(self, node_id, class_type, inputs, outputs):
|
||||
"""Record information about a node's execution"""
|
||||
if not self.current_prompt_id:
|
||||
return
|
||||
|
||||
# Add to execution order and mark as executed
|
||||
if node_id not in self.executed_nodes:
|
||||
self.executed_nodes.add(node_id)
|
||||
self.prompt_metadata[self.current_prompt_id]["execution_order"].append(node_id)
|
||||
|
||||
# Process inputs to simplify working with them
|
||||
processed_inputs = {}
|
||||
for input_name, input_values in inputs.items():
|
||||
if isinstance(input_values, list) and len(input_values) > 0:
|
||||
# For single values, just use the first one (most common case)
|
||||
processed_inputs[input_name] = input_values[0]
|
||||
else:
|
||||
processed_inputs[input_name] = input_values
|
||||
|
||||
# Extract node-specific metadata
|
||||
extractor = NODE_EXTRACTORS.get(class_type, GenericNodeExtractor)
|
||||
extractor.extract(
|
||||
node_id,
|
||||
processed_inputs,
|
||||
outputs,
|
||||
self.prompt_metadata[self.current_prompt_id]
|
||||
)
|
||||
|
||||
# Cache this node's metadata
|
||||
self._cache_node_metadata(node_id, class_type)
|
||||
|
||||
def update_node_execution(self, node_id, class_type, outputs):
|
||||
"""Update node metadata with output information"""
|
||||
if not self.current_prompt_id:
|
||||
return
|
||||
|
||||
# Process outputs to make them more usable
|
||||
processed_outputs = outputs
|
||||
|
||||
# Use the same extractor to update with outputs
|
||||
extractor = NODE_EXTRACTORS.get(class_type, GenericNodeExtractor)
|
||||
if hasattr(extractor, 'update'):
|
||||
extractor.update(
|
||||
node_id,
|
||||
processed_outputs,
|
||||
self.prompt_metadata[self.current_prompt_id]
|
||||
)
|
||||
|
||||
# Update the cached metadata for this node
|
||||
self._cache_node_metadata(node_id, class_type)
|
||||
|
||||
def _cache_node_metadata(self, node_id, class_type):
|
||||
"""Cache the metadata for a specific node"""
|
||||
if not self.current_prompt_id or not node_id or not class_type:
|
||||
return
|
||||
|
||||
# Create a cache key combining node_id and class_type
|
||||
cache_key = f"{node_id}:{class_type}"
|
||||
|
||||
# Create a shallow copy of the node's metadata
|
||||
node_metadata = {}
|
||||
current_metadata = self.prompt_metadata[self.current_prompt_id]
|
||||
|
||||
for category in self.metadata_categories:
|
||||
if category in current_metadata and node_id in current_metadata[category]:
|
||||
if category not in node_metadata:
|
||||
node_metadata[category] = {}
|
||||
node_metadata[category][node_id] = current_metadata[category][node_id]
|
||||
|
||||
# Save to cache if we have any metadata for this node
|
||||
if any(node_metadata.values()):
|
||||
self.node_cache[cache_key] = node_metadata
|
||||
|
||||
def clear_unused_cache(self):
|
||||
"""Clean up node_cache entries that are no longer in use"""
|
||||
# Collect all node_ids currently in prompt_metadata
|
||||
active_node_ids = set()
|
||||
for prompt_data in self.prompt_metadata.values():
|
||||
for category in self.metadata_categories:
|
||||
if category in prompt_data:
|
||||
active_node_ids.update(prompt_data[category].keys())
|
||||
|
||||
# Find cache keys that are no longer needed
|
||||
keys_to_remove = []
|
||||
for cache_key in self.node_cache:
|
||||
node_id = cache_key.split(':')[0]
|
||||
if node_id not in active_node_ids:
|
||||
keys_to_remove.append(cache_key)
|
||||
|
||||
# Remove cache entries that are no longer needed
|
||||
for key in keys_to_remove:
|
||||
del self.node_cache[key]
|
||||
|
||||
def clear_metadata(self, prompt_id=None):
|
||||
"""Clear metadata for a specific prompt or reset all data"""
|
||||
if prompt_id is not None:
|
||||
if prompt_id in self.prompt_metadata:
|
||||
del self.prompt_metadata[prompt_id]
|
||||
# Clean up cache after removing prompt
|
||||
self.clear_unused_cache()
|
||||
else:
|
||||
# Reset all data
|
||||
self._reset()
|
||||
|
||||
def get_first_decoded_image(self, prompt_id=None):
|
||||
"""Get the first decoded image result"""
|
||||
key = prompt_id if prompt_id is not None else self.current_prompt_id
|
||||
if key not in self.prompt_metadata:
|
||||
return None
|
||||
|
||||
metadata = self.prompt_metadata[key]
|
||||
if IMAGES in metadata and "first_decode" in metadata[IMAGES]:
|
||||
image_data = metadata[IMAGES]["first_decode"]["image"]
|
||||
|
||||
# If it's an image batch or tuple, handle various formats
|
||||
if isinstance(image_data, (list, tuple)) and len(image_data) > 0:
|
||||
# Return first element of list/tuple
|
||||
return image_data[0]
|
||||
|
||||
# If it's a tensor, return as is for processing in the route handler
|
||||
return image_data
|
||||
|
||||
# If no image is found in the current metadata, try to find it in the cache
|
||||
# This handles the case where VAEDecode was cached by ComfyUI and not executed
|
||||
prompt_obj = metadata.get("current_prompt")
|
||||
if prompt_obj and hasattr(prompt_obj, "original_prompt"):
|
||||
original_prompt = prompt_obj.original_prompt
|
||||
for node_id, node_data in original_prompt.items():
|
||||
class_type = node_data.get("class_type")
|
||||
if class_type and class_type in NODE_CLASS_MAPPINGS:
|
||||
class_obj = NODE_CLASS_MAPPINGS[class_type]
|
||||
class_name = class_obj.__name__
|
||||
# Check if this is a VAEDecode node
|
||||
if class_name == "VAEDecode":
|
||||
# Try to find this node in the cache
|
||||
cache_key = f"{node_id}:{class_name}"
|
||||
if cache_key in self.node_cache:
|
||||
cached_data = self.node_cache[cache_key]
|
||||
if IMAGES in cached_data and node_id in cached_data[IMAGES]:
|
||||
image_data = cached_data[IMAGES][node_id]["image"]
|
||||
# Handle different image formats
|
||||
if isinstance(image_data, (list, tuple)) and len(image_data) > 0:
|
||||
return image_data[0]
|
||||
return image_data
|
||||
|
||||
return None
|
||||
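MetadataRegistry is a process-wide singleton that keeps only the most recent prompts. A stripped-down standalone sketch of that pattern (not the real class, which also depends on ComfyUI's nodes module) behaves like this:

import time

class TinyRegistry:
    _instance = None
    MAX_HISTORY = 3  # mirrors max_prompt_history above

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.prompts = {}
        return cls._instance

    def start(self, prompt_id):
        self.prompts[prompt_id] = {"timestamp": time.time()}
        # Drop the oldest entries once the cap is exceeded
        while len(self.prompts) > self.MAX_HISTORY:
            oldest = min(self.prompts, key=lambda pid: self.prompts[pid]["timestamp"])
            del self.prompts[oldest]

assert TinyRegistry() is TinyRegistry()  # same object every time
for i in range(5):
    TinyRegistry().start(f"prompt-{i}")
print(sorted(TinyRegistry().prompts))    # only the most recent prompts remain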
280
py/metadata_collector/node_extractors.py
Normal file
@@ -0,0 +1,280 @@
|
||||
import os
|
||||
|
||||
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES
|
||||
|
||||
|
||||
class NodeMetadataExtractor:
|
||||
"""Base class for node-specific metadata extraction"""
|
||||
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
"""Extract metadata from node inputs/outputs"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def update(node_id, outputs, metadata):
|
||||
"""Update metadata with node outputs after execution"""
|
||||
pass
|
||||
|
||||
class GenericNodeExtractor(NodeMetadataExtractor):
|
||||
"""Default extractor for nodes without specific handling"""
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
pass
|
||||
|
||||
class CheckpointLoaderExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs or "ckpt_name" not in inputs:
|
||||
return
|
||||
|
||||
model_name = inputs.get("ckpt_name")
|
||||
if model_name:
|
||||
metadata[MODELS][node_id] = {
|
||||
"name": model_name,
|
||||
"type": "checkpoint",
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
class CLIPTextEncodeExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs or "text" not in inputs:
|
||||
return
|
||||
|
||||
text = inputs.get("text", "")
|
||||
metadata[PROMPTS][node_id] = {
|
||||
"text": text,
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
class SamplerExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs:
|
||||
return
|
||||
|
||||
sampling_params = {}
|
||||
for key in ["seed", "steps", "cfg", "sampler_name", "scheduler", "denoise"]:
|
||||
if key in inputs:
|
||||
sampling_params[key] = inputs[key]
|
||||
|
||||
metadata[SAMPLING][node_id] = {
|
||||
"parameters": sampling_params,
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
# Extract latent image dimensions if available
|
||||
if "latent_image" in inputs and inputs["latent_image"] is not None:
|
||||
latent = inputs["latent_image"]
|
||||
if isinstance(latent, dict) and "samples" in latent:
|
||||
# Extract dimensions from latent tensor
|
||||
samples = latent["samples"]
|
||||
if hasattr(samples, "shape") and len(samples.shape) >= 4:
# Correct shape interpretation: [batch_size, channels, height/8, width/8]
# Multiply by 8 to get actual pixel dimensions
height = int(samples.shape[2] * 8)
width = int(samples.shape[3] * 8)
|
||||
|
||||
if SIZE not in metadata:
|
||||
metadata[SIZE] = {}
|
||||
|
||||
metadata[SIZE][node_id] = {
|
||||
"width": width,
|
||||
"height": height,
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
class KSamplerAdvancedExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs:
|
||||
return
|
||||
|
||||
sampling_params = {}
|
||||
for key in ["noise_seed", "steps", "cfg", "sampler_name", "scheduler", "add_noise"]:
|
||||
if key in inputs:
|
||||
sampling_params[key] = inputs[key]
|
||||
|
||||
metadata[SAMPLING][node_id] = {
|
||||
"parameters": sampling_params,
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
# Extract latent image dimensions if available
|
||||
if "latent_image" in inputs and inputs["latent_image"] is not None:
|
||||
latent = inputs["latent_image"]
|
||||
if isinstance(latent, dict) and "samples" in latent:
|
||||
# Extract dimensions from latent tensor
|
||||
samples = latent["samples"]
|
||||
if hasattr(samples, "shape") and len(samples.shape) >= 4:
# Correct shape interpretation: [batch_size, channels, height/8, width/8]
# Multiply by 8 to get actual pixel dimensions
height = int(samples.shape[2] * 8)
width = int(samples.shape[3] * 8)
|
||||
|
||||
if SIZE not in metadata:
|
||||
metadata[SIZE] = {}
|
||||
|
||||
metadata[SIZE][node_id] = {
|
||||
"width": width,
|
||||
"height": height,
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
class LoraLoaderExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs or "lora_name" not in inputs:
|
||||
return
|
||||
|
||||
lora_name = inputs.get("lora_name")
|
||||
# Extract base filename without extension from path
|
||||
lora_name = os.path.splitext(os.path.basename(lora_name))[0]
|
||||
strength_model = round(float(inputs.get("strength_model", 1.0)), 2)
|
||||
|
||||
# Use the standardized format with lora_list
|
||||
metadata[LORAS][node_id] = {
|
||||
"lora_list": [
|
||||
{
|
||||
"name": lora_name,
|
||||
"strength": strength_model
|
||||
}
|
||||
],
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
class ImageSizeExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs:
|
||||
return
|
||||
|
||||
width = inputs.get("width", 512)
|
||||
height = inputs.get("height", 512)
|
||||
|
||||
if SIZE not in metadata:
|
||||
metadata[SIZE] = {}
|
||||
|
||||
metadata[SIZE][node_id] = {
|
||||
"width": width,
|
||||
"height": height,
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
class LoraLoaderManagerExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs:
|
||||
return
|
||||
|
||||
active_loras = []
|
||||
|
||||
# Process lora_stack if available
|
||||
if "lora_stack" in inputs:
|
||||
lora_stack = inputs.get("lora_stack", [])
|
||||
for lora_path, model_strength, clip_strength in lora_stack:
|
||||
# Extract lora name from path (following the format in lora_loader.py)
|
||||
lora_name = os.path.splitext(os.path.basename(lora_path))[0]
|
||||
active_loras.append({
|
||||
"name": lora_name,
|
||||
"strength": model_strength
|
||||
})
|
||||
|
||||
# Process loras from inputs
|
||||
if "loras" in inputs:
|
||||
loras_data = inputs.get("loras", [])
|
||||
|
||||
# Handle new format: {'loras': {'__value__': [...]}}
|
||||
if isinstance(loras_data, dict) and '__value__' in loras_data:
|
||||
loras_list = loras_data['__value__']
|
||||
# Handle old format: {'loras': [...]}
|
||||
elif isinstance(loras_data, list):
|
||||
loras_list = loras_data
|
||||
else:
|
||||
loras_list = []
|
||||
|
||||
# Filter for active loras
|
||||
for lora in loras_list:
|
||||
if isinstance(lora, dict) and lora.get("active", True) and not lora.get("_isDummy", False):
|
||||
active_loras.append({
|
||||
"name": lora.get("name", ""),
|
||||
"strength": float(lora.get("strength", 1.0))
|
||||
})
|
||||
|
||||
if active_loras:
|
||||
metadata[LORAS][node_id] = {
|
||||
"lora_list": active_loras,
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
class FluxGuidanceExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs or "guidance" not in inputs:
|
||||
return
|
||||
|
||||
guidance_value = inputs.get("guidance")
|
||||
|
||||
# Store the guidance value in SAMPLING category
|
||||
if node_id not in metadata[SAMPLING]:
|
||||
metadata[SAMPLING][node_id] = {"parameters": {}, "node_id": node_id}
|
||||
|
||||
metadata[SAMPLING][node_id]["parameters"]["guidance"] = guidance_value
|
||||
|
||||
class UNETLoaderExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
if not inputs or "unet_name" not in inputs:
|
||||
return
|
||||
|
||||
model_name = inputs.get("unet_name")
|
||||
if model_name:
|
||||
metadata[MODELS][node_id] = {
|
||||
"name": model_name,
|
||||
"type": "checkpoint",
|
||||
"node_id": node_id
|
||||
}
|
||||
|
||||
class VAEDecodeExtractor(NodeMetadataExtractor):
|
||||
@staticmethod
|
||||
def extract(node_id, inputs, outputs, metadata):
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def update(node_id, outputs, metadata):
|
||||
# Ensure IMAGES category exists
|
||||
if IMAGES not in metadata:
|
||||
metadata[IMAGES] = {}
|
||||
|
||||
# Save image data under node ID index to be captured by caching mechanism
|
||||
metadata[IMAGES][node_id] = {
|
||||
"node_id": node_id,
|
||||
"image": outputs
|
||||
}
|
||||
|
||||
# Only set first_decode if it hasn't been recorded yet
|
||||
if "first_decode" not in metadata[IMAGES]:
|
||||
metadata[IMAGES]["first_decode"] = metadata[IMAGES][node_id]
|
||||
|
||||
# Registry of node-specific extractors
|
||||
NODE_EXTRACTORS = {
|
||||
# Sampling
|
||||
"KSampler": SamplerExtractor,
|
||||
"KSamplerAdvanced": KSamplerAdvancedExtractor, # Add KSamplerAdvanced
|
||||
"SamplerCustomAdvanced": SamplerExtractor, # Add SamplerCustomAdvanced
|
||||
# Loaders
|
||||
"CheckpointLoaderSimple": CheckpointLoaderExtractor,
|
||||
"UNETLoader": UNETLoaderExtractor, # Updated to use dedicated extractor
|
||||
"LoraLoader": LoraLoaderExtractor,
|
||||
"LoraManagerLoader": LoraLoaderManagerExtractor,
|
||||
# Conditioning
|
||||
"CLIPTextEncode": CLIPTextEncodeExtractor,
|
||||
# Latent
|
||||
"EmptyLatentImage": ImageSizeExtractor,
|
||||
# Flux
|
||||
"FluxGuidance": FluxGuidanceExtractor, # Add FluxGuidance
|
||||
# Image
|
||||
"VAEDecode": VAEDecodeExtractor, # Added VAEDecode extractor
|
||||
# Add other nodes as needed
|
||||
}
|
||||
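NODE_EXTRACTORS is the extension point here: supporting another node type means writing one small extractor and adding a registry entry. A hedged sketch for a hypothetical clip-skip extractor (the input name "stop_at_clip_layer" is assumed, verify it against the actual CLIPSetLastLayer node):

class ClipSetLastLayerExtractor(NodeMetadataExtractor):
    @staticmethod
    def extract(node_id, inputs, outputs, metadata):
        if not inputs or "stop_at_clip_layer" not in inputs:
            return
        if node_id not in metadata[SAMPLING]:
            metadata[SAMPLING][node_id] = {"parameters": {}, "node_id": node_id}
        # Store as a positive clip-skip count alongside the other sampling parameters
        metadata[SAMPLING][node_id]["parameters"]["clip_skip"] = abs(inputs["stop_at_clip_layer"])

NODE_EXTRACTORS["CLIPSetLastLayer"] = ClipSetLastLayerExtractor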
35
py/nodes/debug_metadata.py
Normal file
@@ -0,0 +1,35 @@
import logging
from ..metadata_collector.metadata_processor import MetadataProcessor

logger = logging.getLogger(__name__)

class DebugMetadata:
    NAME = "Debug Metadata (LoraManager)"
    CATEGORY = "Lora Manager/utils"
    DESCRIPTION = "Debug node to verify metadata_processor functionality"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("metadata_json",)
    FUNCTION = "process_metadata"

    def process_metadata(self, images):
        try:
            # Get the current execution context's metadata
            from ..metadata_collector import get_metadata
            metadata = get_metadata()

            # Use the MetadataProcessor to convert it to JSON string
            metadata_json = MetadataProcessor.to_json(metadata)

            return (metadata_json,)
        except Exception as e:
            logger.error(f"Error processing metadata: {e}")
            return ("{}",)  # Return empty JSON object in case of error
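For the node above to show up in ComfyUI it still has to be exported through the plugin's node mappings; the usual ComfyUI convention (the exact wiring in this repo is not shown in the diff, so treat this as an assumption) is:

# Typically lives in the package's top-level __init__.py
from .py.nodes.debug_metadata import DebugMetadata

NODE_CLASS_MAPPINGS = {
    DebugMetadata.NAME: DebugMetadata,
}
NODE_DISPLAY_NAME_MAPPINGS = {
    DebugMetadata.NAME: "Debug Metadata (LoraManager)",
}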
@@ -5,10 +5,11 @@ import re
|
||||
import numpy as np
|
||||
import folder_paths # type: ignore
|
||||
from ..services.lora_scanner import LoraScanner
|
||||
from ..workflow.parser import WorkflowParser
|
||||
from ..services.checkpoint_scanner import CheckpointScanner
|
||||
from ..metadata_collector.metadata_processor import MetadataProcessor
|
||||
from ..metadata_collector import get_metadata
|
||||
from PIL import Image, PngImagePlugin
|
||||
import piexif
|
||||
from io import BytesIO
|
||||
|
||||
class SaveImage:
|
||||
NAME = "Save Image (LoraManager)"
|
||||
@@ -34,8 +35,7 @@ class SaveImage:
|
||||
"file_format": (["png", "jpeg", "webp"],),
|
||||
},
|
||||
"optional": {
|
||||
"custom_prompt": ("STRING", {"default": "", "forceInput": True}),
|
||||
"lossless_webp": ("BOOLEAN", {"default": True}),
|
||||
"lossless_webp": ("BOOLEAN", {"default": False}),
|
||||
"quality": ("INT", {"default": 100, "min": 1, "max": 100}),
|
||||
"embed_workflow": ("BOOLEAN", {"default": False}),
|
||||
"add_counter_to_filename": ("BOOLEAN", {"default": True}),
|
||||
@@ -54,28 +54,61 @@ class SaveImage:
|
||||
async def get_lora_hash(self, lora_name):
|
||||
"""Get the lora hash from cache"""
|
||||
scanner = await LoraScanner.get_instance()
|
||||
cache = await scanner.get_cached_data()
|
||||
|
||||
# Use the new direct filename lookup method
|
||||
hash_value = scanner.get_hash_by_filename(lora_name)
|
||||
if hash_value:
|
||||
return hash_value
|
||||
|
||||
# Fallback to old method for compatibility
|
||||
cache = await scanner.get_cached_data()
|
||||
for item in cache.raw_data:
|
||||
if item.get('file_name') == lora_name:
|
||||
return item.get('sha256')
|
||||
return None
|
||||
|
||||
async def format_metadata(self, parsed_workflow, custom_prompt=None):
|
||||
async def get_checkpoint_hash(self, checkpoint_path):
|
||||
"""Get the checkpoint hash from cache"""
|
||||
scanner = await CheckpointScanner.get_instance()
|
||||
|
||||
if not checkpoint_path:
|
||||
return None
|
||||
|
||||
# Extract basename without extension
|
||||
checkpoint_name = os.path.basename(checkpoint_path)
|
||||
checkpoint_name = os.path.splitext(checkpoint_name)[0]
|
||||
|
||||
# Try direct filename lookup first
|
||||
hash_value = scanner.get_hash_by_filename(checkpoint_name)
|
||||
if hash_value:
|
||||
return hash_value
|
||||
|
||||
# Fallback to old method for compatibility
|
||||
cache = await scanner.get_cached_data()
|
||||
normalized_path = checkpoint_path.replace('\\', '/')
|
||||
|
||||
for item in cache.raw_data:
|
||||
if item.get('file_name') == checkpoint_name and item.get('file_path').endswith(normalized_path):
|
||||
return item.get('sha256')
|
||||
|
||||
return None
|
||||
|
||||
async def format_metadata(self, metadata_dict):
|
||||
"""Format metadata in the requested format similar to userComment example"""
|
||||
if not parsed_workflow:
|
||||
if not metadata_dict:
|
||||
return ""
|
||||
|
||||
# Extract the prompt and negative prompt
|
||||
prompt = parsed_workflow.get('prompt', '')
|
||||
negative_prompt = parsed_workflow.get('negative_prompt', '')
|
||||
# Helper function to only add parameter if value is not None
|
||||
def add_param_if_not_none(param_list, label, value):
|
||||
if value is not None:
|
||||
param_list.append(f"{label}: {value}")
|
||||
|
||||
# Override prompt with custom_prompt if provided
|
||||
if custom_prompt:
|
||||
prompt = custom_prompt
|
||||
# Extract the prompt and negative prompt
|
||||
prompt = metadata_dict.get('prompt', '')
|
||||
negative_prompt = metadata_dict.get('negative_prompt', '')
|
||||
|
||||
# Extract loras from the prompt if present
|
||||
loras_text = parsed_workflow.get('loras', '')
|
||||
loras_text = metadata_dict.get('loras', '')
|
||||
lora_hashes = {}
|
||||
|
||||
# If loras are found, add them on a new line after the prompt
|
||||
@@ -104,11 +137,15 @@ class SaveImage:
|
||||
params = []
|
||||
|
||||
# Add standard parameters in the correct order
|
||||
if 'steps' in parsed_workflow:
|
||||
params.append(f"Steps: {parsed_workflow.get('steps')}")
|
||||
if 'steps' in metadata_dict:
|
||||
add_param_if_not_none(params, "Steps", metadata_dict.get('steps'))
|
||||
|
||||
if 'sampler' in parsed_workflow:
|
||||
sampler = parsed_workflow.get('sampler')
|
||||
# Combine sampler and scheduler information
|
||||
sampler_name = None
|
||||
scheduler_name = None
|
||||
|
||||
if 'sampler' in metadata_dict:
|
||||
sampler = metadata_dict.get('sampler')
|
||||
# Convert ComfyUI sampler names to user-friendly names
|
||||
sampler_mapping = {
|
||||
'euler': 'Euler',
|
||||
@@ -128,10 +165,9 @@ class SaveImage:
|
||||
'ddim': 'DDIM'
|
||||
}
|
||||
sampler_name = sampler_mapping.get(sampler, sampler)
|
||||
params.append(f"Sampler: {sampler_name}")
|
||||
|
||||
if 'scheduler' in parsed_workflow:
|
||||
scheduler = parsed_workflow.get('scheduler')
|
||||
if 'scheduler' in metadata_dict:
|
||||
scheduler = metadata_dict.get('scheduler')
|
||||
scheduler_mapping = {
|
||||
'normal': 'Simple',
|
||||
'karras': 'Karras',
|
||||
@@ -140,29 +176,48 @@ class SaveImage:
|
||||
'sgm_quadratic': 'SGM Quadratic'
|
||||
}
|
||||
scheduler_name = scheduler_mapping.get(scheduler, scheduler)
|
||||
params.append(f"Schedule type: {scheduler_name}")
|
||||
|
||||
# CFG scale (cfg in parsed_workflow)
|
||||
if 'cfg_scale' in parsed_workflow:
|
||||
params.append(f"CFG scale: {parsed_workflow.get('cfg_scale')}")
|
||||
elif 'cfg' in parsed_workflow:
|
||||
params.append(f"CFG scale: {parsed_workflow.get('cfg')}")
|
||||
# Add combined sampler and scheduler information
|
||||
if sampler_name:
|
||||
if scheduler_name:
|
||||
params.append(f"Sampler: {sampler_name} {scheduler_name}")
|
||||
else:
|
||||
params.append(f"Sampler: {sampler_name}")
|
||||
|
||||
# CFG scale (Use guidance if available, otherwise fall back to cfg_scale or cfg)
|
||||
if 'guidance' in metadata_dict:
|
||||
add_param_if_not_none(params, "CFG scale", metadata_dict.get('guidance'))
|
||||
elif 'cfg_scale' in metadata_dict:
|
||||
add_param_if_not_none(params, "CFG scale", metadata_dict.get('cfg_scale'))
|
||||
elif 'cfg' in metadata_dict:
|
||||
add_param_if_not_none(params, "CFG scale", metadata_dict.get('cfg'))
|
||||
|
||||
# Seed
|
||||
if 'seed' in parsed_workflow:
|
||||
params.append(f"Seed: {parsed_workflow.get('seed')}")
|
||||
if 'seed' in metadata_dict:
|
||||
add_param_if_not_none(params, "Seed", metadata_dict.get('seed'))
|
||||
|
||||
# Size
|
||||
if 'size' in parsed_workflow:
|
||||
params.append(f"Size: {parsed_workflow.get('size')}")
|
||||
if 'size' in metadata_dict:
|
||||
add_param_if_not_none(params, "Size", metadata_dict.get('size'))
|
||||
|
||||
# Model info
|
||||
if 'checkpoint' in parsed_workflow:
|
||||
# Extract basename without path
|
||||
checkpoint = os.path.basename(parsed_workflow.get('checkpoint', ''))
|
||||
# Remove extension if present
|
||||
checkpoint = os.path.splitext(checkpoint)[0]
|
||||
params.append(f"Model: {checkpoint}")
|
||||
if 'checkpoint' in metadata_dict:
|
||||
# Ensure checkpoint is a string before processing
|
||||
checkpoint = metadata_dict.get('checkpoint')
|
||||
if checkpoint is not None:
|
||||
# Get model hash
|
||||
model_hash = await self.get_checkpoint_hash(checkpoint)
# Extract basename without path
|
||||
checkpoint_name = os.path.basename(checkpoint)
|
||||
# Remove extension if present
|
||||
checkpoint_name = os.path.splitext(checkpoint_name)[0]
# Add model hash if available
|
||||
if model_hash:
|
||||
params.append(f"Model hash: {model_hash[:10]}, Model: {checkpoint_name}")
|
||||
else:
|
||||
params.append(f"Model: {checkpoint_name}")
# Add LoRA hashes if available
|
||||
if lora_hashes:
|
||||
@@ -181,9 +236,9 @@ class SaveImage:
# credit to nkchocoai
|
||||
# Add format_filename method to handle pattern substitution
|
||||
def format_filename(self, filename, parsed_workflow):
|
||||
def format_filename(self, filename, metadata_dict):
|
||||
"""Format filename with metadata values"""
|
||||
if not parsed_workflow:
|
||||
if not metadata_dict:
|
||||
return filename
result = re.findall(self.pattern_format, filename)
|
||||
@@ -191,30 +246,30 @@ class SaveImage:
|
||||
parts = segment.replace("%", "").split(":")
|
||||
key = parts[0]
|
||||
|
||||
if key == "seed" and 'seed' in parsed_workflow:
|
||||
filename = filename.replace(segment, str(parsed_workflow.get('seed', '')))
|
||||
elif key == "width" and 'size' in parsed_workflow:
|
||||
size = parsed_workflow.get('size', 'x')
|
||||
if key == "seed" and 'seed' in metadata_dict:
|
||||
filename = filename.replace(segment, str(metadata_dict.get('seed', '')))
|
||||
elif key == "width" and 'size' in metadata_dict:
|
||||
size = metadata_dict.get('size', 'x')
|
||||
w = size.split('x')[0] if isinstance(size, str) else size[0]
|
||||
filename = filename.replace(segment, str(w))
|
||||
elif key == "height" and 'size' in parsed_workflow:
|
||||
size = parsed_workflow.get('size', 'x')
|
||||
elif key == "height" and 'size' in metadata_dict:
|
||||
size = metadata_dict.get('size', 'x')
|
||||
h = size.split('x')[1] if isinstance(size, str) else size[1]
|
||||
filename = filename.replace(segment, str(h))
|
||||
elif key == "pprompt" and 'prompt' in parsed_workflow:
|
||||
prompt = parsed_workflow.get('prompt', '').replace("\n", " ")
|
||||
elif key == "pprompt" and 'prompt' in metadata_dict:
|
||||
prompt = metadata_dict.get('prompt', '').replace("\n", " ")
|
||||
if len(parts) >= 2:
|
||||
length = int(parts[1])
|
||||
prompt = prompt[:length]
|
||||
filename = filename.replace(segment, prompt.strip())
|
||||
elif key == "nprompt" and 'negative_prompt' in parsed_workflow:
|
||||
prompt = parsed_workflow.get('negative_prompt', '').replace("\n", " ")
|
||||
elif key == "nprompt" and 'negative_prompt' in metadata_dict:
|
||||
prompt = metadata_dict.get('negative_prompt', '').replace("\n", " ")
|
||||
if len(parts) >= 2:
|
||||
length = int(parts[1])
|
||||
prompt = prompt[:length]
|
||||
filename = filename.replace(segment, prompt.strip())
|
||||
elif key == "model" and 'checkpoint' in parsed_workflow:
|
||||
model = parsed_workflow.get('checkpoint', '')
|
||||
elif key == "model" and 'checkpoint' in metadata_dict:
|
||||
model = metadata_dict.get('checkpoint', '')
|
||||
model = os.path.splitext(os.path.basename(model))[0]
|
||||
if len(parts) >= 2:
|
||||
length = int(parts[1])
|
||||
@@ -224,12 +279,13 @@ class SaveImage:
|
||||
from datetime import datetime
|
||||
now = datetime.now()
|
||||
date_table = {
|
||||
"yyyy": str(now.year),
|
||||
"MM": str(now.month).zfill(2),
|
||||
"dd": str(now.day).zfill(2),
|
||||
"hh": str(now.hour).zfill(2),
|
||||
"mm": str(now.minute).zfill(2),
|
||||
"ss": str(now.second).zfill(2),
|
||||
"yyyy": f"{now.year:04d}",
|
||||
"yy": f"{now.year % 100:02d}",
|
||||
"MM": f"{now.month:02d}",
|
||||
"dd": f"{now.day:02d}",
|
||||
"hh": f"{now.hour:02d}",
|
||||
"mm": f"{now.minute:02d}",
|
||||
"ss": f"{now.second:02d}",
|
||||
}
|
||||
if len(parts) >= 2:
|
||||
date_format = parts[1]
|
||||
@@ -245,23 +301,19 @@ class SaveImage:
|
||||
return filename
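A usage sketch for format_filename; the regex in self.pattern_format is defined elsewhere, so the %key% / %key:arg% syntax is inferred from the branches above, and the instance creation and values here are hypothetical:

node = SaveImage()
metadata_dict = {
    'seed': 123456,
    'size': '832x1216',
    'prompt': 'a scenic mountain lake at sunrise',
    'checkpoint': 'SDXL/sd_xl_base_1.0.safetensors',
}
print(node.format_filename("%model%_%seed%_%date:yyyy-MM-dd%", metadata_dict))
# e.g. "sd_xl_base_1.0_123456_2025-03-25"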
def save_images(self, images, filename_prefix, file_format, prompt=None, extra_pnginfo=None,
|
||||
lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True,
|
||||
custom_prompt=None):
|
||||
lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True):
|
||||
"""Save images with metadata"""
|
||||
results = []
# Parse the workflow using the WorkflowParser
|
||||
parser = WorkflowParser()
|
||||
if prompt:
|
||||
parsed_workflow = parser.parse_workflow(prompt)
|
||||
else:
|
||||
parsed_workflow = {}
|
||||
# Get metadata using the metadata collector
|
||||
raw_metadata = get_metadata()
|
||||
metadata_dict = MetadataProcessor.to_dict(raw_metadata)
# Get or create metadata asynchronously
|
||||
metadata = asyncio.run(self.format_metadata(parsed_workflow, custom_prompt))
|
||||
metadata = asyncio.run(self.format_metadata(metadata_dict))
# Process filename_prefix with pattern substitution
|
||||
filename_prefix = self.format_filename(filename_prefix, parsed_workflow)
|
||||
filename_prefix = self.format_filename(filename_prefix, metadata_dict)
# Get initial save path info once for the batch
|
||||
full_output_folder, filename, counter, subfolder, processed_prefix = folder_paths.get_save_image_path(
|
||||
@@ -289,7 +341,8 @@ class SaveImage:
|
||||
if file_format == "png":
|
||||
file = base_filename + ".png"
|
||||
file_extension = ".png"
|
||||
save_kwargs = {"optimize": True, "compress_level": self.compress_level}
|
||||
# Remove "optimize": True to match built-in node behavior
|
||||
save_kwargs = {"compress_level": self.compress_level}
|
||||
pnginfo = PngImagePlugin.PngInfo()
|
||||
elif file_format == "jpeg":
|
||||
file = base_filename + ".jpg"
|
||||
@@ -298,7 +351,8 @@ class SaveImage:
|
||||
elif file_format == "webp":
|
||||
file = base_filename + ".webp"
|
||||
file_extension = ".webp"
|
||||
save_kwargs = {"quality": quality, "lossless": lossless_webp}
|
||||
# Add optimization param to control performance
|
||||
save_kwargs = {"quality": quality, "lossless": lossless_webp, "method": 0}
# Full save path
|
||||
file_path = os.path.join(full_output_folder, file)
|
||||
@@ -346,8 +400,7 @@ class SaveImage:
|
||||
return results
|
||||
|
||||
def process_image(self, images, filename_prefix="ComfyUI", file_format="png", prompt=None, extra_pnginfo=None,
|
||||
lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True,
|
||||
custom_prompt=""):
|
||||
lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True):
|
||||
"""Process and save image with metadata"""
|
||||
# Make sure the output directory exists
|
||||
os.makedirs(self.output_dir, exist_ok=True)
|
||||
@@ -368,8 +421,7 @@ class SaveImage:
|
||||
lossless_webp,
|
||||
quality,
|
||||
embed_workflow,
|
||||
add_counter_to_filename,
|
||||
custom_prompt if custom_prompt.strip() else None
|
||||
add_counter_to_filename
|
||||
)
|
||||
|
||||
return (images,)
|
||||
@@ -2,132 +2,105 @@ import os
|
||||
import json
|
||||
import logging
|
||||
from aiohttp import web
|
||||
from typing import Dict, List
|
||||
from typing import Dict
|
||||
|
||||
from ..utils.model_utils import determine_base_model
|
||||
from ..utils.routes_common import ModelRouteUtils
|
||||
|
||||
from ..services.file_monitor import LoraFileMonitor
|
||||
from ..services.download_manager import DownloadManager
|
||||
from ..services.civitai_client import CivitaiClient
|
||||
from ..config import config
|
||||
from ..services.lora_scanner import LoraScanner
|
||||
from operator import itemgetter
|
||||
from ..services.websocket_manager import ws_manager
|
||||
from ..services.settings_manager import settings
|
||||
import asyncio
|
||||
from .update_routes import UpdateRoutes
|
||||
from ..services.recipe_scanner import RecipeScanner
|
||||
from ..utils.constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
|
||||
from ..utils.exif_utils import ExifUtils
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class ApiRoutes:
|
||||
"""API route handlers for LoRA management"""
|
||||
|
||||
def __init__(self, file_monitor: LoraFileMonitor):
|
||||
self.scanner = LoraScanner()
|
||||
self.civitai_client = CivitaiClient()
|
||||
self.download_manager = DownloadManager(file_monitor)
|
||||
def __init__(self):
|
||||
self.scanner = None # Will be initialized in setup_routes
|
||||
self.civitai_client = None # Will be initialized in setup_routes
|
||||
self.download_manager = None # Will be initialized in setup_routes
|
||||
self._download_lock = asyncio.Lock()
|
||||
|
||||
async def initialize_services(self):
|
||||
"""Initialize services from ServiceRegistry"""
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
self.civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
self.download_manager = await ServiceRegistry.get_download_manager()
|
||||
|
||||
@classmethod
|
||||
def setup_routes(cls, app: web.Application, monitor: LoraFileMonitor):
|
||||
def setup_routes(cls, app: web.Application):
|
||||
"""Register API routes"""
|
||||
routes = cls(monitor)
|
||||
routes = cls()
|
||||
|
||||
# Schedule service initialization on app startup
|
||||
app.on_startup.append(lambda _: routes.initialize_services())
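The sync lambda works because aiohttp applies await to whatever each on_startup callback returns; registering an async handler directly is equivalent, as in this sketch:

async def _init_services(app: web.Application):
    # Same effect as the lambda above: resolve scanner, CivitAI client and download manager at startup
    await routes.initialize_services()

app.on_startup.append(_init_services)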
app.router.add_post('/api/delete_model', routes.delete_model)
|
||||
app.router.add_post('/api/fetch-civitai', routes.fetch_civitai)
|
||||
app.router.add_post('/api/replace_preview', routes.replace_preview)
|
||||
app.router.add_get('/api/loras', routes.get_loras)
|
||||
app.router.add_post('/api/fetch-all-civitai', routes.fetch_all_civitai)
|
||||
app.router.add_get('/ws/fetch-progress', ws_manager.handle_connection)
|
||||
app.router.add_get('/ws/init-progress', ws_manager.handle_init_connection) # Add new WebSocket route
|
||||
app.router.add_get('/api/lora-roots', routes.get_lora_roots)
|
||||
app.router.add_get('/api/folders', routes.get_folders)
|
||||
app.router.add_get('/api/civitai/versions/{model_id}', routes.get_civitai_versions)
|
||||
app.router.add_get('/api/civitai/model/{modelVersionId}', routes.get_civitai_model)
|
||||
app.router.add_get('/api/civitai/model/{hash}', routes.get_civitai_model)
|
||||
app.router.add_get('/api/civitai/model/version/{modelVersionId}', routes.get_civitai_model_by_version)
|
||||
app.router.add_get('/api/civitai/model/hash/{hash}', routes.get_civitai_model_by_hash)
|
||||
app.router.add_post('/api/download-lora', routes.download_lora)
|
||||
app.router.add_post('/api/settings', routes.update_settings)
|
||||
app.router.add_post('/api/move_model', routes.move_model)
|
||||
app.router.add_get('/api/lora-model-description', routes.get_lora_model_description) # Add new route
|
||||
app.router.add_post('/loras/api/save-metadata', routes.save_metadata)
|
||||
app.router.add_post('/api/loras/save-metadata', routes.save_metadata)
|
||||
app.router.add_get('/api/lora-preview-url', routes.get_lora_preview_url) # Add new route
|
||||
app.router.add_post('/api/move_models_bulk', routes.move_models_bulk)
|
||||
app.router.add_get('/api/loras/top-tags', routes.get_top_tags) # Add new route for top tags
|
||||
app.router.add_get('/api/loras/base-models', routes.get_base_models) # Add new route for base models
|
||||
app.router.add_get('/api/lora-civitai-url', routes.get_lora_civitai_url) # Add new route for Civitai URL
|
||||
app.router.add_post('/api/rename_lora', routes.rename_lora) # Add new route for renaming LoRA files
|
||||
app.router.add_get('/api/loras/scan', routes.scan_loras) # Add new route for scanning LoRA files
|
||||
|
||||
# Add update check routes
|
||||
UpdateRoutes.setup_routes(app)
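A registration sketch; it assumes the routes are attached to ComfyUI's PromptServer aiohttp application, which is how this plugin is expected to be wired up:

from server import PromptServer  # ComfyUI's embedded aiohttp server

app = PromptServer.instance.app
ApiRoutes.setup_routes(app)  # the file monitor argument is no longer needed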
async def delete_model(self, request: web.Request) -> web.Response:
|
||||
"""Handle model deletion request"""
|
||||
try:
|
||||
data = await request.json()
|
||||
file_path = data.get('file_path')
|
||||
if not file_path:
|
||||
return web.Response(text='Model path is required', status=400)
|
||||
|
||||
target_dir = os.path.dirname(file_path)
|
||||
file_name = os.path.splitext(os.path.basename(file_path))[0]
|
||||
|
||||
deleted_files = await self._delete_model_files(target_dir, file_name)
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'deleted_files': deleted_files
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error deleting model: {e}", exc_info=True)
|
||||
return web.Response(text=str(e), status=500)
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
return await ModelRouteUtils.handle_delete_model(request, self.scanner)
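The handlers in this class now guard with if self.scanner is None and resolve shared services lazily; the real ServiceRegistry lives in services/service_registry.py and is not shown here, so the following is only a sketch of the async lazy-singleton pattern it implies:

import asyncio

class RegistrySketch:
    _services: dict = {}
    _lock = asyncio.Lock()

    @classmethod
    async def get_service(cls, name, factory):
        # Create each service exactly once, even if several handlers race during startup
        async with cls._lock:
            if name not in cls._services:
                cls._services[name] = factory()
            return cls._services[name]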
async def fetch_civitai(self, request: web.Request) -> web.Response:
|
||||
"""Handle CivitAI metadata fetch request"""
|
||||
try:
|
||||
data = await request.json()
|
||||
metadata_path = os.path.splitext(data['file_path'])[0] + '.metadata.json'
|
||||
|
||||
# Check if model is from CivitAI
|
||||
local_metadata = await self._load_local_metadata(metadata_path)
|
||||
|
||||
# Fetch and update metadata
|
||||
civitai_metadata = await self.civitai_client.get_model_by_hash(local_metadata["sha256"])
|
||||
if not civitai_metadata:
|
||||
return await self._handle_not_found_on_civitai(metadata_path, local_metadata)
|
||||
|
||||
await self._update_model_metadata(metadata_path, local_metadata, civitai_metadata, self.civitai_client)
|
||||
|
||||
return web.json_response({"success": True})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching from CivitAI: {e}", exc_info=True)
|
||||
return web.json_response({"success": False, "error": str(e)}, status=500)
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
return await ModelRouteUtils.handle_fetch_civitai(request, self.scanner)
|
||||
|
||||
async def replace_preview(self, request: web.Request) -> web.Response:
|
||||
"""Handle preview image replacement request"""
|
||||
try:
|
||||
reader = await request.multipart()
|
||||
preview_data, content_type = await self._read_preview_file(reader)
|
||||
model_path = await self._read_model_path(reader)
|
||||
|
||||
preview_path = await self._save_preview_file(model_path, preview_data, content_type)
|
||||
await self._update_preview_metadata(model_path, preview_path)
|
||||
|
||||
# Update preview URL in scanner cache
|
||||
await self.scanner.update_preview_in_cache(model_path, preview_path)
|
||||
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"preview_url": config.get_preview_static_url(preview_path)
|
||||
})
|
||||
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
return await ModelRouteUtils.handle_replace_preview(request, self.scanner)
|
||||
|
||||
async def scan_loras(self, request: web.Request) -> web.Response:
|
||||
"""Force a rescan of LoRA files"""
|
||||
try:
|
||||
await self.scanner.get_cached_data(force_refresh=True)
|
||||
return web.json_response({"status": "success", "message": "LoRA scan completed"})
|
||||
except Exception as e:
|
||||
logger.error(f"Error replacing preview: {e}", exc_info=True)
|
||||
return web.Response(text=str(e), status=500)
|
||||
logger.error(f"Error in scan_loras: {e}", exc_info=True)
|
||||
return web.json_response({"error": str(e)}, status=500)
|
||||
|
||||
async def get_loras(self, request: web.Request) -> web.Response:
|
||||
"""Handle paginated LoRA data request"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
# Parse query parameters
|
||||
page = int(request.query.get('page', '1'))
|
||||
page_size = int(request.query.get('page_size', '20'))
|
||||
@@ -137,10 +110,12 @@ class ApiRoutes:
|
||||
fuzzy_search = request.query.get('fuzzy', 'false').lower() == 'true'
|
||||
|
||||
# Parse search options
|
||||
search_filename = request.query.get('search_filename', 'true').lower() == 'true'
|
||||
search_modelname = request.query.get('search_modelname', 'true').lower() == 'true'
|
||||
search_tags = request.query.get('search_tags', 'false').lower() == 'true'
|
||||
recursive = request.query.get('recursive', 'false').lower() == 'true'
|
||||
search_options = {
|
||||
'filename': request.query.get('search_filename', 'true').lower() == 'true',
|
||||
'modelname': request.query.get('search_modelname', 'true').lower() == 'true',
|
||||
'tags': request.query.get('search_tags', 'false').lower() == 'true',
|
||||
'recursive': request.query.get('recursive', 'false').lower() == 'true'
|
||||
}
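The remaining filter parameters are parsed below; as a usage sketch, a client passes these options as plain query strings (the host and port are assumptions for a default local ComfyUI instance):

import asyncio
import aiohttp

async def list_loras():
    async with aiohttp.ClientSession() as session:
        async with session.get(
            "http://127.0.0.1:8188/api/loras",
            params={"page": 1, "page_size": 20, "search": "style", "fuzzy": "true", "search_tags": "true"},
        ) as resp:
            return await resp.json()

print(asyncio.run(list_loras()))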
# Get filter parameters
|
||||
base_models = request.query.get('base_models', None)
|
||||
@@ -157,14 +132,6 @@ class ApiRoutes:
|
||||
if tags:
|
||||
filters['tags'] = tags.split(',')
|
||||
|
||||
# Add search options to filters
|
||||
search_options = {
|
||||
'filename': search_filename,
|
||||
'modelname': search_modelname,
|
||||
'tags': search_tags,
|
||||
'recursive': recursive
|
||||
}
|
||||
|
||||
# Add lora hash filtering options
|
||||
hash_filters = {}
|
||||
if lora_hash:
|
||||
@@ -223,73 +190,10 @@ class ApiRoutes:
|
||||
"from_civitai": lora.get("from_civitai", True),
|
||||
"usage_tips": lora.get("usage_tips", ""),
|
||||
"notes": lora.get("notes", ""),
|
||||
"civitai": self._filter_civitai_data(lora.get("civitai", {}))
|
||||
"civitai": ModelRouteUtils.filter_civitai_data(lora.get("civitai", {}))
|
||||
}
|
||||
|
||||
def _filter_civitai_data(self, data: Dict) -> Dict:
|
||||
"""Filter relevant fields from CivitAI data"""
|
||||
if not data:
|
||||
return {}
|
||||
|
||||
fields = [
|
||||
"id", "modelId", "name", "createdAt", "updatedAt",
|
||||
"publishedAt", "trainedWords", "baseModel", "description",
|
||||
"model", "images"
|
||||
]
|
||||
return {k: data[k] for k in fields if k in data}
|
||||
|
||||
# Private helper methods
|
||||
async def _delete_model_files(self, target_dir: str, file_name: str) -> List[str]:
|
||||
"""Delete model and associated files"""
|
||||
patterns = [
|
||||
f"{file_name}.safetensors", # Required
|
||||
f"{file_name}.metadata.json",
|
||||
f"{file_name}.preview.png",
|
||||
f"{file_name}.preview.jpg",
|
||||
f"{file_name}.preview.jpeg",
|
||||
f"{file_name}.preview.webp",
|
||||
f"{file_name}.preview.mp4",
|
||||
f"{file_name}.png",
|
||||
f"{file_name}.jpg",
|
||||
f"{file_name}.jpeg",
|
||||
f"{file_name}.webp",
|
||||
f"{file_name}.mp4"
|
||||
]
|
||||
|
||||
deleted = []
|
||||
main_file = patterns[0]
|
||||
main_path = os.path.join(target_dir, main_file).replace(os.sep, '/')
|
||||
|
||||
if os.path.exists(main_path):
|
||||
# Notify file monitor to ignore delete event
|
||||
self.download_manager.file_monitor.handler.add_ignore_path(main_path, 0)
|
||||
|
||||
# Delete file
|
||||
os.remove(main_path)
|
||||
deleted.append(main_path)
|
||||
else:
|
||||
logger.warning(f"Model file not found: {main_file}")
|
||||
|
||||
# Remove from cache
|
||||
cache = await self.scanner.get_cached_data()
|
||||
cache.raw_data = [item for item in cache.raw_data if item['file_path'] != main_path]
|
||||
await cache.resort()
|
||||
|
||||
# update hash index
|
||||
self.scanner._hash_index.remove_by_path(main_path)
|
||||
|
||||
# Delete optional files
|
||||
for pattern in patterns[1:]:
|
||||
path = os.path.join(target_dir, pattern)
|
||||
if os.path.exists(path):
|
||||
try:
|
||||
os.remove(path)
|
||||
deleted.append(pattern)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to delete {pattern}: {e}")
|
||||
|
||||
return deleted
|
||||
|
||||
async def _read_preview_file(self, reader) -> tuple[bytes, str]:
|
||||
"""Read preview file and content type from multipart request"""
|
||||
field = await reader.next()
|
||||
@@ -307,18 +211,29 @@ class ApiRoutes:
|
||||
|
||||
async def _save_preview_file(self, model_path: str, preview_data: bytes, content_type: str) -> str:
|
||||
"""Save preview file and return its path"""
|
||||
# Determine file extension based on content type
|
||||
if content_type.startswith('video/'):
|
||||
extension = '.preview.mp4'
|
||||
else:
|
||||
extension = '.preview.png'
|
||||
|
||||
base_name = os.path.splitext(os.path.basename(model_path))[0]
|
||||
folder = os.path.dirname(model_path)
|
||||
|
||||
# Determine if content is video or image
|
||||
if content_type.startswith('video/'):
|
||||
# For videos, keep original format and use .mp4 extension
|
||||
extension = '.mp4'
|
||||
optimized_data = preview_data
|
||||
else:
|
||||
# For images, optimize and convert to WebP
|
||||
optimized_data, _ = ExifUtils.optimize_image(
|
||||
image_data=preview_data,
|
||||
target_width=CARD_PREVIEW_WIDTH,
|
||||
format='webp',
|
||||
quality=85,
|
||||
preserve_metadata=False
|
||||
)
|
||||
extension = '.webp' # Use .webp without .preview part
|
||||
|
||||
preview_path = os.path.join(folder, base_name + extension).replace(os.sep, '/')
|
||||
|
||||
with open(preview_path, 'wb') as f:
|
||||
f.write(preview_data)
|
||||
f.write(optimized_data)
|
||||
|
||||
return preview_path
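ExifUtils.optimize_image is defined elsewhere; a rough sketch of the resize-and-convert-to-WebP step it is assumed to perform (the 480px width stands in for CARD_PREVIEW_WIDTH, whose actual value is not shown here):

from io import BytesIO
from PIL import Image

def optimize_preview(image_data: bytes, target_width: int = 480) -> bytes:
    img = Image.open(BytesIO(image_data))
    ratio = target_width / img.width
    img = img.resize((target_width, max(1, round(img.height * ratio))))
    buf = BytesIO()
    img.save(buf, format="WEBP", quality=85)
    return buf.getvalue()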
@@ -338,83 +253,26 @@ class ApiRoutes:
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating metadata: {e}")
|
||||
|
||||
async def _load_local_metadata(self, metadata_path: str) -> Dict:
|
||||
"""Load local metadata file"""
|
||||
if os.path.exists(metadata_path):
|
||||
try:
|
||||
with open(metadata_path, 'r', encoding='utf-8') as f:
|
||||
return json.load(f)
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading metadata from {metadata_path}: {e}")
|
||||
return {}
|
||||
|
||||
async def _handle_not_found_on_civitai(self, metadata_path: str, local_metadata: Dict) -> web.Response:
|
||||
"""Handle case when model is not found on CivitAI"""
|
||||
local_metadata['from_civitai'] = False
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(local_metadata, f, indent=2, ensure_ascii=False)
|
||||
return web.json_response(
|
||||
{"success": False, "error": "Not found on CivitAI"},
|
||||
status=404
|
||||
)
|
||||
|
||||
async def _update_model_metadata(self, metadata_path: str, local_metadata: Dict,
|
||||
civitai_metadata: Dict, client: CivitaiClient) -> None:
|
||||
"""Update local metadata with CivitAI data"""
|
||||
local_metadata['civitai'] = civitai_metadata
|
||||
|
||||
# Update model name if available
|
||||
if 'model' in civitai_metadata:
|
||||
if civitai_metadata.get('model', {}).get('name'):
|
||||
local_metadata['model_name'] = civitai_metadata['model']['name']
|
||||
|
||||
# Fetch additional model metadata (description and tags) if we have model ID
|
||||
model_id = civitai_metadata['modelId']
|
||||
if model_id:
|
||||
model_metadata, _ = await client.get_model_metadata(str(model_id))
|
||||
if model_metadata:
|
||||
local_metadata['modelDescription'] = model_metadata.get('description', '')
|
||||
local_metadata['tags'] = model_metadata.get('tags', [])
|
||||
|
||||
# Update base model
|
||||
local_metadata['base_model'] = determine_base_model(civitai_metadata.get('baseModel'))
|
||||
|
||||
# Update preview if needed
|
||||
if not local_metadata.get('preview_url') or not os.path.exists(local_metadata['preview_url']):
|
||||
first_preview = next((img for img in civitai_metadata.get('images', [])), None)
|
||||
if first_preview:
|
||||
preview_ext = '.mp4' if first_preview['type'] == 'video' else os.path.splitext(first_preview['url'])[-1]
|
||||
base_name = os.path.splitext(os.path.splitext(os.path.basename(metadata_path))[0])[0]
|
||||
preview_filename = base_name + preview_ext
|
||||
preview_path = os.path.join(os.path.dirname(metadata_path), preview_filename)
|
||||
|
||||
if await client.download_preview_image(first_preview['url'], preview_path):
|
||||
local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
|
||||
local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
|
||||
|
||||
# Save updated metadata
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(local_metadata, f, indent=2, ensure_ascii=False)
|
||||
|
||||
await self.scanner.update_single_lora_cache(local_metadata['file_path'], local_metadata['file_path'], local_metadata)
|
||||
|
||||
async def fetch_all_civitai(self, request: web.Request) -> web.Response:
|
||||
"""Fetch CivitAI metadata for all loras in the background"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
cache = await self.scanner.get_cached_data()
|
||||
total = len(cache.raw_data)
|
||||
processed = 0
|
||||
success = 0
|
||||
needs_resort = False
|
||||
|
||||
# 准备要处理的 loras
|
||||
# Prepare loras to process
|
||||
to_process = [
|
||||
lora for lora in cache.raw_data
|
||||
if lora.get('sha256') and (not lora.get('civitai') or 'id' not in lora.get('civitai')) and lora.get('from_civitai') # TODO: for lora not from CivitAI but added trainedWords
if lora.get('sha256') and (not lora.get('civitai') or 'id' not in lora.get('civitai')) and lora.get('from_civitai', True) # TODO: for lora not from CivitAI but added trainedWords
|
||||
]
|
||||
total_to_process = len(to_process)
|
||||
|
||||
# 发送初始进度
|
||||
# Send initial progress
|
||||
await ws_manager.broadcast({
|
||||
'status': 'started',
|
||||
'total': total_to_process,
|
||||
@@ -425,10 +283,11 @@ class ApiRoutes:
|
||||
for lora in to_process:
|
||||
try:
|
||||
original_name = lora.get('model_name')
|
||||
if await self._fetch_and_update_single_lora(
|
||||
if await ModelRouteUtils.fetch_and_update_model(
|
||||
sha256=lora['sha256'],
|
||||
file_path=lora['file_path'],
|
||||
lora=lora
|
||||
model_data=lora,
|
||||
update_cache_func=self.scanner.update_single_model_cache
|
||||
):
|
||||
success += 1
|
||||
if original_name != lora.get('model_name'):
|
||||
@@ -436,7 +295,7 @@ class ApiRoutes:
|
||||
|
||||
processed += 1
|
||||
|
||||
# 每处理一个就发送进度更新
|
||||
# Send progress update
|
||||
await ws_manager.broadcast({
|
||||
'status': 'processing',
|
||||
'total': total_to_process,
|
||||
@@ -451,7 +310,7 @@ class ApiRoutes:
|
||||
if needs_resort:
|
||||
await cache.resort(name_only=True)
|
||||
|
||||
# 发送完成消息
|
||||
# Send completion message
|
||||
await ws_manager.broadcast({
|
||||
'status': 'completed',
|
||||
'total': total_to_process,
|
||||
@@ -465,7 +324,7 @@ class ApiRoutes:
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
# 发送错误消息
|
||||
# Send error message
|
||||
await ws_manager.broadcast({
|
||||
'status': 'error',
|
||||
'error': str(e)
|
||||
@@ -473,58 +332,6 @@ class ApiRoutes:
|
||||
logger.error(f"Error in fetch_all_civitai: {e}")
|
||||
return web.Response(text=str(e), status=500)
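The broadcast messages above drive the /ws/fetch-progress endpoint registered earlier; an illustrative client that follows the started/processing/completed statuses (host and port are assumptions):

import asyncio
import json
import aiohttp

async def watch_fetch_progress():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect("http://127.0.0.1:8188/ws/fetch-progress") as ws:
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    data = json.loads(msg.data)
                    print(data.get("status"), data.get("processed", 0), "/", data.get("total", 0))
                    if data.get("status") in ("completed", "error"):
                        break

asyncio.run(watch_fetch_progress())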
async def _fetch_and_update_single_lora(self, sha256: str, file_path: str, lora: dict) -> bool:
|
||||
"""Fetch and update metadata for a single lora without sorting
|
||||
|
||||
Args:
|
||||
sha256: SHA256 hash of the lora file
|
||||
file_path: Path to the lora file
|
||||
lora: The lora object in cache to update
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise
|
||||
"""
|
||||
client = CivitaiClient()
|
||||
try:
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
|
||||
# Check if model is from CivitAI
|
||||
local_metadata = await self._load_local_metadata(metadata_path)
|
||||
|
||||
# Fetch metadata
|
||||
civitai_metadata = await client.get_model_by_hash(sha256)
|
||||
if not civitai_metadata:
|
||||
# Mark as not from CivitAI if not found
|
||||
local_metadata['from_civitai'] = False
|
||||
lora['from_civitai'] = False
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(local_metadata, f, indent=2, ensure_ascii=False)
|
||||
return False
|
||||
|
||||
# Update metadata
|
||||
await self._update_model_metadata(
|
||||
metadata_path,
|
||||
local_metadata,
|
||||
civitai_metadata,
|
||||
client
|
||||
)
|
||||
|
||||
# Update cache object directly
|
||||
lora.update({
|
||||
'model_name': local_metadata.get('model_name'),
|
||||
'preview_url': local_metadata.get('preview_url'),
|
||||
'from_civitai': True,
|
||||
'civitai': civitai_metadata
|
||||
})
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching CivitAI data: {e}")
|
||||
return False
|
||||
finally:
|
||||
await client.close()
|
||||
|
||||
async def get_lora_roots(self, request: web.Request) -> web.Response:
|
||||
"""Get all configured LoRA root directories"""
|
||||
return web.json_response({
|
||||
@@ -533,6 +340,9 @@ class ApiRoutes:
|
||||
|
||||
async def get_folders(self, request: web.Request) -> web.Response:
|
||||
"""Get all folders in the cache"""
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
cache = await self.scanner.get_cached_data()
|
||||
return web.json_response({
|
||||
'folders': cache.folders
|
||||
@@ -541,11 +351,26 @@ class ApiRoutes:
|
||||
async def get_civitai_versions(self, request: web.Request) -> web.Response:
|
||||
"""Get available versions for a Civitai model with local availability info"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
if self.civitai_client is None:
|
||||
self.civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
|
||||
model_id = request.match_info['model_id']
|
||||
versions = await self.civitai_client.get_model_versions(model_id)
|
||||
if not versions:
|
||||
response = await self.civitai_client.get_model_versions(model_id)
|
||||
if not response or not response.get('modelVersions'):
|
||||
return web.Response(status=404, text="Model not found")
|
||||
|
||||
versions = response.get('modelVersions', [])
|
||||
model_type = response.get('type', '')
|
||||
|
||||
# Check model type - should be LORA
|
||||
if model_type.lower() != 'lora':
|
||||
return web.json_response({
|
||||
'error': f"Model type mismatch. Expected LORA, got {model_type}"
|
||||
}, status=400)
|
||||
|
||||
# Check local availability for each version
|
||||
for version in versions:
|
||||
# Find the model file (type="Model") in the files list
|
||||
@@ -556,9 +381,9 @@ class ApiRoutes:
|
||||
sha256 = model_file.get('hashes', {}).get('SHA256')
|
||||
if sha256:
|
||||
# Set existsLocally and localPath at the version level
|
||||
version['existsLocally'] = self.scanner.has_lora_hash(sha256)
|
||||
version['existsLocally'] = self.scanner.has_hash(sha256)
|
||||
if version['existsLocally']:
|
||||
version['localPath'] = self.scanner.get_lora_path_by_hash(sha256)
|
||||
version['localPath'] = self.scanner.get_path_by_hash(sha256)
|
||||
|
||||
# Also set the model file size at the version level for easier access
|
||||
version['modelSizeKB'] = model_file.get('sizeKB')
|
||||
@@ -571,26 +396,59 @@ class ApiRoutes:
|
||||
logger.error(f"Error fetching model versions: {e}")
|
||||
return web.Response(status=500, text=str(e))
|
||||
|
||||
async def get_civitai_model(self, request: web.Request) -> web.Response:
|
||||
"""Get CivitAI model details by model version ID or hash"""
|
||||
async def get_civitai_model_by_version(self, request: web.Request) -> web.Response:
|
||||
"""Get CivitAI model details by model version ID"""
|
||||
try:
|
||||
model_version_id = request.match_info['modelVersionId']
|
||||
if not model_version_id:
|
||||
hash = request.match_info['hash']
|
||||
model = await self.civitai_client.get_model_by_hash(hash)
|
||||
return web.json_response(model)
|
||||
if self.civitai_client is None:
|
||||
self.civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
|
||||
model_version_id = request.match_info.get('modelVersionId')
|
||||
|
||||
# Get model details from Civitai API
|
||||
model = await self.civitai_client.get_model_version_info(model_version_id)
|
||||
model, error_msg = await self.civitai_client.get_model_version_info(model_version_id)
|
||||
|
||||
if not model:
|
||||
# Log warning for failed model retrieval
|
||||
logger.warning(f"Failed to fetch model version {model_version_id}: {error_msg}")
|
||||
|
||||
# Determine status code based on error message
|
||||
status_code = 404 if error_msg and "not found" in error_msg.lower() else 500
|
||||
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": error_msg or "Failed to fetch model information"
|
||||
}, status=status_code)
|
||||
|
||||
return web.json_response(model)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model details: {e}")
|
||||
return web.Response(status=500, text=str(e))
|
||||
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, status=500)
|
||||
|
||||
async def get_civitai_model_by_hash(self, request: web.Request) -> web.Response:
|
||||
"""Get CivitAI model details by hash"""
|
||||
try:
|
||||
if self.civitai_client is None:
|
||||
self.civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
|
||||
hash = request.match_info.get('hash')
|
||||
model = await self.civitai_client.get_model_by_hash(hash)
|
||||
return web.json_response(model)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model details by hash: {e}")
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, status=500)
|
||||
|
||||
async def download_lora(self, request: web.Request) -> web.Response:
|
||||
async with self._download_lock:
|
||||
try:
|
||||
if self.download_manager is None:
|
||||
self.download_manager = await ServiceRegistry.get_download_manager()
|
||||
|
||||
data = await request.json()
|
||||
|
||||
# Create progress callback
|
||||
@@ -662,12 +520,15 @@ class ApiRoutes:
|
||||
|
||||
return web.json_response({'success': True})
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating settings: {e}", exc_info=True) # 添加 exc_info=True 以获取完整堆栈
|
||||
logger.error(f"Error updating settings: {e}", exc_info=True)
|
||||
return web.Response(status=500, text=str(e))
|
||||
|
||||
async def move_model(self, request: web.Request) -> web.Response:
|
||||
"""Handle model move request"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
data = await request.json()
|
||||
file_path = data.get('file_path') # full path of the model file, e.g. /path/to/model.safetensors
|
||||
target_path = data.get('target_path') # folder path to move the model to, e.g. /path/to/target_folder
|
||||
@@ -706,12 +567,17 @@ class ApiRoutes:
|
||||
@classmethod
|
||||
async def cleanup(cls):
|
||||
"""Add cleanup method for application shutdown"""
|
||||
if hasattr(cls, '_instance'):
|
||||
await cls._instance.civitai_client.close()
|
||||
# Now we don't need to store an instance, as services are managed by ServiceRegistry
|
||||
civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
if civitai_client:
|
||||
await civitai_client.close()
|
||||
|
||||
async def save_metadata(self, request: web.Request) -> web.Response:
|
||||
"""Handle saving metadata updates"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
data = await request.json()
|
||||
file_path = data.get('file_path')
|
||||
if not file_path:
|
||||
@@ -724,11 +590,7 @@ class ApiRoutes:
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
|
||||
# Load existing metadata
|
||||
if os.path.exists(metadata_path):
|
||||
with open(metadata_path, 'r', encoding='utf-8') as f:
|
||||
metadata = json.load(f)
|
||||
else:
|
||||
metadata = {}
|
||||
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
|
||||
# Handle nested updates (for civitai.trainedWords)
|
||||
for key, value in metadata_updates.items():
|
||||
@@ -745,7 +607,7 @@ class ApiRoutes:
|
||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
||||
|
||||
# Update cache
|
||||
await self.scanner.update_single_lora_cache(file_path, file_path, metadata)
|
||||
await self.scanner.update_single_model_cache(file_path, file_path, metadata)
|
||||
|
||||
# If model_name was updated, resort the cache
|
||||
if 'model_name' in metadata_updates:
|
||||
@@ -761,6 +623,9 @@ class ApiRoutes:
|
||||
async def get_lora_preview_url(self, request: web.Request) -> web.Response:
|
||||
"""Get the static preview URL for a LoRA file"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
# Get lora file name from query parameters
|
||||
lora_name = request.query.get('name')
|
||||
if not lora_name:
|
||||
@@ -791,11 +656,17 @@ class ApiRoutes:
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting lora preview URL: {e}", exc_info=True)
|
||||
return web.Response(text=str(e), status=500)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
async def get_lora_civitai_url(self, request: web.Request) -> web.Response:
|
||||
"""Get the Civitai URL for a LoRA file"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
# Get lora file name from query parameters
|
||||
lora_name = request.query.get('name')
|
||||
if not lora_name:
|
||||
@@ -841,6 +712,9 @@ class ApiRoutes:
|
||||
async def move_models_bulk(self, request: web.Request) -> web.Response:
|
||||
"""Handle bulk model move request"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
data = await request.json()
|
||||
file_paths = data.get('file_paths', []) # list of full paths of the model files, e.g. ["/path/to/model1.safetensors", "/path/to/model2.safetensors"]
|
||||
target_path = data.get('target_path') # folder path to move the models to, e.g. "/path/to/target_folder"
|
||||
@@ -899,6 +773,9 @@ class ApiRoutes:
|
||||
async def get_lora_model_description(self, request: web.Request) -> web.Response:
|
||||
"""Get model description for a Lora model"""
|
||||
try:
|
||||
if self.civitai_client is None:
|
||||
self.civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
|
||||
# Get parameters
|
||||
model_id = request.query.get('model_id')
|
||||
file_path = request.query.get('file_path')
|
||||
@@ -914,21 +791,16 @@ class ApiRoutes:
|
||||
tags = []
|
||||
if file_path:
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
if os.path.exists(metadata_path):
|
||||
try:
|
||||
with open(metadata_path, 'r', encoding='utf-8') as f:
|
||||
metadata = json.load(f)
|
||||
description = metadata.get('modelDescription')
|
||||
tags = metadata.get('tags', [])
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading metadata from {metadata_path}: {e}")
|
||||
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
description = metadata.get('modelDescription')
|
||||
tags = metadata.get('tags', [])
|
||||
|
||||
# If description is not in metadata, fetch from CivitAI
|
||||
if not description:
|
||||
logger.info(f"Fetching model metadata for model ID: {model_id}")
|
||||
model_metadata, _ = await self.civitai_client.get_model_metadata(model_id)
|
||||
|
||||
if model_metadata:
|
||||
if (model_metadata):
|
||||
description = model_metadata.get('description')
|
||||
tags = model_metadata.get('tags', [])
|
||||
|
||||
@@ -936,16 +808,14 @@ class ApiRoutes:
|
||||
if file_path:
|
||||
try:
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
if os.path.exists(metadata_path):
|
||||
with open(metadata_path, 'r', encoding='utf-8') as f:
|
||||
metadata = json.load(f)
|
||||
|
||||
metadata['modelDescription'] = description
|
||||
metadata['tags'] = tags
|
||||
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
||||
logger.info(f"Saved model metadata to file for {file_path}")
|
||||
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
|
||||
metadata['modelDescription'] = description
|
||||
metadata['tags'] = tags
|
||||
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
||||
logger.info(f"Saved model metadata to file for {file_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error saving model metadata: {e}")
|
||||
|
||||
@@ -956,7 +826,7 @@ class ApiRoutes:
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting model metadata: {e}", exc_info=True)
|
||||
logger.error(f"Error getting model metadata: {e}")
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
@@ -965,6 +835,9 @@ class ApiRoutes:
|
||||
async def get_top_tags(self, request: web.Request) -> web.Response:
|
||||
"""Handle request for top tags sorted by frequency"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
# Parse query parameters
|
||||
limit = int(request.query.get('limit', '20'))
|
||||
|
||||
@@ -990,6 +863,9 @@ class ApiRoutes:
|
||||
async def get_base_models(self, request: web.Request) -> web.Response:
|
||||
"""Get base models used in loras"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
# Parse query parameters
|
||||
limit = int(request.query.get('limit', '20'))
|
||||
|
||||
@@ -1011,15 +887,15 @@ class ApiRoutes:
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
def get_multipart_ext(self, filename):
|
||||
parts = filename.split(".")
|
||||
if len(parts) > 2: # filename has a multi-part extension
return "." + ".".join(parts[-2:]) # take the last two parts, e.g. ".metadata.json"
return os.path.splitext(filename)[1] # otherwise use the plain extension, e.g. ".safetensors"
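get_multipart_ext keeps compound extensions intact so a model and its sidecar files can be renamed as one unit; illustrative behaviour (instantiating the class outside the server is for demonstration only):

routes = ApiRoutes()
print(routes.get_multipart_ext("model.metadata.json"))  # ".metadata.json"
print(routes.get_multipart_ext("model.preview.webp"))   # ".preview.webp"
print(routes.get_multipart_ext("model.safetensors"))    # ".safetensors"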
async def rename_lora(self, request: web.Request) -> web.Response:
|
||||
"""Handle renaming a LoRA file and its associated files"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
if self.download_manager is None:
|
||||
self.download_manager = await ServiceRegistry.get_download_manager()
|
||||
|
||||
data = await request.json()
|
||||
file_path = data.get('file_path')
|
||||
new_file_name = data.get('new_file_name')
|
||||
@@ -1054,18 +930,12 @@ class ApiRoutes:
|
||||
patterns = [
|
||||
f"{old_file_name}.safetensors", # Required
|
||||
f"{old_file_name}.metadata.json",
|
||||
f"{old_file_name}.preview.png",
|
||||
f"{old_file_name}.preview.jpg",
|
||||
f"{old_file_name}.preview.jpeg",
|
||||
f"{old_file_name}.preview.webp",
|
||||
f"{old_file_name}.preview.mp4",
|
||||
f"{old_file_name}.png",
|
||||
f"{old_file_name}.jpg",
|
||||
f"{old_file_name}.jpeg",
|
||||
f"{old_file_name}.webp",
|
||||
f"{old_file_name}.mp4"
|
||||
]
|
||||
|
||||
# Add all preview file extensions
|
||||
for ext in PREVIEW_EXTENSIONS:
|
||||
patterns.append(f"{old_file_name}{ext}")
|
||||
|
||||
# Find all matching files
|
||||
existing_files = []
|
||||
for pattern in patterns:
|
||||
@@ -1079,12 +949,8 @@ class ApiRoutes:
|
||||
metadata_path = os.path.join(target_dir, f"{old_file_name}.metadata.json")
|
||||
|
||||
if os.path.exists(metadata_path):
|
||||
try:
|
||||
with open(metadata_path, 'r', encoding='utf-8') as f:
|
||||
metadata = json.load(f)
|
||||
hash_value = metadata.get('sha256')
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading metadata for rename: {e}")
|
||||
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
hash_value = metadata.get('sha256')
|
||||
|
||||
# Rename all files
|
||||
renamed_files = []
|
||||
@@ -1092,15 +958,18 @@ class ApiRoutes:
|
||||
|
||||
# Notify file monitor to ignore these events
|
||||
main_file_path = os.path.join(target_dir, f"{old_file_name}.safetensors")
|
||||
if os.path.exists(main_file_path) and self.download_manager.file_monitor:
|
||||
# Add old and new paths to ignore list
|
||||
file_size = os.path.getsize(main_file_path)
|
||||
self.download_manager.file_monitor.handler.add_ignore_path(main_file_path, file_size)
|
||||
self.download_manager.file_monitor.handler.add_ignore_path(new_file_path, file_size)
|
||||
if os.path.exists(main_file_path):
|
||||
# Get lora monitor through ServiceRegistry instead of download_manager
|
||||
lora_monitor = await ServiceRegistry.get_lora_monitor()
|
||||
if lora_monitor:
|
||||
# Add old and new paths to ignore list
|
||||
file_size = os.path.getsize(main_file_path)
|
||||
lora_monitor.handler.add_ignore_path(main_file_path, file_size)
|
||||
lora_monitor.handler.add_ignore_path(new_file_path, file_size)
|
||||
|
||||
for old_path, pattern in existing_files:
|
||||
# Get the file extension like .safetensors or .metadata.json
|
||||
ext = self.get_multipart_ext(pattern)
|
||||
ext = ModelRouteUtils.get_multipart_ext(pattern)
|
||||
|
||||
# Create the new path
|
||||
new_path = os.path.join(target_dir, f"{new_file_name}{ext}").replace(os.sep, '/')
|
||||
@@ -1122,7 +991,7 @@ class ApiRoutes:
|
||||
# Update preview_url if it exists
|
||||
if 'preview_url' in metadata and metadata['preview_url']:
|
||||
old_preview = metadata['preview_url']
|
||||
ext = self.get_multipart_ext(old_preview)
|
||||
ext = ModelRouteUtils.get_multipart_ext(old_preview)
|
||||
new_preview = os.path.join(target_dir, f"{new_file_name}{ext}").replace(os.sep, '/')
|
||||
metadata['preview_url'] = new_preview
|
||||
|
||||
@@ -1132,11 +1001,11 @@ class ApiRoutes:
|
||||
|
||||
# Update the scanner cache
|
||||
if metadata:
|
||||
await self.scanner.update_single_lora_cache(file_path, new_file_path, metadata)
|
||||
await self.scanner.update_single_model_cache(file_path, new_file_path, metadata)
|
||||
|
||||
# Update recipe files and cache if hash is available
|
||||
if hash_value:
|
||||
recipe_scanner = RecipeScanner(self.scanner)
|
||||
recipe_scanner = await ServiceRegistry.get_recipe_scanner()
|
||||
recipes_updated, cache_updated = await recipe_scanner.update_lora_filename_by_hash(hash_value, new_file_name)
|
||||
logger.info(f"Updated {recipes_updated} recipe files and {cache_updated} cache entries for renamed LoRA")
|
||||
|
||||
|
||||
@@ -1,37 +1,483 @@
|
||||
import os
|
||||
from aiohttp import web
|
||||
import json
|
||||
import jinja2
|
||||
from aiohttp import web
|
||||
import logging
|
||||
import asyncio
|
||||
|
||||
from ..utils.routes_common import ModelRouteUtils
|
||||
from ..utils.constants import NSFW_LEVELS
|
||||
from ..services.websocket_manager import ws_manager
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
from ..config import config
|
||||
from ..services.settings_manager import settings
|
||||
from ..utils.utils import fuzzy_match
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
|
||||
|
||||
class CheckpointsRoutes:
|
||||
"""Route handlers for Checkpoints management endpoints"""
|
||||
"""API routes for checkpoint management"""
|
||||
|
||||
def __init__(self):
|
||||
self.scanner = None # Will be initialized in setup_routes
|
||||
self.template_env = jinja2.Environment(
|
||||
loader=jinja2.FileSystemLoader(config.templates_path),
|
||||
autoescape=True
|
||||
)
|
||||
self.download_manager = None # Will be initialized in setup_routes
|
||||
self._download_lock = asyncio.Lock()
|
||||
|
||||
async def initialize_services(self):
|
||||
"""Initialize services from ServiceRegistry"""
|
||||
self.scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
self.download_manager = await ServiceRegistry.get_download_manager()
|
||||
|
||||
def setup_routes(self, app):
|
||||
"""Register routes with the aiohttp app"""
|
||||
# Schedule service initialization on app startup
|
||||
app.on_startup.append(lambda _: self.initialize_services())
|
||||
|
||||
app.router.add_get('/checkpoints', self.handle_checkpoints_page)
|
||||
app.router.add_get('/api/checkpoints', self.get_checkpoints)
|
||||
app.router.add_post('/api/checkpoints/fetch-all-civitai', self.fetch_all_civitai)
|
||||
app.router.add_get('/api/checkpoints/base-models', self.get_base_models)
|
||||
app.router.add_get('/api/checkpoints/top-tags', self.get_top_tags)
|
||||
app.router.add_get('/api/checkpoints/scan', self.scan_checkpoints)
|
||||
app.router.add_get('/api/checkpoints/info/{name}', self.get_checkpoint_info)
|
||||
app.router.add_get('/api/checkpoints/roots', self.get_checkpoint_roots)
|
||||
app.router.add_get('/api/checkpoints/civitai/versions/{model_id}', self.get_civitai_versions) # Add new route
|
||||
|
||||
# Add new routes for model management similar to LoRA routes
|
||||
app.router.add_post('/api/checkpoints/delete', self.delete_model)
|
||||
app.router.add_post('/api/checkpoints/fetch-civitai', self.fetch_civitai)
|
||||
app.router.add_post('/api/checkpoints/replace-preview', self.replace_preview)
|
||||
app.router.add_post('/api/checkpoints/download', self.download_checkpoint)
|
||||
app.router.add_post('/api/checkpoints/save-metadata', self.save_metadata) # Add new route
|
||||
|
||||
# Add new WebSocket endpoint for checkpoint progress
|
||||
app.router.add_get('/ws/checkpoint-progress', ws_manager.handle_checkpoint_connection)
|
||||
|
||||
async def get_checkpoints(self, request):
|
||||
"""Get paginated checkpoint data"""
|
||||
try:
|
||||
# Parse query parameters
|
||||
page = int(request.query.get('page', '1'))
|
||||
page_size = min(int(request.query.get('page_size', '20')), 100)
|
||||
sort_by = request.query.get('sort_by', 'name')
|
||||
folder = request.query.get('folder', None)
|
||||
search = request.query.get('search', None)
|
||||
fuzzy_search = request.query.get('fuzzy_search', 'false').lower() == 'true'
|
||||
base_models = request.query.getall('base_model', [])
|
||||
tags = request.query.getall('tag', [])
|
||||
|
||||
# Process search options
|
||||
search_options = {
|
||||
'filename': request.query.get('search_filename', 'true').lower() == 'true',
|
||||
'modelname': request.query.get('search_modelname', 'true').lower() == 'true',
|
||||
'tags': request.query.get('search_tags', 'false').lower() == 'true',
|
||||
'recursive': request.query.get('recursive', 'false').lower() == 'true',
|
||||
}
|
||||
|
||||
# Process hash filters if provided
|
||||
hash_filters = {}
|
||||
if 'hash' in request.query:
|
||||
hash_filters['single_hash'] = request.query['hash']
|
||||
elif 'hashes' in request.query:
|
||||
try:
|
||||
hash_list = json.loads(request.query['hashes'])
|
||||
if isinstance(hash_list, list):
|
||||
hash_filters['multiple_hashes'] = hash_list
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
|
||||
# Get data from scanner
|
||||
result = await self.get_paginated_data(
|
||||
page=page,
|
||||
page_size=page_size,
|
||||
sort_by=sort_by,
|
||||
folder=folder,
|
||||
search=search,
|
||||
fuzzy_search=fuzzy_search,
|
||||
base_models=base_models,
|
||||
tags=tags,
|
||||
search_options=search_options,
|
||||
hash_filters=hash_filters
|
||||
)
|
||||
|
||||
# Format response items
|
||||
formatted_result = {
|
||||
'items': [self._format_checkpoint_response(cp) for cp in result['items']],
|
||||
'total': result['total'],
|
||||
'page': result['page'],
|
||||
'page_size': result['page_size'],
|
||||
'total_pages': result['total_pages']
|
||||
}
|
||||
|
||||
# Return as JSON
|
||||
return web.json_response(formatted_result)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in get_checkpoints: {e}", exc_info=True)
|
||||
return web.json_response({"error": str(e)}, status=500)
|
||||
|
||||
async def get_paginated_data(self, page, page_size, sort_by='name',
|
||||
folder=None, search=None, fuzzy_search=False,
|
||||
base_models=None, tags=None,
|
||||
search_options=None, hash_filters=None):
|
||||
"""Get paginated and filtered checkpoint data"""
|
||||
cache = await self.scanner.get_cached_data()
|
||||
|
||||
# Get default search options if not provided
|
||||
if search_options is None:
|
||||
search_options = {
|
||||
'filename': True,
|
||||
'modelname': True,
|
||||
'tags': False,
|
||||
'recursive': False,
|
||||
}
|
||||
|
||||
# Get the base data set
|
||||
filtered_data = cache.sorted_by_date if sort_by == 'date' else cache.sorted_by_name
|
||||
|
||||
# Apply hash filtering if provided (highest priority)
|
||||
if hash_filters:
|
||||
single_hash = hash_filters.get('single_hash')
|
||||
multiple_hashes = hash_filters.get('multiple_hashes')
|
||||
|
||||
if single_hash:
|
||||
# Filter by single hash
|
||||
single_hash = single_hash.lower() # Ensure lowercase for matching
|
||||
filtered_data = [
|
||||
cp for cp in filtered_data
|
||||
if cp.get('sha256', '').lower() == single_hash
|
||||
]
|
||||
elif multiple_hashes:
|
||||
# Filter by multiple hashes
|
||||
hash_set = set(hash.lower() for hash in multiple_hashes) # Convert to set for faster lookup
|
||||
filtered_data = [
|
||||
cp for cp in filtered_data
|
||||
if cp.get('sha256', '').lower() in hash_set
|
||||
]
|
||||
|
||||
# Jump to pagination
|
||||
total_items = len(filtered_data)
|
||||
start_idx = (page - 1) * page_size
|
||||
end_idx = min(start_idx + page_size, total_items)
|
||||
|
||||
result = {
|
||||
'items': filtered_data[start_idx:end_idx],
|
||||
'total': total_items,
|
||||
'page': page,
|
||||
'page_size': page_size,
|
||||
'total_pages': (total_items + page_size - 1) // page_size
|
||||
}
|
||||
|
||||
return result
|
||||
|
||||
# Apply SFW filtering if enabled in settings
|
||||
if settings.get('show_only_sfw', False):
|
||||
filtered_data = [
|
||||
cp for cp in filtered_data
|
||||
if not cp.get('preview_nsfw_level') or cp.get('preview_nsfw_level') < NSFW_LEVELS['R']
|
||||
]
|
||||
|
||||
# Apply folder filtering
|
||||
if folder is not None:
|
||||
if search_options.get('recursive', False):
|
||||
# Recursive folder filtering - include all subfolders
|
||||
filtered_data = [
|
||||
cp for cp in filtered_data
|
||||
if cp['folder'].startswith(folder)
|
||||
]
|
||||
else:
|
||||
# Exact folder filtering
|
||||
filtered_data = [
|
||||
cp for cp in filtered_data
|
||||
if cp['folder'] == folder
|
||||
]
|
||||
|
||||
# Apply base model filtering
|
||||
if base_models and len(base_models) > 0:
|
||||
filtered_data = [
|
||||
cp for cp in filtered_data
|
||||
if cp.get('base_model') in base_models
|
||||
]
|
||||
|
||||
# Apply tag filtering
|
||||
if tags and len(tags) > 0:
|
||||
filtered_data = [
|
||||
cp for cp in filtered_data
|
||||
if any(tag in cp.get('tags', []) for tag in tags)
|
||||
]
|
||||
|
||||
# Apply search filtering
|
||||
if search:
|
||||
search_results = []
|
||||
|
||||
for cp in filtered_data:
|
||||
# Search by file name
|
||||
if search_options.get('filename', True):
|
||||
if fuzzy_search:
|
||||
if fuzzy_match(cp.get('file_name', ''), search):
|
||||
search_results.append(cp)
|
||||
continue
|
||||
elif search.lower() in cp.get('file_name', '').lower():
|
||||
search_results.append(cp)
|
||||
continue
|
||||
|
||||
# Search by model name
|
||||
if search_options.get('modelname', True):
|
||||
if fuzzy_search:
|
||||
if fuzzy_match(cp.get('model_name', ''), search):
|
||||
search_results.append(cp)
|
||||
continue
|
||||
elif search.lower() in cp.get('model_name', '').lower():
|
||||
search_results.append(cp)
|
||||
continue
|
||||
|
||||
# Search by tags
|
||||
if search_options.get('tags', False) and 'tags' in cp:
|
||||
if any((fuzzy_match(tag, search) if fuzzy_search else search.lower() in tag.lower()) for tag in cp['tags']):
|
||||
search_results.append(cp)
|
||||
continue
|
||||
|
||||
filtered_data = search_results
|
||||
|
||||
# Calculate pagination
|
||||
total_items = len(filtered_data)
|
||||
start_idx = (page - 1) * page_size
|
||||
end_idx = min(start_idx + page_size, total_items)
|
||||
|
||||
result = {
|
||||
'items': filtered_data[start_idx:end_idx],
|
||||
'total': total_items,
|
||||
'page': page,
|
||||
'page_size': page_size,
|
||||
'total_pages': (total_items + page_size - 1) // page_size
|
||||
}
|
||||
|
||||
return result
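Because hash filtering short-circuits before folder, tag and search filtering, a client can resolve a checkpoint directly by its SHA256; a sketch (host and port are assumptions):

import aiohttp
import asyncio

async def find_checkpoint_by_hash(sha256: str):
    async with aiohttp.ClientSession() as session:
        async with session.get(
            "http://127.0.0.1:8188/api/checkpoints",
            params={"hash": sha256},
        ) as resp:
            return (await resp.json())["items"]

print(asyncio.run(find_checkpoint_by_hash("0123abcd" * 8)))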
def _format_checkpoint_response(self, checkpoint):
"""Format checkpoint data for API response"""
return {
"model_name": checkpoint["model_name"],
"file_name": checkpoint["file_name"],
"preview_url": config.get_preview_static_url(checkpoint.get("preview_url", "")),
"preview_nsfw_level": checkpoint.get("preview_nsfw_level", 0),
"base_model": checkpoint.get("base_model", ""),
"folder": checkpoint["folder"],
"sha256": checkpoint.get("sha256", ""),
"file_path": checkpoint["file_path"].replace(os.sep, "/"),
"file_size": checkpoint.get("size", 0),
"modified": checkpoint.get("modified", ""),
"tags": checkpoint.get("tags", []),
"modelDescription": checkpoint.get("modelDescription", ""),
"from_civitai": checkpoint.get("from_civitai", True),
"notes": checkpoint.get("notes", ""),
"model_type": checkpoint.get("model_type", "checkpoint"),
"civitai": ModelRouteUtils.filter_civitai_data(checkpoint.get("civitai", {}))
}

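# Illustrative sketch (not part of this diff): the response shape produced by
# _format_checkpoint_response; every value below is a made-up placeholder.
# {
#     "model_name": "Example Model",
#     "file_name": "example_model_v1",
#     "preview_url": "/api/previews/example_model_v1.webp",
#     "preview_nsfw_level": 0,
#     "base_model": "SDXL 1.0",
#     "folder": "sdxl",
#     "sha256": "0123abcd...",
#     "file_path": "D:/models/checkpoints/sdxl/example_model_v1.safetensors",
#     "file_size": 6938040682,
#     "modified": 1713340800.0,
#     "tags": ["photorealistic"],
#     "from_civitai": True,
#     "model_type": "checkpoint",
#     "civitai": {}
# }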
async def fetch_all_civitai(self, request: web.Request) -> web.Response:
"""Fetch CivitAI metadata for all checkpoints in the background"""
try:
cache = await self.scanner.get_cached_data()
total = len(cache.raw_data)
processed = 0
success = 0
needs_resort = False

# Prepare checkpoints to process
to_process = [
cp for cp in cache.raw_data
if cp.get('sha256') and (not cp.get('civitai') or 'id' not in cp.get('civitai')) and cp.get('from_civitai', True)
]
total_to_process = len(to_process)

# Send initial progress
await ws_manager.broadcast({
'status': 'started',
'total': total_to_process,
'processed': 0,
'success': 0
})

# Process each checkpoint
for cp in to_process:
try:
original_name = cp.get('model_name')
if await ModelRouteUtils.fetch_and_update_model(
sha256=cp['sha256'],
file_path=cp['file_path'],
model_data=cp,
update_cache_func=self.scanner.update_single_model_cache
):
success += 1
if original_name != cp.get('model_name'):
needs_resort = True

processed += 1

# Send progress update
await ws_manager.broadcast({
'status': 'processing',
'total': total_to_process,
'processed': processed,
'success': success,
'current_name': cp.get('model_name', 'Unknown')
})

except Exception as e:
logger.error(f"Error fetching CivitAI data for {cp['file_path']}: {e}")

if needs_resort:
await cache.resort(name_only=True)

# Send completion message
await ws_manager.broadcast({
'status': 'completed',
'total': total_to_process,
'processed': processed,
'success': success
})

return web.json_response({
"success": True,
"message": f"Successfully updated {success} of {processed} processed checkpoints (total: {total})"
})

except Exception as e:
# Send error message
await ws_manager.broadcast({
'status': 'error',
'error': str(e)
})
logger.error(f"Error in fetch_all_civitai for checkpoints: {e}")
return web.Response(text=str(e), status=500)

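# Illustrative sketch (not part of this diff): the broadcast sequence a WebSocket
# client can expect from fetch_all_civitai; the counts below are made-up.
# {'status': 'started',    'total': 12, 'processed': 0,  'success': 0}
# {'status': 'processing', 'total': 12, 'processed': 1,  'success': 1, 'current_name': 'Example Model'}
# ...
# {'status': 'completed',  'total': 12, 'processed': 12, 'success': 11}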
async def get_top_tags(self, request: web.Request) -> web.Response:
|
||||
"""Handle request for top tags sorted by frequency"""
|
||||
try:
|
||||
# Parse query parameters
|
||||
limit = int(request.query.get('limit', '20'))
|
||||
|
||||
# Validate limit
|
||||
if limit < 1 or limit > 100:
|
||||
limit = 20 # Default to a reasonable limit
|
||||
|
||||
# Get top tags
|
||||
top_tags = await self.scanner.get_top_tags(limit)
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'tags': top_tags
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting top tags: {str(e)}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Internal server error'
|
||||
}, status=500)
|
||||
|
||||
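# Illustrative sketch (not part of this diff): a successful get_top_tags payload;
# the per-tag entry shape and the counts are assumptions, not taken from this diff.
# {'success': True, 'tags': [{'tag': 'photorealistic', 'count': 42}, {'tag': 'anime', 'count': 17}]}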
async def get_base_models(self, request: web.Request) -> web.Response:
|
||||
"""Get base models used in loras"""
|
||||
try:
|
||||
# Parse query parameters
|
||||
limit = int(request.query.get('limit', '20'))
|
||||
|
||||
# Validate limit
|
||||
if limit < 1 or limit > 100:
|
||||
limit = 20 # Default to a reasonable limit
|
||||
|
||||
# Get base models
|
||||
base_models = await self.scanner.get_base_models(limit)
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'base_models': base_models
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"Error retrieving base models: {e}")
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
async def scan_checkpoints(self, request):
|
||||
"""Force a rescan of checkpoint files"""
|
||||
try:
|
||||
await self.scanner.get_cached_data(force_refresh=True)
|
||||
return web.json_response({"status": "success", "message": "Checkpoint scan completed"})
|
||||
except Exception as e:
|
||||
logger.error(f"Error in scan_checkpoints: {e}", exc_info=True)
|
||||
return web.json_response({"error": str(e)}, status=500)
|
||||
|
||||
async def get_checkpoint_info(self, request):
|
||||
"""Get detailed information for a specific checkpoint by name"""
|
||||
try:
|
||||
name = request.match_info.get('name', '')
|
||||
checkpoint_info = await self.scanner.get_checkpoint_info_by_name(name)
|
||||
|
||||
if checkpoint_info:
|
||||
return web.json_response(checkpoint_info)
|
||||
else:
|
||||
return web.json_response({"error": "Checkpoint not found"}, status=404)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in get_checkpoint_info: {e}", exc_info=True)
|
||||
return web.json_response({"error": str(e)}, status=500)
|
||||
|
||||
async def handle_checkpoints_page(self, request: web.Request) -> web.Response:
"""Handle GET /checkpoints request"""
try:
template = self.template_env.get_template('checkpoints.html')
rendered = template.render(
is_initializing=False,
settings=settings,
request=request
# Check if the CheckpointScanner is initializing
# It's initializing if the cache object doesn't exist yet,
# OR if the scanner explicitly says it's initializing (background task running).
is_initializing = (
self.scanner._cache is None or
(hasattr(self.scanner, '_is_initializing') and self.scanner._is_initializing)
)

if is_initializing:
# If still initializing, return loading page
template = self.template_env.get_template('checkpoints.html')
rendered = template.render(
folders=[], # empty folder list
is_initializing=True, # new flag
settings=settings, # Pass settings to template
request=request # Pass the request object to the template
)

logger.info("Checkpoints page is initializing, returning loading page")
else:
# Normal flow - get data from the already-initialized cache
try:
cache = await self.scanner.get_cached_data(force_refresh=False)
template = self.template_env.get_template('checkpoints.html')
rendered = template.render(
folders=cache.folders,
is_initializing=False,
settings=settings, # Pass settings to template
request=request # Pass the request object to the template
)
except Exception as cache_error:
logger.error(f"Error loading checkpoints cache data: {cache_error}")
# If fetching the cache fails, also show the initialization page
template = self.template_env.get_template('checkpoints.html')
rendered = template.render(
folders=[],
is_initializing=True,
settings=settings,
request=request
)
logger.info("Checkpoints cache error, returning initialization page")

return web.Response(
text=rendered,
content_type='text/html'
)

except Exception as e:
|
||||
logger.error(f"Error handling checkpoints request: {e}", exc_info=True)
|
||||
return web.Response(
|
||||
@@ -39,6 +485,194 @@ class CheckpointsRoutes:
|
||||
status=500
|
||||
)
|
||||
|
||||
def setup_routes(self, app: web.Application):
|
||||
"""Register routes with the application"""
|
||||
app.router.add_get('/checkpoints', self.handle_checkpoints_page)
|
||||
async def delete_model(self, request: web.Request) -> web.Response:
|
||||
"""Handle checkpoint model deletion request"""
|
||||
return await ModelRouteUtils.handle_delete_model(request, self.scanner)
|
||||
|
||||
async def fetch_civitai(self, request: web.Request) -> web.Response:
|
||||
"""Handle CivitAI metadata fetch request for checkpoints"""
|
||||
return await ModelRouteUtils.handle_fetch_civitai(request, self.scanner)
|
||||
|
||||
async def replace_preview(self, request: web.Request) -> web.Response:
|
||||
"""Handle preview image replacement for checkpoints"""
|
||||
return await ModelRouteUtils.handle_replace_preview(request, self.scanner)
|
||||
|
||||
async def download_checkpoint(self, request: web.Request) -> web.Response:
|
||||
"""Handle checkpoint download request"""
|
||||
async with self._download_lock:
|
||||
# Get the download manager from service registry if not already initialized
|
||||
if self.download_manager is None:
|
||||
self.download_manager = await ServiceRegistry.get_download_manager()
|
||||
|
||||
try:
|
||||
data = await request.json()
|
||||
|
||||
# Create progress callback that uses checkpoint-specific WebSocket
|
||||
async def progress_callback(progress):
|
||||
await ws_manager.broadcast_checkpoint_progress({
|
||||
'status': 'progress',
|
||||
'progress': progress
|
||||
})
|
||||
|
||||
# Check which identifier is provided
|
||||
download_url = data.get('download_url')
|
||||
model_hash = data.get('model_hash')
|
||||
model_version_id = data.get('model_version_id')
|
||||
|
||||
# Validate that at least one identifier is provided
|
||||
if not any([download_url, model_hash, model_version_id]):
|
||||
return web.Response(
|
||||
status=400,
|
||||
text="Missing required parameter: Please provide either 'download_url', 'hash', or 'modelVersionId'"
|
||||
)
|
||||
|
||||
result = await self.download_manager.download_from_civitai(
|
||||
download_url=download_url,
|
||||
model_hash=model_hash,
|
||||
model_version_id=model_version_id,
|
||||
save_dir=data.get('checkpoint_root'),
|
||||
relative_path=data.get('relative_path', ''),
|
||||
progress_callback=progress_callback,
|
||||
model_type="checkpoint"
|
||||
)
|
||||
|
||||
if not result.get('success', False):
|
||||
error_message = result.get('error', 'Unknown error')
|
||||
|
||||
# Return 401 for early access errors
|
||||
if 'early access' in error_message.lower():
|
||||
logger.warning(f"Early access download failed: {error_message}")
|
||||
return web.Response(
|
||||
status=401,
|
||||
text=f"Early Access Restriction: {error_message}"
|
||||
)
|
||||
|
||||
return web.Response(status=500, text=error_message)
|
||||
|
||||
return web.json_response(result)
|
||||
|
||||
except Exception as e:
|
||||
error_message = str(e)
|
||||
|
||||
# Check if this might be an early access error
|
||||
if '401' in error_message:
|
||||
logger.warning(f"Early access error (401): {error_message}")
|
||||
return web.Response(
|
||||
status=401,
|
||||
text="Early Access Restriction: This model requires purchase. Please ensure you have purchased early access and are logged in to Civitai."
|
||||
)
|
||||
|
||||
logger.error(f"Error downloading checkpoint: {error_message}")
|
||||
return web.Response(status=500, text=error_message)
|
||||
|
||||
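# Illustrative sketch (not part of this diff): a request body download_checkpoint
# accepts; exactly one of the three identifiers is required. Values are made-up,
# and the route path itself is registered elsewhere.
# {
#     "model_version_id": "123456",          # or "download_url" / "model_hash"
#     "checkpoint_root": "D:/models/checkpoints",
#     "relative_path": "sdxl/photorealistic"
# }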
async def get_checkpoint_roots(self, request):
|
||||
"""Return the checkpoint root directories"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
|
||||
roots = self.scanner.get_model_roots()
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"roots": roots
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting checkpoint roots: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, status=500)
|
||||
|
||||
async def save_metadata(self, request: web.Request) -> web.Response:
|
||||
"""Handle saving metadata updates for checkpoints"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
|
||||
data = await request.json()
|
||||
file_path = data.get('file_path')
|
||||
if not file_path:
|
||||
return web.Response(text='File path is required', status=400)
|
||||
|
||||
# Remove file path from data to avoid saving it
|
||||
metadata_updates = {k: v for k, v in data.items() if k != 'file_path'}
|
||||
|
||||
# Get metadata file path
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
|
||||
# Load existing metadata
|
||||
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
|
||||
# Update metadata
|
||||
metadata.update(metadata_updates)
|
||||
|
||||
# Save updated metadata
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
||||
|
||||
# Update cache
|
||||
await self.scanner.update_single_model_cache(file_path, file_path, metadata)
|
||||
|
||||
# If model_name was updated, resort the cache
|
||||
if 'model_name' in metadata_updates:
|
||||
cache = await self.scanner.get_cached_data()
|
||||
await cache.resort(name_only=True)
|
||||
|
||||
return web.json_response({'success': True})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error saving checkpoint metadata: {e}", exc_info=True)
|
||||
return web.Response(text=str(e), status=500)
|
||||
|
||||
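# Illustrative sketch (not part of this diff): the sidecar naming rule used by
# save_metadata; the checkpoint path below is a made-up example.
# file_path     = "D:/models/checkpoints/sdxl/example_model_v1.safetensors"
# metadata_path = "D:/models/checkpoints/sdxl/example_model_v1.metadata.json"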
async def get_civitai_versions(self, request: web.Request) -> web.Response:
|
||||
"""Get available versions for a Civitai checkpoint model with local availability info"""
|
||||
try:
|
||||
if self.scanner is None:
|
||||
self.scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
|
||||
# Get the civitai client from service registry
|
||||
civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
|
||||
model_id = request.match_info['model_id']
|
||||
response = await civitai_client.get_model_versions(model_id)
|
||||
if not response or not response.get('modelVersions'):
|
||||
return web.Response(status=404, text="Model not found")
|
||||
|
||||
versions = response.get('modelVersions', [])
|
||||
model_type = response.get('type', '')
|
||||
|
||||
# Check model type - should be Checkpoint
|
||||
if model_type.lower() != 'checkpoint':
|
||||
return web.json_response({
|
||||
'error': f"Model type mismatch. Expected Checkpoint, got {model_type}"
|
||||
}, status=400)
|
||||
|
||||
# Check local availability for each version
|
||||
for version in versions:
|
||||
# Find the primary model file (type="Model" and primary=true) in the files list
|
||||
model_file = next((file for file in version.get('files', [])
|
||||
if file.get('type') == 'Model' and file.get('primary') == True), None)
|
||||
|
||||
# If no primary file found, try to find any model file
|
||||
if not model_file:
|
||||
model_file = next((file for file in version.get('files', [])
|
||||
if file.get('type') == 'Model'), None)
|
||||
|
||||
if model_file:
|
||||
sha256 = model_file.get('hashes', {}).get('SHA256')
|
||||
if sha256:
|
||||
# Set existsLocally and localPath at the version level
|
||||
version['existsLocally'] = self.scanner.has_hash(sha256)
|
||||
if version['existsLocally']:
|
||||
version['localPath'] = self.scanner.get_path_by_hash(sha256)
|
||||
|
||||
# Also set the model file size at the version level for easier access
|
||||
version['modelSizeKB'] = model_file.get('sizeKB')
|
||||
else:
|
||||
# No model file found in this version
|
||||
version['existsLocally'] = False
|
||||
|
||||
return web.json_response(versions)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching checkpoint model versions: {e}")
|
||||
return web.Response(status=500, text=str(e))
|
||||
|
||||
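# Illustrative sketch (not part of this diff): a Civitai version entry after
# get_civitai_versions annotates it with local availability; only the added keys
# are shown and the values are made-up placeholders.
# {
#     ...original Civitai version fields...,
#     "existsLocally": True,
#     "localPath": "D:/models/checkpoints/sdxl/example_model_v1.safetensors",
#     "modelSizeKB": 6776064
# }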
@@ -1,12 +1,11 @@
|
||||
import os
|
||||
from aiohttp import web
|
||||
import jinja2
|
||||
from typing import Dict, List
|
||||
from typing import Dict
|
||||
import logging
|
||||
from ..services.lora_scanner import LoraScanner
|
||||
from ..services.recipe_scanner import RecipeScanner
|
||||
from ..config import config
|
||||
from ..services.settings_manager import settings # Add this import
|
||||
from ..services.settings_manager import settings
|
||||
from ..services.service_registry import ServiceRegistry # Add ServiceRegistry import
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
|
||||
@@ -15,13 +14,19 @@ class LoraRoutes:
|
||||
"""Route handlers for LoRA management endpoints"""
|
||||
|
||||
def __init__(self):
|
||||
self.scanner = LoraScanner()
|
||||
self.recipe_scanner = RecipeScanner(self.scanner)
|
||||
# Initialize service references as None, will be set during async init
|
||||
self.scanner = None
|
||||
self.recipe_scanner = None
|
||||
self.template_env = jinja2.Environment(
|
||||
loader=jinja2.FileSystemLoader(config.templates_path),
|
||||
autoescape=True
|
||||
)
|
||||
|
||||
async def init_services(self):
|
||||
"""Initialize services from ServiceRegistry"""
|
||||
self.scanner = await ServiceRegistry.get_lora_scanner()
|
||||
self.recipe_scanner = await ServiceRegistry.get_recipe_scanner()
|
||||
|
||||
def format_lora_data(self, lora: Dict) -> Dict:
|
||||
"""Format LoRA data for template rendering"""
|
||||
return {
|
||||
@@ -58,41 +63,41 @@ class LoraRoutes:
|
||||
async def handle_loras_page(self, request: web.Request) -> web.Response:
|
||||
"""Handle GET /loras request"""
|
||||
try:
|
||||
# Check the cache initialization status with stricter conditions
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
# Check if the LoraScanner is initializing
|
||||
# It's initializing if the cache object doesn't exist yet,
|
||||
# OR if the scanner explicitly says it's initializing (background task running).
|
||||
is_initializing = (
|
||||
self.scanner._cache is None or
|
||||
(self.scanner._initialization_task is not None and
|
||||
not self.scanner._initialization_task.done()) or
|
||||
(self.scanner._cache is not None and len(self.scanner._cache.raw_data) == 0 and
|
||||
self.scanner._initialization_task is not None)
|
||||
self.scanner._cache is None or
|
||||
(hasattr(self.scanner, '_is_initializing') and self.scanner._is_initializing)
|
||||
)
|
||||
|
||||
if is_initializing:
|
||||
# If initializing, return a page containing only a loading hint
|
||||
# If still initializing, return loading page
|
||||
template = self.template_env.get_template('loras.html')
|
||||
rendered = template.render(
|
||||
folders=[], # empty folder list
|
||||
is_initializing=True, # new flag
|
||||
settings=settings, # Pass settings to template
|
||||
request=request # Pass the request object to the template
|
||||
folders=[],
|
||||
is_initializing=True,
|
||||
settings=settings,
|
||||
request=request
|
||||
)
|
||||
|
||||
logger.info("Loras page is initializing, returning loading page")
|
||||
else:
|
||||
# Normal flow - but do not wait for a cache refresh
|
||||
# Normal flow - get data from initialized cache
|
||||
try:
|
||||
cache = await self.scanner.get_cached_data(force_refresh=False)
|
||||
template = self.template_env.get_template('loras.html')
|
||||
rendered = template.render(
|
||||
folders=cache.folders,
|
||||
is_initializing=False,
|
||||
settings=settings, # Pass settings to template
|
||||
request=request # Pass the request object to the template
|
||||
settings=settings,
|
||||
request=request
|
||||
)
|
||||
logger.debug(f"Loras page loaded successfully with {len(cache.raw_data)} items")
|
||||
except Exception as cache_error:
|
||||
logger.error(f"Error loading cache data: {cache_error}")
|
||||
# If fetching the cache fails, also show the initialization page
|
||||
template = self.template_env.get_template('loras.html')
|
||||
rendered = template.render(
|
||||
folders=[],
|
||||
@@ -117,32 +122,30 @@ class LoraRoutes:
|
||||
async def handle_recipes_page(self, request: web.Request) -> web.Response:
|
||||
"""Handle GET /loras/recipes request"""
|
||||
try:
|
||||
# Check cache initialization status
|
||||
is_initializing = (
|
||||
self.recipe_scanner._cache is None and
|
||||
(self.recipe_scanner._initialization_task is not None and
|
||||
not self.recipe_scanner._initialization_task.done())
|
||||
)
|
||||
|
||||
if is_initializing:
|
||||
# If initializing, return a loading page
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
# Skip initialization check and directly try to get cached data
|
||||
try:
|
||||
# Recipe scanner will initialize cache if needed
|
||||
await self.recipe_scanner.get_cached_data(force_refresh=False)
|
||||
template = self.template_env.get_template('recipes.html')
|
||||
rendered = template.render(
|
||||
recipes=[], # Frontend will load recipes via API
|
||||
is_initializing=False,
|
||||
settings=settings,
|
||||
request=request
|
||||
)
|
||||
except Exception as cache_error:
|
||||
logger.error(f"Error loading recipe cache data: {cache_error}")
|
||||
# Still keep error handling - show initializing page on error
|
||||
template = self.template_env.get_template('recipes.html')
|
||||
rendered = template.render(
|
||||
is_initializing=True,
|
||||
settings=settings,
|
||||
request=request # Pass the request object to the template
|
||||
)
|
||||
else:
|
||||
# return empty recipes
|
||||
recipes_data = []
|
||||
|
||||
template = self.template_env.get_template('recipes.html')
|
||||
rendered = template.render(
|
||||
recipes=recipes_data,
|
||||
is_initializing=False,
|
||||
settings=settings,
|
||||
request=request # Pass the request object to the template
|
||||
request=request
|
||||
)
|
||||
logger.info("Recipe cache error, returning initialization page")
|
||||
|
||||
return web.Response(
|
||||
text=rendered,
|
||||
@@ -174,5 +177,13 @@ class LoraRoutes:
|
||||
|
||||
def setup_routes(self, app: web.Application):
|
||||
"""Register routes with the application"""
|
||||
# Add an app startup handler to initialize services
|
||||
app.on_startup.append(self._on_startup)
|
||||
|
||||
# Register routes
|
||||
app.router.add_get('/loras', self.handle_loras_page)
|
||||
app.router.add_get('/loras/recipes', self.handle_recipes_page)
|
||||
|
||||
async def _on_startup(self, app):
|
||||
"""Initialize services when the app starts"""
|
||||
await self.init_services()
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
import os
|
||||
import time
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
import torch
|
||||
import io
|
||||
import logging
|
||||
from aiohttp import web
|
||||
from typing import Dict
|
||||
@@ -8,13 +12,14 @@ import json
|
||||
import asyncio
|
||||
from ..utils.exif_utils import ExifUtils
|
||||
from ..utils.recipe_parsers import RecipeParserFactory
|
||||
from ..services.civitai_client import CivitaiClient
|
||||
from ..utils.constants import CARD_PREVIEW_WIDTH
|
||||
|
||||
from ..services.recipe_scanner import RecipeScanner
|
||||
from ..services.lora_scanner import LoraScanner
|
||||
from ..config import config
|
||||
from ..workflow.parser import WorkflowParser
|
||||
from ..metadata_collector import get_metadata # Add MetadataCollector import
|
||||
from ..metadata_collector.metadata_processor import MetadataProcessor # Add MetadataProcessor import
|
||||
from ..utils.utils import download_civitai_image
|
||||
from ..services.service_registry import ServiceRegistry # Add ServiceRegistry import
|
||||
from ..metadata_collector.metadata_registry import MetadataRegistry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -22,13 +27,19 @@ class RecipeRoutes:
|
||||
"""API route handlers for Recipe management"""
|
||||
|
||||
def __init__(self):
|
||||
self.recipe_scanner = RecipeScanner(LoraScanner())
|
||||
self.civitai_client = CivitaiClient()
|
||||
self.parser = WorkflowParser()
|
||||
# Initialize service references as None, will be set during async init
|
||||
self.recipe_scanner = None
|
||||
self.civitai_client = None
|
||||
# Remove WorkflowParser instance
|
||||
|
||||
# Pre-warm the cache
|
||||
self._init_cache_task = None
|
||||
|
||||
async def init_services(self):
|
||||
"""Initialize services from ServiceRegistry"""
|
||||
self.recipe_scanner = await ServiceRegistry.get_recipe_scanner()
|
||||
self.civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
|
||||
@classmethod
|
||||
def setup_routes(cls, app: web.Application):
|
||||
"""Register API routes"""
|
||||
@@ -67,7 +78,10 @@ class RecipeRoutes:
|
||||
async def _init_cache(self, app):
|
||||
"""Initialize cache on startup"""
|
||||
try:
|
||||
# First, ensure the lora scanner is fully initialized
|
||||
# Initialize services first
|
||||
await self.init_services()
|
||||
|
||||
# Now that services are initialized, get the lora scanner
|
||||
lora_scanner = self.recipe_scanner._lora_scanner
|
||||
|
||||
# Get lora cache to ensure it's initialized
|
||||
@@ -85,6 +99,9 @@ class RecipeRoutes:
|
||||
async def get_recipes(self, request: web.Request) -> web.Response:
|
||||
"""API endpoint for getting paginated recipes"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
# Get query parameters with defaults
|
||||
page = int(request.query.get('page', '1'))
|
||||
page_size = int(request.query.get('page_size', '20'))
|
||||
@@ -154,6 +171,9 @@ class RecipeRoutes:
|
||||
async def get_recipe_detail(self, request: web.Request) -> web.Response:
|
||||
"""Get detailed information about a specific recipe"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
recipe_id = request.match_info['recipe_id']
|
||||
|
||||
# Use the new get_recipe_by_id method from recipe_scanner
|
||||
@@ -207,6 +227,9 @@ class RecipeRoutes:
|
||||
"""Analyze an uploaded image or URL for recipe metadata"""
|
||||
temp_path = None
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
# Check if request contains multipart data (image) or JSON data (url)
|
||||
content_type = request.headers.get('Content-Type', '')
|
||||
|
||||
@@ -325,6 +348,9 @@ class RecipeRoutes:
|
||||
async def save_recipe(self, request: web.Request) -> web.Response:
|
||||
"""Save a recipe to the recipes folder"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
reader = await request.multipart()
|
||||
|
||||
# Process form data
|
||||
@@ -424,7 +450,7 @@ class RecipeRoutes:
|
||||
# Optimize the image (resize and convert to WebP)
|
||||
optimized_image, extension = ExifUtils.optimize_image(
|
||||
image_data=image,
|
||||
target_width=480,
|
||||
target_width=CARD_PREVIEW_WIDTH,
|
||||
format='webp',
|
||||
quality=85,
|
||||
preserve_metadata=True
|
||||
@@ -526,6 +552,9 @@ class RecipeRoutes:
|
||||
async def delete_recipe(self, request: web.Request) -> web.Response:
|
||||
"""Delete a recipe by ID"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
recipe_id = request.match_info['recipe_id']
|
||||
|
||||
# Get recipes directory
|
||||
@@ -573,6 +602,9 @@ class RecipeRoutes:
|
||||
async def get_top_tags(self, request: web.Request) -> web.Response:
|
||||
"""Get top tags used in recipes"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
# Get limit parameter with default
|
||||
limit = int(request.query.get('limit', '20'))
|
||||
|
||||
@@ -605,6 +637,9 @@ class RecipeRoutes:
|
||||
async def get_base_models(self, request: web.Request) -> web.Response:
|
||||
"""Get base models used in recipes"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
# Get all recipes from cache
|
||||
cache = await self.recipe_scanner.get_cached_data()
|
||||
|
||||
@@ -627,12 +662,15 @@ class RecipeRoutes:
|
||||
logger.error(f"Error retrieving base models: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
'error': str(e)}
|
||||
, status=500)
|
||||
|
||||
async def share_recipe(self, request: web.Request) -> web.Response:
|
||||
"""Process a recipe image for sharing by adding metadata to EXIF"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
recipe_id = request.match_info['recipe_id']
|
||||
|
||||
# Get all recipes from cache
|
||||
@@ -692,6 +730,9 @@ class RecipeRoutes:
|
||||
async def download_shared_recipe(self, request: web.Request) -> web.Response:
|
||||
"""Serve a processed recipe image for download"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
recipe_id = request.match_info['recipe_id']
|
||||
|
||||
# Check if we have this shared recipe
|
||||
@@ -748,50 +789,75 @@ class RecipeRoutes:
|
||||
async def save_recipe_from_widget(self, request: web.Request) -> web.Response:
|
||||
"""Save a recipe from the LoRAs widget"""
|
||||
try:
|
||||
reader = await request.multipart()
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
# Process form data
|
||||
workflow_json = None
|
||||
# Get metadata using the metadata collector instead of workflow parsing
|
||||
raw_metadata = get_metadata()
|
||||
metadata_dict = MetadataProcessor.to_dict(raw_metadata)
|
||||
|
||||
while True:
|
||||
field = await reader.next()
|
||||
if field is None:
|
||||
break
|
||||
# Check if we have valid metadata
|
||||
if not metadata_dict:
|
||||
return web.json_response({"error": "No generation metadata found"}, status=400)
|
||||
|
||||
# Get the most recent image from metadata registry instead of temp directory
|
||||
metadata_registry = MetadataRegistry()
|
||||
latest_image = metadata_registry.get_first_decoded_image()
|
||||
|
||||
if not latest_image:
|
||||
return web.json_response({"error": "No recent images found to use for recipe. Try generating an image first."}, status=400)
|
||||
|
||||
# Convert the image data to bytes - handle tuple and tensor cases
|
||||
logger.debug(f"Image type: {type(latest_image)}")
|
||||
|
||||
try:
|
||||
# Handle the tuple case first
|
||||
if isinstance(latest_image, tuple):
|
||||
# Extract the tensor from the tuple
|
||||
if len(latest_image) > 0:
|
||||
tensor_image = latest_image[0]
|
||||
else:
|
||||
return web.json_response({"error": "Empty image tuple received"}, status=400)
|
||||
else:
|
||||
tensor_image = latest_image
|
||||
|
||||
if field.name == 'workflow_json':
|
||||
workflow_text = await field.text()
|
||||
try:
|
||||
workflow_json = json.loads(workflow_text)
|
||||
except:
|
||||
return web.json_response({"error": "Invalid workflow JSON"}, status=400)
|
||||
# Get the shape info for debugging
|
||||
if hasattr(tensor_image, 'shape'):
|
||||
shape_info = tensor_image.shape
|
||||
logger.debug(f"Tensor shape: {shape_info}, dtype: {tensor_image.dtype}")
|
||||
|
||||
# Convert tensor to numpy array
|
||||
if isinstance(tensor_image, torch.Tensor):
|
||||
image_np = tensor_image.cpu().numpy()
|
||||
else:
|
||||
image_np = np.array(tensor_image)
|
||||
|
||||
# Handle different tensor shapes
|
||||
# Case: (1, 1, H, W, 3) or (1, H, W, 3) - batch or multi-batch
|
||||
if len(image_np.shape) > 3:
|
||||
# Remove batch dimensions until we get to (H, W, 3)
|
||||
while len(image_np.shape) > 3:
|
||||
image_np = image_np[0]
|
||||
|
||||
# If values are in [0, 1] range, convert to [0, 255]
|
||||
if image_np.dtype == np.float32 or image_np.dtype == np.float64:
|
||||
if image_np.max() <= 1.0:
|
||||
image_np = (image_np * 255).astype(np.uint8)
|
||||
|
||||
# Ensure image is in the right format (HWC with RGB channels)
|
||||
if len(image_np.shape) == 3 and image_np.shape[2] == 3:
|
||||
pil_image = Image.fromarray(image_np)
|
||||
img_byte_arr = io.BytesIO()
|
||||
pil_image.save(img_byte_arr, format='PNG')
|
||||
image = img_byte_arr.getvalue()
|
||||
else:
|
||||
return web.json_response({"error": f"Cannot handle this data shape: {image_np.shape}, {image_np.dtype}"}, status=400)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing image data: {str(e)}", exc_info=True)
|
||||
return web.json_response({"error": f"Error processing image: {str(e)}"}, status=400)
|
||||
|
||||
if not workflow_json:
|
||||
return web.json_response({"error": "Missing workflow JSON"}, status=400)
|
||||
|
||||
# Find the latest image in the temp directory
|
||||
temp_dir = config.temp_directory
|
||||
image_files = []
|
||||
|
||||
for file in os.listdir(temp_dir):
|
||||
if file.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')):
|
||||
file_path = os.path.join(temp_dir, file)
|
||||
image_files.append((file_path, os.path.getmtime(file_path)))
|
||||
|
||||
if not image_files:
|
||||
return web.json_response({"error": "No recent images found to use for recipe"}, status=400)
|
||||
|
||||
# Sort by modification time (newest first)
|
||||
image_files.sort(key=lambda x: x[1], reverse=True)
|
||||
latest_image_path = image_files[0][0]
|
||||
|
||||
# Parse the workflow to extract generation parameters and loras
|
||||
parsed_workflow = self.parser.parse_workflow(workflow_json)
|
||||
|
||||
if not parsed_workflow:
|
||||
return web.json_response({"error": "Could not extract parameters from workflow"}, status=400)
|
||||
|
||||
# Get the lora stack from the parsed workflow
|
||||
lora_stack = parsed_workflow.get("loras", "")
|
||||
# Get the lora stack from the metadata
|
||||
lora_stack = metadata_dict.get("loras", "")
|
||||
|
||||
# Parse the lora stack format: "<lora:name:strength> <lora:name2:strength2> ..."
|
||||
import re
|
||||
@@ -799,7 +865,7 @@ class RecipeRoutes:
|
||||
|
||||
# Check if any loras were found
|
||||
if not lora_matches:
|
||||
return web.json_response({"error": "No LoRAs found in the workflow"}, status=400)
|
||||
return web.json_response({"error": "No LoRAs found in the generation metadata"}, status=400)
|
||||
|
||||
# Generate recipe name from the first 3 loras (or less if fewer are available)
|
||||
loras_for_name = lora_matches[:3] # Take at most 3 loras for the name
|
||||
@@ -813,10 +879,6 @@ class RecipeRoutes:
|
||||
|
||||
recipe_name = " ".join(recipe_name_parts)
|
||||
|
||||
# Read the image
|
||||
with open(latest_image_path, 'rb') as f:
|
||||
image = f.read()
|
||||
|
||||
# Create recipes directory if it doesn't exist
|
||||
recipes_dir = self.recipe_scanner.recipes_dir
|
||||
os.makedirs(recipes_dir, exist_ok=True)
|
||||
@@ -828,7 +890,7 @@ class RecipeRoutes:
|
||||
# Optimize the image (resize and convert to WebP)
|
||||
optimized_image, extension = ExifUtils.optimize_image(
|
||||
image_data=image,
|
||||
target_width=480,
|
||||
target_width=CARD_PREVIEW_WIDTH,
|
||||
format='webp',
|
||||
quality=85,
|
||||
preserve_metadata=True
|
||||
@@ -884,8 +946,8 @@ class RecipeRoutes:
|
||||
"created_date": time.time(),
|
||||
"base_model": most_common_base_model,
|
||||
"loras": loras_data,
|
||||
"checkpoint": parsed_workflow.get("checkpoint", ""),
|
||||
"gen_params": {key: value for key, value in parsed_workflow.items()
|
||||
"checkpoint": metadata_dict.get("checkpoint", ""),
|
||||
"gen_params": {key: value for key, value in metadata_dict.items()
|
||||
if key not in ['checkpoint', 'loras']},
|
||||
"loras_stack": lora_stack # Include the original lora stack string
|
||||
}
|
||||
@@ -922,6 +984,9 @@ class RecipeRoutes:
|
||||
async def get_recipe_syntax(self, request: web.Request) -> web.Response:
|
||||
"""Generate recipe syntax for LoRAs in the recipe, looking up proper file names using hash_index"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
recipe_id = request.match_info['recipe_id']
|
||||
|
||||
# Get all recipes from cache
|
||||
@@ -1002,6 +1067,9 @@ class RecipeRoutes:
|
||||
async def update_recipe(self, request: web.Request) -> web.Response:
|
||||
"""Update recipe metadata (name and tags)"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
recipe_id = request.match_info['recipe_id']
|
||||
data = await request.json()
|
||||
|
||||
@@ -1029,6 +1097,9 @@ class RecipeRoutes:
|
||||
async def reconnect_lora(self, request: web.Request) -> web.Response:
|
||||
"""Reconnect a deleted LoRA in a recipe to a local LoRA file"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
# Parse request data
|
||||
data = await request.json()
|
||||
|
||||
@@ -1139,6 +1210,9 @@ class RecipeRoutes:
|
||||
async def get_recipes_for_lora(self, request: web.Request) -> web.Response:
|
||||
"""Get recipes that use a specific Lora"""
|
||||
try:
|
||||
# Ensure services are initialized
|
||||
await self.init_services()
|
||||
|
||||
lora_hash = request.query.get('hash')
|
||||
|
||||
# Hash is required
|
||||
@@ -1146,7 +1220,7 @@ class RecipeRoutes:
|
||||
return web.json_response({'success': False, 'error': 'Lora hash is required'}, status=400)
|
||||
|
||||
# Log the search parameters
|
||||
logger.info(f"Getting recipes for Lora by hash: {lora_hash}")
|
||||
logger.debug(f"Getting recipes for Lora by hash: {lora_hash}")
|
||||
|
||||
# Get all recipes from cache
|
||||
cache = await self.recipe_scanner.get_cached_data()
|
||||
|
||||
py/services/checkpoint_scanner.py (new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
import os
|
||||
import logging
|
||||
import asyncio
|
||||
from typing import List, Dict, Optional, Set
|
||||
import folder_paths # type: ignore
|
||||
|
||||
from ..utils.models import CheckpointMetadata
|
||||
from ..config import config
|
||||
from .model_scanner import ModelScanner
|
||||
from .model_hash_index import ModelHashIndex
|
||||
from .service_registry import ServiceRegistry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class CheckpointScanner(ModelScanner):
|
||||
"""Service for scanning and managing checkpoint files"""
|
||||
|
||||
_instance = None
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
def __new__(cls):
|
||||
if cls._instance is None:
|
||||
cls._instance = super().__new__(cls)
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
if not hasattr(self, '_initialized'):
|
||||
# Define supported file extensions
|
||||
file_extensions = {'.safetensors', '.ckpt', '.pt', '.pth', '.sft', '.gguf'}
|
||||
super().__init__(
|
||||
model_type="checkpoint",
|
||||
model_class=CheckpointMetadata,
|
||||
file_extensions=file_extensions,
|
||||
hash_index=ModelHashIndex()
|
||||
)
|
||||
self._checkpoint_roots = self._init_checkpoint_roots()
|
||||
self._initialized = True
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls):
|
||||
"""Get singleton instance with async support"""
|
||||
async with cls._lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
def _init_checkpoint_roots(self) -> List[str]:
|
||||
"""Initialize checkpoint roots from ComfyUI settings"""
|
||||
# Get both checkpoint and diffusion_models paths
|
||||
checkpoint_paths = folder_paths.get_folder_paths("checkpoints")
|
||||
diffusion_paths = folder_paths.get_folder_paths("diffusion_models")
|
||||
|
||||
# Combine, normalize and deduplicate paths
|
||||
all_paths = set()
|
||||
for path in checkpoint_paths + diffusion_paths:
|
||||
if os.path.exists(path):
|
||||
norm_path = path.replace(os.sep, "/")
|
||||
all_paths.add(norm_path)
|
||||
|
||||
# Sort for consistent order
|
||||
sorted_paths = sorted(all_paths, key=lambda p: p.lower())
|
||||
|
||||
return sorted_paths
|
||||
|
||||
def get_model_roots(self) -> List[str]:
|
||||
"""Get checkpoint root directories"""
|
||||
return self._checkpoint_roots
|
||||
|
||||
async def scan_all_models(self) -> List[Dict]:
|
||||
"""Scan all checkpoint directories and return metadata"""
|
||||
all_checkpoints = []
|
||||
|
||||
# Create scan tasks for each directory
|
||||
scan_tasks = []
|
||||
for root in self._checkpoint_roots:
|
||||
task = asyncio.create_task(self._scan_directory(root))
|
||||
scan_tasks.append(task)
|
||||
|
||||
# Wait for all tasks to complete
|
||||
for task in scan_tasks:
|
||||
try:
|
||||
checkpoints = await task
|
||||
all_checkpoints.extend(checkpoints)
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning checkpoint directory: {e}")
|
||||
|
||||
return all_checkpoints
|
||||
|
||||
async def _scan_directory(self, root_path: str) -> List[Dict]:
|
||||
"""Scan a directory for checkpoint files"""
|
||||
checkpoints = []
|
||||
original_root = root_path
|
||||
|
||||
async def scan_recursive(path: str, visited_paths: set):
|
||||
try:
|
||||
real_path = os.path.realpath(path)
|
||||
if real_path in visited_paths:
|
||||
logger.debug(f"Skipping already visited path: {path}")
|
||||
return
|
||||
visited_paths.add(real_path)
|
||||
|
||||
with os.scandir(path) as it:
|
||||
entries = list(it)
|
||||
for entry in entries:
|
||||
try:
|
||||
if entry.is_file(follow_symlinks=True):
|
||||
# Check if file has supported extension
|
||||
ext = os.path.splitext(entry.name)[1].lower()
|
||||
if ext in self.file_extensions:
|
||||
file_path = entry.path.replace(os.sep, "/")
|
||||
await self._process_single_file(file_path, original_root, checkpoints)
|
||||
await asyncio.sleep(0)
|
||||
elif entry.is_dir(follow_symlinks=True):
|
||||
# For directories, continue scanning with original path
|
||||
await scan_recursive(entry.path, visited_paths)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing entry {entry.path}: {e}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning {path}: {e}")
|
||||
|
||||
await scan_recursive(root_path, set())
|
||||
return checkpoints
|
||||
|
||||
async def _process_single_file(self, file_path: str, root_path: str, checkpoints: list):
|
||||
"""Process a single checkpoint file and add to results"""
|
||||
try:
|
||||
result = await self._process_model_file(file_path, root_path)
|
||||
if result:
|
||||
checkpoints.append(result)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing {file_path}: {e}")
|
||||
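# Illustrative sketch (not part of this diff): obtaining the scanner singleton and
# listing its roots; assumes it is called from an async context.
# scanner = await CheckpointScanner.get_instance()
# roots = scanner.get_model_roots()          # normalized checkpoint/diffusion dirs
# checkpoints = await scanner.scan_all_models()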
@@ -3,6 +3,7 @@ import aiohttp
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
import asyncio
|
||||
from email.parser import Parser
|
||||
from typing import Optional, Dict, Tuple, List
|
||||
from urllib.parse import unquote
|
||||
@@ -11,20 +12,51 @@ from ..utils.models import LoraMetadata
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class CivitaiClient:
|
||||
_instance = None
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls):
|
||||
"""Get singleton instance of CivitaiClient"""
|
||||
async with cls._lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
# Check if already initialized for singleton pattern
|
||||
if hasattr(self, '_initialized'):
|
||||
return
|
||||
self._initialized = True
|
||||
|
||||
self.base_url = "https://civitai.com/api/v1"
|
||||
self.headers = {
|
||||
'User-Agent': 'ComfyUI-LoRA-Manager/1.0'
|
||||
}
|
||||
self._session = None
|
||||
# Set default buffer size to 1MB for higher throughput
|
||||
self.chunk_size = 1024 * 1024
|
||||
|
||||
@property
|
||||
async def session(self) -> aiohttp.ClientSession:
|
||||
"""Lazy initialize the session"""
|
||||
if self._session is None:
|
||||
connector = aiohttp.TCPConnector(ssl=True)
|
||||
trust_env = True # Allow proxy settings from the system environment variables
|
||||
self._session = aiohttp.ClientSession(connector=connector, trust_env=trust_env)
|
||||
# Optimize TCP connection parameters
|
||||
connector = aiohttp.TCPConnector(
|
||||
ssl=True,
|
||||
limit=10, # Increase parallel connections
|
||||
ttl_dns_cache=300, # DNS cache time
|
||||
force_close=False, # Keep connections for reuse
|
||||
enable_cleanup_closed=True
|
||||
)
|
||||
trust_env = True # Allow using system environment proxy settings
|
||||
# Configure timeout parameters
|
||||
timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=60)
|
||||
self._session = aiohttp.ClientSession(
|
||||
connector=connector,
|
||||
trust_env=trust_env,
|
||||
timeout=timeout
|
||||
)
|
||||
return self._session
|
||||
|
||||
def _parse_content_disposition(self, header: str) -> str:
|
||||
@@ -74,6 +106,10 @@ class CivitaiClient:
|
||||
session = await self.session
|
||||
try:
|
||||
headers = self._get_request_headers()
|
||||
|
||||
# Add Range header to allow resumable downloads
|
||||
headers['Accept-Encoding'] = 'identity' # Disable compression for better chunked downloads
|
||||
|
||||
async with session.get(url, headers=headers, allow_redirects=True) as response:
|
||||
if response.status != 200:
|
||||
# Handle 401 unauthorized responses
|
||||
@@ -101,16 +137,23 @@ class CivitaiClient:
|
||||
# Get total file size for progress calculation
|
||||
total_size = int(response.headers.get('content-length', 0))
|
||||
current_size = 0
|
||||
last_progress_report_time = datetime.now()
|
||||
|
||||
# Stream download to file with progress updates
|
||||
# Stream download to file with progress updates using larger buffer
|
||||
with open(save_path, 'wb') as f:
|
||||
async for chunk in response.content.iter_chunked(8192):
|
||||
async for chunk in response.content.iter_chunked(self.chunk_size):
|
||||
if chunk:
|
||||
f.write(chunk)
|
||||
current_size += len(chunk)
|
||||
if progress_callback and total_size:
|
||||
|
||||
# Limit progress update frequency to reduce overhead
|
||||
now = datetime.now()
|
||||
time_diff = (now - last_progress_report_time).total_seconds()
|
||||
|
||||
if progress_callback and total_size and time_diff >= 0.5:
|
||||
progress = (current_size / total_size) * 100
|
||||
await progress_callback(progress)
|
||||
last_progress_report_time = now
|
||||
|
||||
# Ensure 100% progress is reported
|
||||
if progress_callback:
|
||||
@@ -118,6 +161,9 @@ class CivitaiClient:
|
||||
|
||||
return True, save_path
|
||||
|
||||
except aiohttp.ClientError as e:
|
||||
logger.error(f"Network error during download: {e}")
|
||||
return False, f"Network error: {str(e)}"
|
||||
except Exception as e:
|
||||
logger.error(f"Download error: {e}")
|
||||
return False, str(e)
|
||||
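# Illustrative sketch (not part of this diff): a progress callback compatible with
# the throttled reporting above (invoked at most roughly every 0.5 s plus a final
# 100% report); the logger name is an assumption.
# async def log_progress(progress: float):
#     logging.getLogger("download").info("download %.1f%% complete", progress)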
@@ -155,13 +201,26 @@ class CivitaiClient:
|
||||
if response.status != 200:
|
||||
return None
|
||||
data = await response.json()
|
||||
return data.get('modelVersions', [])
|
||||
# Also return model type along with versions
|
||||
return {
|
||||
'modelVersions': data.get('modelVersions', []),
|
||||
'type': data.get('type', '')
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model versions: {e}")
|
||||
return None
|
||||
|
||||
async def get_model_version_info(self, version_id: str) -> Optional[Dict]:
|
||||
"""Fetch model version metadata from Civitai"""
|
||||
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
"""Fetch model version metadata from Civitai
|
||||
|
||||
Args:
|
||||
version_id: The Civitai model version ID
|
||||
|
||||
Returns:
|
||||
Tuple[Optional[Dict], Optional[str]]: A tuple containing:
|
||||
- The model version data or None if not found
|
||||
- An error message if there was an error, or None on success
|
||||
"""
|
||||
try:
|
||||
session = await self.session
|
||||
url = f"{self.base_url}/model-versions/{version_id}"
|
||||
@@ -169,11 +228,25 @@ class CivitaiClient:
|
||||
|
||||
async with session.get(url, headers=headers) as response:
|
||||
if response.status == 200:
|
||||
return await response.json()
|
||||
return None
|
||||
return await response.json(), None
|
||||
|
||||
# Handle specific error cases
|
||||
if response.status == 404:
|
||||
# Try to parse the error message
|
||||
try:
|
||||
error_data = await response.json()
|
||||
error_msg = error_data.get('error', f"Model not found (status 404)")
|
||||
logger.warning(f"Model version not found: {version_id} - {error_msg}")
|
||||
return None, error_msg
|
||||
except:
|
||||
return None, "Model not found (status 404)"
|
||||
|
||||
# Other error cases
|
||||
return None, f"Failed to fetch model info (status {response.status})"
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model version info: {e}")
|
||||
return None
|
||||
error_msg = f"Error fetching model version info: {e}"
|
||||
logger.error(error_msg)
|
||||
return None, error_msg
|
||||
|
||||
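# Illustrative sketch (not part of this diff): consuming the new two-value return;
# the version id is a made-up placeholder.
# version_info, error_msg = await client.get_model_version_info("123456")
# if version_info is None:
#     logger.warning(f"Civitai lookup failed: {error_msg}")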
async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
|
||||
"""Fetch model metadata (description and tags) from Civitai API
|
||||
|
||||
@@ -1,21 +1,79 @@
|
||||
import logging
|
||||
import os
|
||||
import json
|
||||
from typing import Optional, Dict
|
||||
import asyncio
|
||||
from typing import Optional, Dict, Any
|
||||
from .civitai_client import CivitaiClient
|
||||
from .file_monitor import LoraFileMonitor
|
||||
from ..utils.models import LoraMetadata
|
||||
from ..utils.models import LoraMetadata, CheckpointMetadata
|
||||
from ..utils.constants import CARD_PREVIEW_WIDTH
|
||||
from ..utils.exif_utils import ExifUtils
|
||||
from .service_registry import ServiceRegistry
|
||||
|
||||
# Download to temporary file first
|
||||
import tempfile
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class DownloadManager:
|
||||
def __init__(self, file_monitor: Optional[LoraFileMonitor] = None):
|
||||
self.civitai_client = CivitaiClient()
|
||||
self.file_monitor = file_monitor
|
||||
_instance = None
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls):
|
||||
"""Get singleton instance of DownloadManager"""
|
||||
async with cls._lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
# Check if already initialized for singleton pattern
|
||||
if hasattr(self, '_initialized'):
|
||||
return
|
||||
self._initialized = True
|
||||
|
||||
self._civitai_client = None # Will be lazily initialized
|
||||
|
||||
async def _get_civitai_client(self):
|
||||
"""Lazily initialize CivitaiClient from registry"""
|
||||
if self._civitai_client is None:
|
||||
self._civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
return self._civitai_client
|
||||
|
||||
async def _get_lora_monitor(self):
|
||||
"""Get the lora file monitor from registry"""
|
||||
return await ServiceRegistry.get_lora_monitor()
|
||||
|
||||
async def _get_checkpoint_monitor(self):
|
||||
"""Get the checkpoint file monitor from registry"""
|
||||
return await ServiceRegistry.get_checkpoint_monitor()
|
||||
|
||||
async def _get_lora_scanner(self):
|
||||
"""Get the lora scanner from registry"""
|
||||
return await ServiceRegistry.get_lora_scanner()
|
||||
|
||||
async def _get_checkpoint_scanner(self):
|
||||
"""Get the checkpoint scanner from registry"""
|
||||
return await ServiceRegistry.get_checkpoint_scanner()
|
||||
|
||||
async def download_from_civitai(self, download_url: str = None, model_hash: str = None,
|
||||
model_version_id: str = None, save_dir: str = None,
|
||||
relative_path: str = '', progress_callback=None) -> Dict:
|
||||
relative_path: str = '', progress_callback=None,
|
||||
model_type: str = "lora") -> Dict:
|
||||
"""Download model from Civitai
|
||||
|
||||
Args:
|
||||
download_url: Direct download URL for the model
|
||||
model_hash: SHA256 hash of the model
|
||||
model_version_id: Civitai model version ID
|
||||
save_dir: Directory to save the model to
|
||||
relative_path: Relative path within save_dir
|
||||
progress_callback: Callback function for progress updates
|
||||
model_type: Type of model ('lora' or 'checkpoint')
|
||||
|
||||
Returns:
|
||||
Dict with download result
|
||||
"""
|
||||
try:
|
||||
# Update save directory with relative path if provided
|
||||
if relative_path:
|
||||
@@ -23,25 +81,31 @@ class DownloadManager:
|
||||
# Create directory if it doesn't exist
|
||||
os.makedirs(save_dir, exist_ok=True)
|
||||
|
||||
# Get civitai client
|
||||
civitai_client = await self._get_civitai_client()
|
||||
|
||||
# Get version info based on the provided identifier
|
||||
version_info = None
|
||||
error_msg = None
|
||||
|
||||
if download_url:
|
||||
# Extract version ID from download URL
|
||||
version_id = download_url.split('/')[-1]
|
||||
version_info = await self.civitai_client.get_model_version_info(version_id)
|
||||
version_info, error_msg = await civitai_client.get_model_version_info(version_id)
|
||||
elif model_version_id:
|
||||
# Use model version ID directly
|
||||
version_info = await self.civitai_client.get_model_version_info(model_version_id)
|
||||
version_info, error_msg = await civitai_client.get_model_version_info(model_version_id)
|
||||
elif model_hash:
|
||||
# Get model by hash
|
||||
version_info = await self.civitai_client.get_model_by_hash(model_hash)
|
||||
version_info = await civitai_client.get_model_by_hash(model_hash)
|
||||
|
||||
|
||||
if not version_info:
|
||||
return {'success': False, 'error': 'Failed to fetch model metadata'}
|
||||
if error_msg and "model not found" in error_msg.lower():
|
||||
return {'success': False, 'error': f'Model not found on Civitai: {error_msg}'}
|
||||
return {'success': False, 'error': error_msg or 'Failed to fetch model metadata'}
|
||||
|
||||
# Check if this is an early access LoRA
|
||||
# Check if this is an early access model
|
||||
if version_info.get('earlyAccessEndsAt'):
|
||||
early_access_date = version_info.get('earlyAccessEndsAt', '')
|
||||
# Convert to a readable date if possible
|
||||
@@ -49,12 +113,12 @@ class DownloadManager:
|
||||
from datetime import datetime
|
||||
date_obj = datetime.fromisoformat(early_access_date.replace('Z', '+00:00'))
|
||||
formatted_date = date_obj.strftime('%Y-%m-%d')
|
||||
early_access_msg = f"This LoRA requires early access payment (until {formatted_date}). "
|
||||
early_access_msg = f"This model requires early access payment (until {formatted_date}). "
|
||||
except:
|
||||
early_access_msg = "This LoRA requires early access payment. "
|
||||
early_access_msg = "This model requires early access payment. "
|
||||
|
||||
early_access_msg += "Please ensure you have purchased early access and are logged in to Civitai."
|
||||
logger.warning(f"Early access LoRA detected: {version_info.get('name', 'Unknown')}")
|
||||
logger.warning(f"Early access model detected: {version_info.get('name', 'Unknown')}")
|
||||
|
||||
# We'll still try to download, but log a warning and prepare for potential failure
|
||||
if progress_callback:
|
||||
@@ -64,43 +128,51 @@ class DownloadManager:
|
||||
if progress_callback:
|
||||
await progress_callback(0)
|
||||
|
||||
# 2. Get file information
|
||||
# 2. Get file information
|
||||
file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
|
||||
if not file_info:
|
||||
return {'success': False, 'error': 'No primary file found in metadata'}
|
||||
|
||||
# 3. Prepare download
|
||||
# 3. Prepare download
|
||||
file_name = file_info['name']
|
||||
save_path = os.path.join(save_dir, file_name)
|
||||
file_size = file_info.get('sizeKB', 0) * 1024
|
||||
|
||||
# 4. Notify the file monitor - use normalized path and file size
|
||||
self.file_monitor.handler.add_ignore_path(
|
||||
save_path.replace(os.sep, '/'),
|
||||
file_size
|
||||
)
|
||||
# 4. Notify file monitor - use normalized path and file size
|
||||
file_monitor = await self._get_lora_monitor() if model_type == "lora" else await self._get_checkpoint_monitor()
|
||||
if file_monitor and file_monitor.handler:
|
||||
file_monitor.handler.add_ignore_path(
|
||||
save_path.replace(os.sep, '/'),
|
||||
file_size
|
||||
)
|
||||
|
||||
# 5. Prepare metadata
|
||||
metadata = LoraMetadata.from_civitai_info(version_info, file_info, save_path)
|
||||
# 5. Prepare metadata based on model type
|
||||
if model_type == "checkpoint":
|
||||
metadata = CheckpointMetadata.from_civitai_info(version_info, file_info, save_path)
|
||||
logger.info(f"Creating CheckpointMetadata for {file_name}")
|
||||
else:
|
||||
metadata = LoraMetadata.from_civitai_info(version_info, file_info, save_path)
|
||||
logger.info(f"Creating LoraMetadata for {file_name}")
|
||||
|
||||
# 5.1 Fetch and update model tags and description
|
||||
# 5.1 Get and update model tags and description
|
||||
model_id = version_info.get('modelId')
|
||||
if model_id:
|
||||
model_metadata, _ = await self.civitai_client.get_model_metadata(str(model_id))
|
||||
model_metadata, _ = await civitai_client.get_model_metadata(str(model_id))
|
||||
if model_metadata:
|
||||
if model_metadata.get("tags"):
|
||||
metadata.tags = model_metadata.get("tags", [])
|
||||
if model_metadata.get("description"):
|
||||
metadata.modelDescription = model_metadata.get("description", "")
|
||||
|
||||
# 6. Start the download process
|
||||
# 6. Start download process
|
||||
result = await self._execute_download(
|
||||
download_url=file_info.get('downloadUrl', ''),
|
||||
save_dir=save_dir,
|
||||
metadata=metadata,
|
||||
version_info=version_info,
|
||||
relative_path=relative_path,
|
||||
progress_callback=progress_callback
|
||||
progress_callback=progress_callback,
|
||||
model_type=model_type
|
||||
)
|
||||
|
||||
return result
|
||||
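Note: the hunk above switches both the metadata class and the downstream scanner on model_type. As a rough sketch, that dispatch reduces to the helper below; the helper itself and the import of CheckpointMetadata from ..utils.models are assumptions added for illustration, not part of this commit.

# Hypothetical helper showing the model_type dispatch used above.
# Assumes CheckpointMetadata sits alongside LoraMetadata in ..utils.models.
from ..utils.models import LoraMetadata, CheckpointMetadata

def build_metadata(model_type: str, version_info: dict, file_info: dict, save_path: str):
    cls = CheckpointMetadata if model_type == "checkpoint" else LoraMetadata
    return cls.from_civitai_info(version_info, file_info, save_path)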
@@ -114,10 +186,12 @@ class DownloadManager:
return {'success': False, 'error': str(e)}

async def _execute_download(self, download_url: str, save_dir: str,
metadata: LoraMetadata, version_info: Dict,
relative_path: str, progress_callback=None) -> Dict:
metadata, version_info: Dict,
relative_path: str, progress_callback=None,
model_type: str = "lora") -> Dict:
"""Execute the actual download process including preview images and model files"""
try:
civitai_client = await self._get_civitai_client()
save_path = metadata.file_path
metadata_path = os.path.splitext(save_path)[0] + '.metadata.json'

@@ -128,20 +202,61 @@ class DownloadManager:
if progress_callback:
await progress_callback(1) # 1% progress for starting preview download

preview_ext = '.mp4' if images[0].get('type') == 'video' else '.png'
preview_path = os.path.splitext(save_path)[0] + '.preview' + preview_ext
if await self.civitai_client.download_preview_image(images[0]['url'], preview_path):
metadata.preview_url = preview_path.replace(os.sep, '/')
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
# Check if it's a video or an image
is_video = images[0].get('type') == 'video'

if (is_video):
# For videos, use .mp4 extension
preview_ext = '.mp4'
preview_path = os.path.splitext(save_path)[0] + preview_ext

# Download video directly
if await civitai_client.download_preview_image(images[0]['url'], preview_path):
metadata.preview_url = preview_path.replace(os.sep, '/')
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
else:
# For images, use WebP format for better performance
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
temp_path = temp_file.name

# Download the original image to temp path
if await civitai_client.download_preview_image(images[0]['url'], temp_path):
# Optimize and convert to WebP
preview_path = os.path.splitext(save_path)[0] + '.webp'

# Use ExifUtils to optimize and convert the image
optimized_data, _ = ExifUtils.optimize_image(
image_data=temp_path,
target_width=CARD_PREVIEW_WIDTH,
format='webp',
quality=85,
preserve_metadata=False
)

# Save the optimized image
with open(preview_path, 'wb') as f:
f.write(optimized_data)

# Update metadata
metadata.preview_url = preview_path.replace(os.sep, '/')
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)

# Remove temporary file
try:
os.unlink(temp_path)
except Exception as e:
logger.warning(f"Failed to delete temp file: {e}")

# Report preview download completion
if progress_callback:
await progress_callback(3) # 3% progress after preview download

# Download model file with progress tracking
success, result = await self.civitai_client._download_file(
success, result = await civitai_client._download_file(
download_url,
save_dir,
os.path.basename(save_path),
@@ -155,15 +270,22 @@ class DownloadManager:
os.remove(path)
return {'success': False, 'error': result}

# 4. 更新文件信息(大小和修改时间)
# 4. Update file information (size and modified time)
metadata.update_file_info(save_path)

# 5. 最终更新元数据
# 5. Final metadata update
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)

# 6. update lora cache
cache = await self.file_monitor.scanner.get_cached_data()
# 6. Update cache based on model type
if model_type == "checkpoint":
scanner = await self._get_checkpoint_scanner()
logger.info(f"Updating checkpoint cache for {save_path}")
else:
scanner = await self._get_lora_scanner()
logger.info(f"Updating lora cache for {save_path}")

cache = await scanner.get_cached_data()
metadata_dict = metadata.to_dict()
metadata_dict['folder'] = relative_path
cache.raw_data.append(metadata_dict)
@@ -172,11 +294,8 @@ class DownloadManager:
all_folders.add(relative_path)
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())

# Update the hash index with the new LoRA entry
self.file_monitor.scanner._hash_index.add_entry(metadata_dict['sha256'], metadata_dict['file_path'])

# Update the hash index with the new LoRA entry
self.file_monitor.scanner._hash_index.add_entry(metadata_dict['sha256'], metadata_dict['file_path'])
# Update the hash index with the new model entry
scanner._hash_index.add_entry(metadata_dict['sha256'], metadata_dict['file_path'])

# Report 100% completion
if progress_callback:
|
||||
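For reference, the early access date handling at the top of this file's diff boils down to the standalone sketch below; the function name is illustrative only.

from datetime import datetime

def format_early_access_date(ends_at: str) -> str:
    # Civitai reports ISO-8601 timestamps ending in 'Z'; swapping in an explicit
    # offset keeps datetime.fromisoformat working on Python versions before 3.11.
    return datetime.fromisoformat(ends_at.replace('Z', '+00:00')).strftime('%Y-%m-%d')

# format_early_access_date('2025-01-31T00:00:00Z') returns '2025-01-31'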
@@ -1,37 +1,42 @@
|
||||
from operator import itemgetter
|
||||
import os
|
||||
import logging
|
||||
import asyncio
|
||||
import time
|
||||
from watchdog.observers import Observer
|
||||
from watchdog.events import FileSystemEventHandler
|
||||
from typing import List, Dict, Set
|
||||
from typing import List, Dict, Set, Optional
|
||||
from threading import Lock
|
||||
from .lora_scanner import LoraScanner
|
||||
|
||||
from ..config import config
|
||||
from .service_registry import ServiceRegistry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class LoraFileHandler(FileSystemEventHandler):
|
||||
"""Handler for LoRA file system events"""
|
||||
# Configuration constant to control file monitoring functionality
|
||||
ENABLE_FILE_MONITORING = False
|
||||
|
||||
class BaseFileHandler(FileSystemEventHandler):
|
||||
"""Base handler for file system events"""
|
||||
|
||||
def __init__(self, scanner: LoraScanner, loop: asyncio.AbstractEventLoop):
|
||||
self.scanner = scanner
|
||||
self.loop = loop # 存储事件循环引用
|
||||
self.pending_changes = set() # 待处理的变更
|
||||
self.lock = Lock() # 线程安全锁
|
||||
self.update_task = None # 异步更新任务
|
||||
self._ignore_paths = set() # Add ignore paths set
|
||||
self._min_ignore_timeout = 5 # minimum timeout in seconds
|
||||
self._download_speed = 1024 * 1024 # assume 1MB/s as base speed
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop):
|
||||
self.loop = loop # Store event loop reference
|
||||
self.pending_changes = set() # Pending changes
|
||||
self.lock = Lock() # Thread-safe lock
|
||||
self.update_task = None # Async update task
|
||||
self._ignore_paths = set() # Paths to ignore
|
||||
self._min_ignore_timeout = 5 # Minimum timeout in seconds
|
||||
self._download_speed = 1024 * 1024 # Assume 1MB/s as base speed
|
||||
|
||||
# Track modified files with timestamps for debouncing
|
||||
self.modified_files: Dict[str, float] = {}
|
||||
self.debounce_timer = None
|
||||
self.debounce_delay = 3.0 # seconds to wait after last modification
|
||||
self.debounce_delay = 3.0 # Seconds to wait after last modification
|
||||
|
||||
# Track files that are already scheduled for processing
|
||||
# Track files already scheduled for processing
|
||||
self.scheduled_files: Set[str] = set()
|
||||
|
||||
# File extensions to monitor - should be overridden by subclasses
|
||||
self.file_extensions = set()
|
||||
|
||||
def _should_ignore(self, path: str) -> bool:
|
||||
"""Check if path should be ignored"""
|
||||
@@ -56,35 +61,33 @@ class LoraFileHandler(FileSystemEventHandler):
|
||||
if event.is_directory:
|
||||
return
|
||||
|
||||
# Handle safetensors files directly
|
||||
if event.src_path.endswith('.safetensors'):
|
||||
# Handle appropriate files based on extensions
|
||||
file_ext = os.path.splitext(event.src_path)[1].lower()
|
||||
if file_ext in self.file_extensions:
|
||||
if self._should_ignore(event.src_path):
|
||||
return
|
||||
|
||||
# We'll process this file directly and ignore subsequent modifications
|
||||
# to prevent duplicate processing
|
||||
# Process this file directly and ignore subsequent modifications
|
||||
normalized_path = os.path.realpath(event.src_path).replace(os.sep, '/')
|
||||
if normalized_path not in self.scheduled_files:
|
||||
logger.info(f"LoRA file created: {event.src_path}")
|
||||
logger.info(f"File created: {event.src_path}")
|
||||
self.scheduled_files.add(normalized_path)
|
||||
self._schedule_update('add', event.src_path)
|
||||
|
||||
# Ignore modifications for a short period after creation
|
||||
# This helps avoid duplicate processing
|
||||
self.loop.call_later(
|
||||
self.debounce_delay * 2,
|
||||
self.scheduled_files.discard,
|
||||
normalized_path
|
||||
)
|
||||
|
||||
# For browser downloads, we'll catch them when they're renamed to .safetensors
|
||||
|
||||
def on_modified(self, event):
|
||||
if event.is_directory:
|
||||
return
|
||||
|
||||
# Only process safetensors files
|
||||
if event.src_path.endswith('.safetensors'):
|
||||
# Only process files with supported extensions
|
||||
file_ext = os.path.splitext(event.src_path)[1].lower()
|
||||
if file_ext in self.file_extensions:
|
||||
if self._should_ignore(event.src_path):
|
||||
return
|
||||
|
||||
@@ -132,12 +135,17 @@ class LoraFileHandler(FileSystemEventHandler):
|
||||
|
||||
# Process stable files
|
||||
for file_path in files_to_process:
|
||||
logger.info(f"Processing modified LoRA file: {file_path}")
|
||||
logger.info(f"Processing modified file: {file_path}")
|
||||
self._schedule_update('add', file_path)
|
||||
|
||||
def on_deleted(self, event):
|
||||
if event.is_directory or not event.src_path.endswith('.safetensors'):
|
||||
if event.is_directory:
|
||||
return
|
||||
|
||||
file_ext = os.path.splitext(event.src_path)[1].lower()
|
||||
if file_ext not in self.file_extensions:
|
||||
return
|
||||
|
||||
if self._should_ignore(event.src_path):
|
||||
return
|
||||
|
||||
@@ -145,14 +153,17 @@ class LoraFileHandler(FileSystemEventHandler):
|
||||
normalized_path = os.path.realpath(event.src_path).replace(os.sep, '/')
|
||||
self.scheduled_files.discard(normalized_path)
|
||||
|
||||
logger.info(f"LoRA file deleted: {event.src_path}")
|
||||
logger.info(f"File deleted: {event.src_path}")
|
||||
self._schedule_update('remove', event.src_path)
|
||||
|
||||
def on_moved(self, event):
|
||||
"""Handle file move/rename events"""
|
||||
|
||||
# If destination is a safetensors file, treat it as a new file
|
||||
if event.dest_path.endswith('.safetensors'):
|
||||
src_ext = os.path.splitext(event.src_path)[1].lower()
|
||||
dest_ext = os.path.splitext(event.dest_path)[1].lower()
|
||||
|
||||
# If destination has supported extension, treat as new file
|
||||
if dest_ext in self.file_extensions:
|
||||
if self._should_ignore(event.dest_path):
|
||||
return
|
||||
|
||||
@@ -160,7 +171,7 @@ class LoraFileHandler(FileSystemEventHandler):
|
||||
|
||||
# Only process if not already scheduled
|
||||
if normalized_path not in self.scheduled_files:
|
||||
logger.info(f"LoRA file renamed/moved to: {event.dest_path}")
|
||||
logger.info(f"File renamed/moved to: {event.dest_path}")
|
||||
self.scheduled_files.add(normalized_path)
|
||||
self._schedule_update('add', event.dest_path)
|
||||
|
||||
@@ -171,21 +182,21 @@ class LoraFileHandler(FileSystemEventHandler):
|
||||
normalized_path
|
||||
)
|
||||
|
||||
# If source was a safetensors file, treat it as deleted
|
||||
if event.src_path.endswith('.safetensors'):
|
||||
# If source was a supported file, treat it as deleted
|
||||
if src_ext in self.file_extensions:
|
||||
if self._should_ignore(event.src_path):
|
||||
return
|
||||
|
||||
normalized_path = os.path.realpath(event.src_path).replace(os.sep, '/')
|
||||
self.scheduled_files.discard(normalized_path)
|
||||
|
||||
logger.info(f"LoRA file moved/renamed from: {event.src_path}")
|
||||
logger.info(f"File moved/renamed from: {event.src_path}")
|
||||
self._schedule_update('remove', event.src_path)
|
||||
|
||||
def _schedule_update(self, action: str, file_path: str): #file_path is a real path
|
||||
def _schedule_update(self, action: str, file_path: str):
|
||||
"""Schedule a cache update"""
|
||||
with self.lock:
|
||||
# 使用 config 中的方法映射路径
|
||||
# Use config method to map path
|
||||
mapped_path = config.map_path_to_link(file_path)
|
||||
normalized_path = mapped_path.replace(os.sep, '/')
|
||||
self.pending_changes.add((action, normalized_path))
|
||||
@@ -196,7 +207,20 @@ class LoraFileHandler(FileSystemEventHandler):
|
||||
"""Create update task in the event loop"""
|
||||
if self.update_task is None or self.update_task.done():
|
||||
self.update_task = asyncio.create_task(self._process_changes())
|
||||
|
||||
async def _process_changes(self, delay: float = 2.0):
|
||||
"""Process pending changes with debouncing - should be implemented by subclasses"""
|
||||
raise NotImplementedError("Subclasses must implement _process_changes")
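The base handler above batches events coming from watchdog's worker thread and flushes them on the asyncio loop after a quiet period. A stripped-down sketch of that debounce pattern follows; the class and method names here are illustrative and not part of the commit.

import asyncio

class DebouncedQueue:
    def __init__(self, loop: asyncio.AbstractEventLoop):
        self.loop = loop
        self.pending = set()
        self.task = None

    def schedule(self, action: str, path: str, delay: float = 2.0):
        # Called from the watchdog thread, so hop onto the event loop first.
        self.loop.call_soon_threadsafe(self._enqueue, action, path, delay)

    def _enqueue(self, action, path, delay):
        self.pending.add((action, path))
        if self.task is None or self.task.done():
            self.task = asyncio.create_task(self._flush(delay))

    async def _flush(self, delay):
        await asyncio.sleep(delay)
        changes, self.pending = self.pending, set()
        for action, path in changes:
            print(action, path)  # the real handlers update the scanner cache here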
|
||||
|
||||
|
||||
class LoraFileHandler(BaseFileHandler):
|
||||
"""Handler for LoRA file system events"""
|
||||
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop):
|
||||
super().__init__(loop)
|
||||
# Set supported file extensions for LoRAs
|
||||
self.file_extensions = {'.safetensors'}
|
||||
|
||||
async def _process_changes(self, delay: float = 2.0):
|
||||
"""Process pending changes with debouncing"""
|
||||
await asyncio.sleep(delay)
|
||||
@@ -209,9 +233,11 @@ class LoraFileHandler(FileSystemEventHandler):
|
||||
if not changes:
|
||||
return
|
||||
|
||||
logger.info(f"Processing {len(changes)} file changes")
|
||||
logger.info(f"Processing {len(changes)} LoRA file changes")
|
||||
|
||||
cache = await self.scanner.get_cached_data()
|
||||
# Get scanner through ServiceRegistry
|
||||
scanner = await ServiceRegistry.get_lora_scanner()
|
||||
cache = await scanner.get_cached_data()
|
||||
needs_resort = False
|
||||
new_folders = set()
|
||||
|
||||
@@ -225,36 +251,36 @@ class LoraFileHandler(FileSystemEventHandler):
|
||||
continue
|
||||
|
||||
# Scan new file
|
||||
lora_data = await self.scanner.scan_single_lora(file_path)
|
||||
if lora_data:
|
||||
model_data = await scanner.scan_single_model(file_path)
|
||||
if model_data:
|
||||
# Update tags count
|
||||
for tag in lora_data.get('tags', []):
|
||||
self.scanner._tags_count[tag] = self.scanner._tags_count.get(tag, 0) + 1
|
||||
for tag in model_data.get('tags', []):
|
||||
scanner._tags_count[tag] = scanner._tags_count.get(tag, 0) + 1
|
||||
|
||||
cache.raw_data.append(lora_data)
|
||||
new_folders.add(lora_data['folder'])
|
||||
cache.raw_data.append(model_data)
|
||||
new_folders.add(model_data['folder'])
|
||||
# Update hash index
|
||||
if 'sha256' in lora_data:
|
||||
self.scanner._hash_index.add_entry(
|
||||
lora_data['sha256'],
|
||||
lora_data['file_path']
|
||||
if 'sha256' in model_data:
|
||||
scanner._hash_index.add_entry(
|
||||
model_data['sha256'],
|
||||
model_data['file_path']
|
||||
)
|
||||
needs_resort = True
|
||||
|
||||
elif action == 'remove':
|
||||
# Find the lora to remove so we can update tags count
|
||||
lora_to_remove = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
|
||||
if lora_to_remove:
|
||||
# Find the model to remove so we can update tags count
|
||||
model_to_remove = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
|
||||
if model_to_remove:
|
||||
# Update tags count by reducing counts
|
||||
for tag in lora_to_remove.get('tags', []):
|
||||
if tag in self.scanner._tags_count:
|
||||
self.scanner._tags_count[tag] = max(0, self.scanner._tags_count[tag] - 1)
|
||||
if self.scanner._tags_count[tag] == 0:
|
||||
del self.scanner._tags_count[tag]
|
||||
for tag in model_to_remove.get('tags', []):
|
||||
if tag in scanner._tags_count:
|
||||
scanner._tags_count[tag] = max(0, scanner._tags_count[tag] - 1)
|
||||
if scanner._tags_count[tag] == 0:
|
||||
del scanner._tags_count[tag]
|
||||
|
||||
# Remove from cache and hash index
|
||||
logger.info(f"Removing {file_path} from cache")
|
||||
self.scanner._hash_index.remove_by_path(file_path)
|
||||
scanner._hash_index.remove_by_path(file_path)
|
||||
cache.raw_data = [
|
||||
item for item in cache.raw_data
|
||||
if item['file_path'] != file_path
|
||||
@@ -272,62 +298,245 @@ class LoraFileHandler(FileSystemEventHandler):
|
||||
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in process_changes: {e}")
|
||||
logger.error(f"Error in process_changes for LoRA: {e}")
|
||||
|
||||
|
||||
class LoraFileMonitor:
|
||||
"""Monitor for LoRA file changes"""
|
||||
class CheckpointFileHandler(BaseFileHandler):
|
||||
"""Handler for checkpoint file system events"""
|
||||
|
||||
def __init__(self, scanner: LoraScanner, roots: List[str]):
|
||||
self.scanner = scanner
|
||||
scanner.set_file_monitor(self)
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop):
|
||||
super().__init__(loop)
|
||||
# Set supported file extensions for checkpoints
|
||||
self.file_extensions = {'.safetensors', '.ckpt', '.pt', '.pth', '.sft', '.gguf'}
|
||||
|
||||
async def _process_changes(self, delay: float = 2.0):
|
||||
"""Process pending changes with debouncing for checkpoint files"""
|
||||
await asyncio.sleep(delay)
|
||||
|
||||
try:
|
||||
with self.lock:
|
||||
changes = self.pending_changes.copy()
|
||||
self.pending_changes.clear()
|
||||
|
||||
if not changes:
|
||||
return
|
||||
|
||||
logger.info(f"Processing {len(changes)} checkpoint file changes")
|
||||
|
||||
# Get scanner through ServiceRegistry
|
||||
scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
cache = await scanner.get_cached_data()
|
||||
needs_resort = False
|
||||
new_folders = set()
|
||||
|
||||
for action, file_path in changes:
|
||||
try:
|
||||
if action == 'add':
|
||||
# Check if file already exists in cache
|
||||
existing = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
|
||||
if existing:
|
||||
logger.info(f"File {file_path} already in cache, skipping")
|
||||
continue
|
||||
|
||||
# Scan new file
|
||||
model_data = await scanner.scan_single_model(file_path)
|
||||
if model_data:
|
||||
# Update tags count if applicable
|
||||
for tag in model_data.get('tags', []):
|
||||
scanner._tags_count[tag] = scanner._tags_count.get(tag, 0) + 1
|
||||
|
||||
cache.raw_data.append(model_data)
|
||||
new_folders.add(model_data['folder'])
|
||||
# Update hash index
|
||||
if 'sha256' in model_data:
|
||||
scanner._hash_index.add_entry(
|
||||
model_data['sha256'],
|
||||
model_data['file_path']
|
||||
)
|
||||
needs_resort = True
|
||||
|
||||
elif action == 'remove':
|
||||
# Find the model to remove so we can update tags count
|
||||
model_to_remove = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
|
||||
if model_to_remove:
|
||||
# Update tags count by reducing counts
|
||||
for tag in model_to_remove.get('tags', []):
|
||||
if tag in scanner._tags_count:
|
||||
scanner._tags_count[tag] = max(0, scanner._tags_count[tag] - 1)
|
||||
if scanner._tags_count[tag] == 0:
|
||||
del scanner._tags_count[tag]
|
||||
|
||||
# Remove from cache and hash index
|
||||
logger.info(f"Removing {file_path} from checkpoint cache")
|
||||
scanner._hash_index.remove_by_path(file_path)
|
||||
cache.raw_data = [
|
||||
item for item in cache.raw_data
|
||||
if item['file_path'] != file_path
|
||||
]
|
||||
needs_resort = True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing checkpoint {action} for {file_path}: {e}")
|
||||
|
||||
if needs_resort:
|
||||
await cache.resort()
|
||||
|
||||
# Update folder list
|
||||
all_folders = set(cache.folders) | new_folders
|
||||
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in process_changes for checkpoint: {e}")
|
||||
|
||||
|
||||
class BaseFileMonitor:
|
||||
"""Base class for file monitoring"""
|
||||
|
||||
def __init__(self, monitor_paths: List[str]):
|
||||
self.observer = Observer()
|
||||
self.loop = asyncio.get_event_loop()
|
||||
self.handler = LoraFileHandler(scanner, self.loop)
|
||||
|
||||
# Reuse the existing path mappings
|
||||
self.monitor_paths = set()
|
||||
for root in roots:
|
||||
self.monitor_paths.add(os.path.realpath(root).replace(os.sep, '/'))
|
||||
|
||||
# Process monitor paths
|
||||
for path in monitor_paths:
|
||||
self.monitor_paths.add(os.path.realpath(path).replace(os.sep, '/'))
|
||||
|
||||
# 添加所有已映射的目标路径
|
||||
# Add mapped paths from config
|
||||
for target_path in config._path_mappings.keys():
|
||||
self.monitor_paths.add(target_path)
|
||||
|
||||
|
||||
def start(self):
|
||||
"""Start monitoring"""
|
||||
for path_info in self.monitor_paths:
|
||||
"""Start file monitoring"""
|
||||
if not ENABLE_FILE_MONITORING:
|
||||
logger.debug("File monitoring is disabled via ENABLE_FILE_MONITORING setting")
|
||||
return
|
||||
|
||||
for path in self.monitor_paths:
|
||||
try:
|
||||
if isinstance(path_info, tuple):
|
||||
# For links, monitor the target path
|
||||
_, target_path = path_info
|
||||
self.observer.schedule(self.handler, target_path, recursive=True)
|
||||
logger.info(f"Started monitoring target path: {target_path}")
|
||||
else:
|
||||
# For regular paths, monitor them directly
|
||||
self.observer.schedule(self.handler, path_info, recursive=True)
|
||||
logger.info(f"Started monitoring: {path_info}")
|
||||
self.observer.schedule(self.handler, path, recursive=True)
|
||||
logger.info(f"Started monitoring: {path}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error monitoring {path_info}: {e}")
|
||||
logger.error(f"Error monitoring {path}: {e}")
|
||||
|
||||
self.observer.start()
|
||||
|
||||
|
||||
def stop(self):
|
||||
"""Stop monitoring"""
|
||||
"""Stop file monitoring"""
|
||||
if not ENABLE_FILE_MONITORING:
|
||||
return
|
||||
|
||||
self.observer.stop()
|
||||
self.observer.join()
|
||||
|
||||
|
||||
def rescan_links(self):
|
||||
"""重新扫描链接(当添加新的链接时调用)"""
|
||||
"""Rescan links when new ones are added"""
|
||||
if not ENABLE_FILE_MONITORING:
|
||||
return
|
||||
|
||||
# Find new paths not yet being monitored
|
||||
new_paths = set()
|
||||
for path in self.monitor_paths.copy():
|
||||
self._add_link_targets(path)
|
||||
for path in config._path_mappings.keys():
|
||||
real_path = os.path.realpath(path).replace(os.sep, '/')
|
||||
if real_path not in self.monitor_paths:
|
||||
new_paths.add(real_path)
|
||||
self.monitor_paths.add(real_path)
|
||||
|
||||
# 添加新发现的路径到监控
|
||||
new_paths = self.monitor_paths - set(self.observer.watches.keys())
|
||||
# Add new paths to monitoring
|
||||
for path in new_paths:
|
||||
try:
|
||||
self.observer.schedule(self.handler, path, recursive=True)
|
||||
logger.info(f"Added new monitoring path: {path}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error adding new monitor for {path}: {e}")
|
||||
logger.error(f"Error adding new monitor for {path}: {e}")
|
||||
|
||||
|
||||
class LoraFileMonitor(BaseFileMonitor):
|
||||
"""Monitor for LoRA file changes"""
|
||||
|
||||
_instance = None
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
def __new__(cls, monitor_paths=None):
|
||||
if cls._instance is None:
|
||||
cls._instance = super().__new__(cls)
|
||||
return cls._instance
|
||||
|
||||
def __init__(self, monitor_paths=None):
|
||||
if not hasattr(self, '_initialized'):
|
||||
if monitor_paths is None:
|
||||
from ..config import config
|
||||
monitor_paths = config.loras_roots
|
||||
|
||||
super().__init__(monitor_paths)
|
||||
self.handler = LoraFileHandler(self.loop)
|
||||
self._initialized = True
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls):
|
||||
"""Get singleton instance with async support"""
|
||||
async with cls._lock:
|
||||
if cls._instance is None:
|
||||
from ..config import config
|
||||
cls._instance = cls(config.loras_roots)
|
||||
return cls._instance
|
||||
|
||||
|
||||
class CheckpointFileMonitor(BaseFileMonitor):
|
||||
"""Monitor for checkpoint file changes"""
|
||||
|
||||
_instance = None
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
def __new__(cls, monitor_paths=None):
|
||||
if cls._instance is None:
|
||||
cls._instance = super().__new__(cls)
|
||||
return cls._instance
|
||||
|
||||
def __init__(self, monitor_paths=None):
|
||||
if not hasattr(self, '_initialized'):
|
||||
if monitor_paths is None:
|
||||
# Get checkpoint roots from scanner
|
||||
monitor_paths = []
|
||||
# We'll initialize monitor paths later when scanner is available
|
||||
|
||||
super().__init__(monitor_paths or [])
|
||||
self.handler = CheckpointFileHandler(self.loop)
|
||||
self._initialized = True
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls):
|
||||
"""Get singleton instance with async support"""
|
||||
async with cls._lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = cls([])
|
||||
|
||||
# Now get checkpoint roots from scanner
|
||||
from .checkpoint_scanner import CheckpointScanner
|
||||
scanner = await CheckpointScanner.get_instance()
|
||||
monitor_paths = scanner.get_model_roots()
|
||||
|
||||
# Update monitor paths - but don't actually monitor them
|
||||
for path in monitor_paths:
|
||||
real_path = os.path.realpath(path).replace(os.sep, '/')
|
||||
cls._instance.monitor_paths.add(real_path)
|
||||
|
||||
return cls._instance
|
||||
|
||||
def start(self):
|
||||
"""Override start to check global enable flag"""
|
||||
if not ENABLE_FILE_MONITORING:
|
||||
logger.debug("Checkpoint file monitoring is disabled via ENABLE_FILE_MONITORING setting")
|
||||
return
|
||||
|
||||
logger.debug("Checkpoint file monitoring is temporarily disabled")
|
||||
# Skip the actual monitoring setup
|
||||
pass
|
||||
|
||||
async def initialize_paths(self):
|
||||
"""Initialize monitor paths from scanner - currently disabled"""
|
||||
if not ENABLE_FILE_MONITORING:
|
||||
logger.debug("Checkpoint path initialization skipped (monitoring disabled)")
|
||||
return
|
||||
|
||||
logger.debug("Checkpoint file path initialization skipped (monitoring disabled)")
|
||||
pass
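Both monitor classes above share the same lazily constructed singleton shape. A minimal sketch of that pattern in isolation, with an illustrative class name:

import asyncio

class SingletonMonitor:
    _instance = None
    _lock = asyncio.Lock()

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    @classmethod
    async def get_instance(cls):
        # Serialize first-time construction across concurrent callers.
        async with cls._lock:
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance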
|
||||
@@ -4,22 +4,21 @@ import logging
|
||||
import asyncio
|
||||
import shutil
|
||||
import time
|
||||
from typing import List, Dict, Optional
|
||||
from typing import List, Dict, Optional, Set
|
||||
|
||||
from ..utils.models import LoraMetadata
|
||||
from ..config import config
|
||||
from ..utils.file_utils import load_metadata, get_file_info, normalize_path, find_preview_file, save_metadata
|
||||
from ..utils.lora_metadata import extract_lora_metadata
|
||||
from .lora_cache import LoraCache
|
||||
from .lora_hash_index import LoraHashIndex
|
||||
from .model_scanner import ModelScanner
|
||||
from .model_hash_index import ModelHashIndex # Changed from LoraHashIndex to ModelHashIndex
|
||||
from .settings_manager import settings
|
||||
from ..utils.constants import NSFW_LEVELS
|
||||
from ..utils.utils import fuzzy_match
|
||||
from .service_registry import ServiceRegistry
|
||||
import sys
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class LoraScanner:
|
||||
class LoraScanner(ModelScanner):
|
||||
"""Service for scanning and managing LoRA files"""
|
||||
|
||||
_instance = None
|
||||
@@ -31,20 +30,20 @@ class LoraScanner:
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
# 确保初始化只执行一次
|
||||
# Ensure initialization happens only once
|
||||
if not hasattr(self, '_initialized'):
|
||||
self._cache: Optional[LoraCache] = None
|
||||
self._hash_index = LoraHashIndex()
|
||||
self._initialization_lock = asyncio.Lock()
|
||||
self._initialization_task: Optional[asyncio.Task] = None
|
||||
# Define supported file extensions
|
||||
file_extensions = {'.safetensors'}
|
||||
|
||||
# Initialize parent class with ModelHashIndex
|
||||
super().__init__(
|
||||
model_type="lora",
|
||||
model_class=LoraMetadata,
|
||||
file_extensions=file_extensions,
|
||||
hash_index=ModelHashIndex() # Changed from LoraHashIndex to ModelHashIndex
|
||||
)
|
||||
self._initialized = True
|
||||
self.file_monitor = None # Add this line
|
||||
self._tags_count = {} # Add a dictionary to store tag counts
|
||||
|
||||
def set_file_monitor(self, monitor):
|
||||
"""Set file monitor instance"""
|
||||
self.file_monitor = monitor
|
||||
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls):
|
||||
"""Get singleton instance with async support"""
|
||||
@@ -52,89 +51,74 @@ class LoraScanner:
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
async def get_cached_data(self, force_refresh: bool = False) -> LoraCache:
|
||||
"""Get cached LoRA data, refresh if needed"""
|
||||
async with self._initialization_lock:
|
||||
|
||||
def get_model_roots(self) -> List[str]:
|
||||
"""Get lora root directories"""
|
||||
return config.loras_roots
|
||||
|
||||
async def scan_all_models(self) -> List[Dict]:
|
||||
"""Scan all LoRA directories and return metadata"""
|
||||
all_loras = []
|
||||
|
||||
# Create scan tasks for each directory
|
||||
scan_tasks = []
|
||||
for lora_root in self.get_model_roots():
|
||||
task = asyncio.create_task(self._scan_directory(lora_root))
|
||||
scan_tasks.append(task)
|
||||
|
||||
# If the cache is not initialized but a response is still needed, return an empty cache
|
||||
if self._cache is None and not force_refresh:
|
||||
return LoraCache(
|
||||
raw_data=[],
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[],
|
||||
folders=[]
|
||||
)
|
||||
|
||||
# If initialization is in progress, wait for it to finish
|
||||
if self._initialization_task and not self._initialization_task.done():
|
||||
try:
|
||||
await self._initialization_task
|
||||
except Exception as e:
|
||||
logger.error(f"Cache initialization failed: {e}")
|
||||
self._initialization_task = None
|
||||
|
||||
if (self._cache is None or force_refresh):
|
||||
# Wait for all tasks to complete
|
||||
for task in scan_tasks:
|
||||
try:
|
||||
loras = await task
|
||||
all_loras.extend(loras)
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning directory: {e}")
|
||||
|
||||
# Create a new initialization task
|
||||
if not self._initialization_task or self._initialization_task.done():
|
||||
self._initialization_task = asyncio.create_task(self._initialize_cache())
|
||||
return all_loras
|
||||
|
||||
async def _scan_directory(self, root_path: str) -> List[Dict]:
|
||||
"""Scan a single directory for LoRA files"""
|
||||
loras = []
|
||||
original_root = root_path # Save original root path
|
||||
|
||||
async def scan_recursive(path: str, visited_paths: set):
|
||||
"""Recursively scan directory, avoiding circular symlinks"""
|
||||
try:
|
||||
real_path = os.path.realpath(path)
|
||||
if real_path in visited_paths:
|
||||
logger.debug(f"Skipping already visited path: {path}")
|
||||
return
|
||||
visited_paths.add(real_path)
|
||||
|
||||
try:
|
||||
await self._initialization_task
|
||||
except Exception as e:
|
||||
logger.error(f"Cache initialization failed: {e}")
|
||||
# If a cache already exists, keep using the old cache
|
||||
if self._cache is None:
|
||||
raise # If there is no cache, raise the exception
|
||||
|
||||
return self._cache
|
||||
with os.scandir(path) as it:
|
||||
entries = list(it)
|
||||
for entry in entries:
|
||||
try:
|
||||
if entry.is_file(follow_symlinks=True) and any(entry.name.endswith(ext) for ext in self.file_extensions):
|
||||
# Use original path instead of real path
|
||||
file_path = entry.path.replace(os.sep, "/")
|
||||
await self._process_single_file(file_path, original_root, loras)
|
||||
await asyncio.sleep(0)
|
||||
elif entry.is_dir(follow_symlinks=True):
|
||||
# For directories, continue scanning with original path
|
||||
await scan_recursive(entry.path, visited_paths)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing entry {entry.path}: {e}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning {path}: {e}")
|
||||
|
||||
async def _initialize_cache(self) -> None:
|
||||
"""Initialize or refresh the cache"""
|
||||
await scan_recursive(root_path, set())
|
||||
return loras
|
||||
|
||||
async def _process_single_file(self, file_path: str, root_path: str, loras: list):
|
||||
"""Process a single file and add to results list"""
|
||||
try:
|
||||
start_time = time.time()
|
||||
# Clear existing hash index
|
||||
self._hash_index.clear()
|
||||
|
||||
# Clear existing tags count
|
||||
self._tags_count = {}
|
||||
|
||||
# Scan for new data
|
||||
raw_data = await self.scan_all_loras()
|
||||
|
||||
# Build hash index and tags count
|
||||
for lora_data in raw_data:
|
||||
if 'sha256' in lora_data and 'file_path' in lora_data:
|
||||
self._hash_index.add_entry(lora_data['sha256'].lower(), lora_data['file_path'])
|
||||
|
||||
# Count tags
|
||||
if 'tags' in lora_data and lora_data['tags']:
|
||||
for tag in lora_data['tags']:
|
||||
self._tags_count[tag] = self._tags_count.get(tag, 0) + 1
|
||||
|
||||
# Update cache
|
||||
self._cache = LoraCache(
|
||||
raw_data=raw_data,
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[],
|
||||
folders=[]
|
||||
)
|
||||
|
||||
# Call resort_cache to create sorted views
|
||||
await self._cache.resort()
|
||||
|
||||
self._initialization_task = None
|
||||
logger.info(f"LoRA Manager: Cache initialization completed in {time.time() - start_time:.2f} seconds, found {len(raw_data)} loras")
|
||||
result = await self._process_model_file(file_path, root_path)
|
||||
if result:
|
||||
loras.append(result)
|
||||
except Exception as e:
|
||||
logger.error(f"LoRA Manager: Error initializing cache: {e}")
|
||||
self._cache = LoraCache(
|
||||
raw_data=[],
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[],
|
||||
folders=[]
|
||||
)
|
||||
|
||||
logger.error(f"Error processing {file_path}: {e}")
|
||||
|
||||
async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'name',
|
||||
folder: str = None, search: str = None, fuzzy_search: bool = False,
|
||||
base_models: list = None, tags: list = None,
|
||||
@@ -280,348 +264,6 @@ class LoraScanner:
|
||||
|
||||
return result
|
||||
|
||||
def invalidate_cache(self):
|
||||
"""Invalidate the current cache"""
|
||||
self._cache = None
|
||||
|
||||
async def scan_all_loras(self) -> List[Dict]:
|
||||
"""Scan all LoRA directories and return metadata"""
|
||||
all_loras = []
|
||||
|
||||
# Scan each directory asynchronously
|
||||
scan_tasks = []
|
||||
for loras_root in config.loras_roots:
|
||||
task = asyncio.create_task(self._scan_directory(loras_root))
|
||||
scan_tasks.append(task)
|
||||
|
||||
for task in scan_tasks:
|
||||
try:
|
||||
loras = await task
|
||||
all_loras.extend(loras)
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning directory: {e}")
|
||||
|
||||
return all_loras
|
||||
|
||||
async def _scan_directory(self, root_path: str) -> List[Dict]:
|
||||
"""Scan a single directory for LoRA files"""
|
||||
loras = []
|
||||
original_root = root_path # Save the original root path
|
||||
|
||||
async def scan_recursive(path: str, visited_paths: set):
|
||||
"""递归扫描目录,避免循环链接"""
|
||||
try:
|
||||
real_path = os.path.realpath(path)
|
||||
if real_path in visited_paths:
|
||||
logger.debug(f"Skipping already visited path: {path}")
|
||||
return
|
||||
visited_paths.add(real_path)
|
||||
|
||||
with os.scandir(path) as it:
|
||||
entries = list(it)
|
||||
for entry in entries:
|
||||
try:
|
||||
if entry.is_file(follow_symlinks=True) and entry.name.endswith('.safetensors'):
|
||||
# Use the original path instead of the real path
|
||||
file_path = entry.path.replace(os.sep, "/")
|
||||
await self._process_single_file(file_path, original_root, loras)
|
||||
await asyncio.sleep(0)
|
||||
elif entry.is_dir(follow_symlinks=True):
|
||||
# For directories, continue scanning with the original path
|
||||
await scan_recursive(entry.path, visited_paths)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing entry {entry.path}: {e}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning {path}: {e}")
|
||||
|
||||
await scan_recursive(root_path, set())
|
||||
return loras
|
||||
|
||||
async def _process_single_file(self, file_path: str, root_path: str, loras: list):
|
||||
"""处理单个文件并添加到结果列表"""
|
||||
try:
|
||||
result = await self._process_lora_file(file_path, root_path)
|
||||
if result:
|
||||
loras.append(result)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing {file_path}: {e}")
|
||||
|
||||
async def _process_lora_file(self, file_path: str, root_path: str) -> Dict:
|
||||
"""Process a single LoRA file and return its metadata"""
|
||||
# Try loading existing metadata
|
||||
metadata = await load_metadata(file_path)
|
||||
|
||||
if metadata is None:
|
||||
# Try to find and use .civitai.info file first
|
||||
civitai_info_path = f"{os.path.splitext(file_path)[0]}.civitai.info"
|
||||
if os.path.exists(civitai_info_path):
|
||||
try:
|
||||
with open(civitai_info_path, 'r', encoding='utf-8') as f:
|
||||
version_info = json.load(f)
|
||||
|
||||
file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
|
||||
if file_info:
|
||||
# Create a minimal file_info with the required fields
|
||||
file_name = os.path.splitext(os.path.basename(file_path))[0]
|
||||
file_info['name'] = file_name
|
||||
|
||||
# Use from_civitai_info to create metadata
|
||||
metadata = LoraMetadata.from_civitai_info(version_info, file_info, file_path)
|
||||
metadata.preview_url = find_preview_file(file_name, os.path.dirname(file_path))
|
||||
await save_metadata(file_path, metadata)
|
||||
logger.debug(f"Created metadata from .civitai.info for {file_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error creating metadata from .civitai.info for {file_path}: {e}")
|
||||
|
||||
# If still no metadata, create new metadata using get_file_info
|
||||
if metadata is None:
|
||||
metadata = await get_file_info(file_path)
|
||||
|
||||
# Convert to dict and add folder info
|
||||
lora_data = metadata.to_dict()
|
||||
# Try to fetch missing metadata from Civitai if needed
|
||||
await self._fetch_missing_metadata(file_path, lora_data)
|
||||
rel_path = os.path.relpath(file_path, root_path)
|
||||
folder = os.path.dirname(rel_path)
|
||||
lora_data['folder'] = folder.replace(os.path.sep, '/')
|
||||
|
||||
return lora_data
|
||||
|
||||
async def _fetch_missing_metadata(self, file_path: str, lora_data: Dict) -> None:
|
||||
"""Fetch missing description and tags from Civitai if needed
|
||||
|
||||
Args:
|
||||
file_path: Path to the lora file
|
||||
lora_data: Lora metadata dictionary to update
|
||||
"""
|
||||
try:
|
||||
# Skip if already marked as deleted on Civitai
|
||||
if lora_data.get('civitai_deleted', False):
|
||||
logger.debug(f"Skipping metadata fetch for {file_path}: marked as deleted on Civitai")
|
||||
return
|
||||
|
||||
# Check if we need to fetch additional metadata from Civitai
|
||||
needs_metadata_update = False
|
||||
model_id = None
|
||||
|
||||
# Check if we have Civitai model ID but missing metadata
|
||||
if lora_data.get('civitai'):
|
||||
# Try to get model ID directly from the correct location
|
||||
model_id = lora_data['civitai'].get('modelId')
|
||||
|
||||
if model_id:
|
||||
model_id = str(model_id)
|
||||
# Check if tags are missing or empty
|
||||
tags_missing = not lora_data.get('tags') or len(lora_data.get('tags', [])) == 0
|
||||
|
||||
# Check if description is missing or empty
|
||||
desc_missing = not lora_data.get('modelDescription') or lora_data.get('modelDescription') in (None, "")
|
||||
|
||||
needs_metadata_update = tags_missing or desc_missing
|
||||
|
||||
# Fetch missing metadata if needed
|
||||
if needs_metadata_update and model_id:
|
||||
logger.debug(f"Fetching missing metadata for {file_path} with model ID {model_id}")
|
||||
from ..services.civitai_client import CivitaiClient
|
||||
client = CivitaiClient()
|
||||
|
||||
# Get metadata and status code
|
||||
model_metadata, status_code = await client.get_model_metadata(model_id)
|
||||
await client.close()
|
||||
|
||||
# Handle 404 status (model deleted from Civitai)
|
||||
if status_code == 404:
|
||||
logger.warning(f"Model {model_id} appears to be deleted from Civitai (404 response)")
|
||||
# Mark as deleted to avoid future API calls
|
||||
lora_data['civitai_deleted'] = True
|
||||
|
||||
# Save the updated metadata back to file
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(lora_data, f, indent=2, ensure_ascii=False)
|
||||
|
||||
# Process valid metadata if available
|
||||
elif model_metadata:
|
||||
logger.debug(f"Updating metadata for {file_path} with model ID {model_id}")
|
||||
|
||||
# Update tags if they were missing
|
||||
if model_metadata.get('tags') and (not lora_data.get('tags') or len(lora_data.get('tags', [])) == 0):
|
||||
lora_data['tags'] = model_metadata['tags']
|
||||
|
||||
# Update description if it was missing
|
||||
if model_metadata.get('description') and (not lora_data.get('modelDescription') or lora_data.get('modelDescription') in (None, "")):
|
||||
lora_data['modelDescription'] = model_metadata['description']
|
||||
|
||||
# Save the updated metadata back to file
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(lora_data, f, indent=2, ensure_ascii=False)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update metadata from Civitai for {file_path}: {e}")
|
||||
|
||||
async def update_preview_in_cache(self, file_path: str, preview_url: str) -> bool:
|
||||
"""Update preview URL in cache for a specific lora
|
||||
|
||||
Args:
|
||||
file_path: The file path of the lora to update
|
||||
preview_url: The new preview URL
|
||||
|
||||
Returns:
|
||||
bool: True if the update was successful, False if cache doesn't exist or lora wasn't found
|
||||
"""
|
||||
if self._cache is None:
|
||||
return False
|
||||
|
||||
return await self._cache.update_preview_url(file_path, preview_url)
|
||||
|
||||
async def scan_single_lora(self, file_path: str) -> Optional[Dict]:
|
||||
"""Scan a single LoRA file and return its metadata"""
|
||||
try:
|
||||
if not os.path.exists(os.path.realpath(file_path)):
|
||||
return None
|
||||
|
||||
# Get basic file information
|
||||
metadata = await get_file_info(file_path)
|
||||
if not metadata:
|
||||
return None
|
||||
|
||||
folder = self._calculate_folder(file_path)
|
||||
|
||||
# Ensure the folder field exists
|
||||
metadata_dict = metadata.to_dict()
|
||||
metadata_dict['folder'] = folder or ''
|
||||
|
||||
return metadata_dict
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning {file_path}: {e}")
|
||||
return None
|
||||
|
||||
def _calculate_folder(self, file_path: str) -> str:
|
||||
"""Calculate the folder path for a LoRA file"""
|
||||
# Use the original path to calculate the relative path
|
||||
for root in config.loras_roots:
|
||||
if file_path.startswith(root):
|
||||
rel_path = os.path.relpath(file_path, root)
|
||||
return os.path.dirname(rel_path).replace(os.path.sep, '/')
|
||||
return ''
|
||||
|
||||
async def move_model(self, source_path: str, target_path: str) -> bool:
|
||||
"""Move a model and its associated files to a new location"""
|
||||
try:
|
||||
# Keep the original path format
|
||||
source_path = source_path.replace(os.sep, '/')
|
||||
target_path = target_path.replace(os.sep, '/')
|
||||
|
||||
# The rest of the code stays the same
|
||||
base_name = os.path.splitext(os.path.basename(source_path))[0]
|
||||
source_dir = os.path.dirname(source_path)
|
||||
|
||||
os.makedirs(target_path, exist_ok=True)
|
||||
|
||||
target_lora = os.path.join(target_path, f"{base_name}.safetensors").replace(os.sep, '/')
|
||||
|
||||
# Use real paths for file operations
|
||||
real_source = os.path.realpath(source_path)
|
||||
real_target = os.path.realpath(target_lora)
|
||||
|
||||
file_size = os.path.getsize(real_source)
|
||||
|
||||
if self.file_monitor:
|
||||
self.file_monitor.handler.add_ignore_path(
|
||||
real_source,
|
||||
file_size
|
||||
)
|
||||
self.file_monitor.handler.add_ignore_path(
|
||||
real_target,
|
||||
file_size
|
||||
)
|
||||
|
||||
# Use real paths for file operations
|
||||
shutil.move(real_source, real_target)
|
||||
|
||||
# Move associated files
|
||||
source_metadata = os.path.join(source_dir, f"{base_name}.metadata.json")
|
||||
if os.path.exists(source_metadata):
|
||||
target_metadata = os.path.join(target_path, f"{base_name}.metadata.json")
|
||||
shutil.move(source_metadata, target_metadata)
|
||||
metadata = await self._update_metadata_paths(target_metadata, target_lora)
|
||||
|
||||
# Move preview file if exists
|
||||
preview_extensions = ['.preview.png', '.preview.jpeg', '.preview.jpg', '.preview.mp4',
|
||||
'.png', '.jpeg', '.jpg', '.mp4']
|
||||
for ext in preview_extensions:
|
||||
source_preview = os.path.join(source_dir, f"{base_name}{ext}")
|
||||
if os.path.exists(source_preview):
|
||||
target_preview = os.path.join(target_path, f"{base_name}{ext}")
|
||||
shutil.move(source_preview, target_preview)
|
||||
break
|
||||
|
||||
# Update cache
|
||||
await self.update_single_lora_cache(source_path, target_lora, metadata)
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error moving model: {e}", exc_info=True)
|
||||
return False
|
||||
|
||||
async def update_single_lora_cache(self, original_path: str, new_path: str, metadata: Dict) -> bool:
|
||||
cache = await self.get_cached_data()
|
||||
|
||||
# Find the existing item to remove its tags from count
|
||||
existing_item = next((item for item in cache.raw_data if item['file_path'] == original_path), None)
|
||||
if existing_item and 'tags' in existing_item:
|
||||
for tag in existing_item.get('tags', []):
|
||||
if tag in self._tags_count:
|
||||
self._tags_count[tag] = max(0, self._tags_count[tag] - 1)
|
||||
if self._tags_count[tag] == 0:
|
||||
del self._tags_count[tag]
|
||||
|
||||
# Remove old path from hash index if exists
|
||||
self._hash_index.remove_by_path(original_path)
|
||||
|
||||
# Remove the old entry from raw_data
|
||||
cache.raw_data = [
|
||||
item for item in cache.raw_data
|
||||
if item['file_path'] != original_path
|
||||
]
|
||||
|
||||
if metadata:
|
||||
# If this is an update to an existing path (not a move), ensure folder is preserved
|
||||
if original_path == new_path:
|
||||
# Find the folder from existing entries or calculate it
|
||||
existing_folder = next((item['folder'] for item in cache.raw_data
|
||||
if item['file_path'] == original_path), None)
|
||||
if existing_folder:
|
||||
metadata['folder'] = existing_folder
|
||||
else:
|
||||
metadata['folder'] = self._calculate_folder(new_path)
|
||||
else:
|
||||
# For moved files, recalculate the folder
|
||||
metadata['folder'] = self._calculate_folder(new_path)
|
||||
|
||||
# Add the updated metadata to raw_data
|
||||
cache.raw_data.append(metadata)
|
||||
|
||||
# Update hash index with new path
|
||||
if 'sha256' in metadata:
|
||||
self._hash_index.add_entry(metadata['sha256'].lower(), new_path)
|
||||
|
||||
# Update folders list
|
||||
all_folders = set(item['folder'] for item in cache.raw_data)
|
||||
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
|
||||
|
||||
# Update tags count with the new/updated tags
|
||||
if 'tags' in metadata:
|
||||
for tag in metadata.get('tags', []):
|
||||
self._tags_count[tag] = self._tags_count.get(tag, 0) + 1
|
||||
|
||||
# Resort cache
|
||||
await cache.resort()
|
||||
|
||||
return True
|
||||
|
||||
async def _update_metadata_paths(self, metadata_path: str, lora_path: str) -> Dict:
|
||||
"""Update file paths in metadata file"""
|
||||
try:
|
||||
@@ -648,49 +290,21 @@ class LoraScanner:
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating metadata paths: {e}", exc_info=True)
|
||||
|
||||
# Add new methods for hash index functionality
|
||||
# Lora-specific hash index functionality
|
||||
def has_lora_hash(self, sha256: str) -> bool:
|
||||
"""Check if a LoRA with given hash exists"""
|
||||
return self._hash_index.has_hash(sha256.lower())
|
||||
return self.has_hash(sha256)
|
||||
|
||||
def get_lora_path_by_hash(self, sha256: str) -> Optional[str]:
|
||||
"""Get file path for a LoRA by its hash"""
|
||||
return self._hash_index.get_path(sha256.lower())
|
||||
return self.get_path_by_hash(sha256)
|
||||
|
||||
def get_lora_hash_by_path(self, file_path: str) -> Optional[str]:
|
||||
"""Get hash for a LoRA by its file path"""
|
||||
return self._hash_index.get_hash(file_path)
|
||||
return self.get_hash_by_path(file_path)
|
||||
|
||||
def get_preview_url_by_hash(self, sha256: str) -> Optional[str]:
|
||||
"""Get preview static URL for a LoRA by its hash"""
|
||||
# Get the file path first
|
||||
file_path = self._hash_index.get_path(sha256.lower())
|
||||
if not file_path:
|
||||
return None
|
||||
|
||||
# Determine the preview file path (typically same name with different extension)
|
||||
base_name = os.path.splitext(file_path)[0]
|
||||
preview_extensions = ['.preview.png', '.preview.jpeg', '.preview.jpg', '.preview.mp4',
|
||||
'.png', '.jpeg', '.jpg', '.mp4']
|
||||
|
||||
for ext in preview_extensions:
|
||||
preview_path = f"{base_name}{ext}"
|
||||
if os.path.exists(preview_path):
|
||||
# Convert to static URL using config
|
||||
return config.get_preview_static_url(preview_path)
|
||||
|
||||
return None
|
||||
|
||||
# Add new method to get top tags
|
||||
async def get_top_tags(self, limit: int = 20) -> List[Dict[str, any]]:
|
||||
"""Get top tags sorted by count
|
||||
|
||||
Args:
|
||||
limit: Maximum number of tags to return
|
||||
|
||||
Returns:
|
||||
List of dictionaries with tag name and count, sorted by count
|
||||
"""
|
||||
"""Get top tags sorted by count"""
|
||||
# Make sure cache is initialized
|
||||
await self.get_cached_data()
|
||||
|
||||
@@ -705,14 +319,7 @@ class LoraScanner:
|
||||
return sorted_tags[:limit]
|
||||
|
||||
async def get_base_models(self, limit: int = 20) -> List[Dict[str, any]]:
|
||||
"""Get base models used in loras sorted by frequency
|
||||
|
||||
Args:
|
||||
limit: Maximum number of base models to return
|
||||
|
||||
Returns:
|
||||
List of dictionaries with base model name and count, sorted by count
|
||||
"""
|
||||
"""Get base models used in loras sorted by frequency"""
|
||||
# Make sure cache is initialized
|
||||
cache = await self.get_cached_data()
|
||||
|
||||
|
||||
64
py/services/model_cache.py
Normal file
64
py/services/model_cache.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import asyncio
|
||||
from typing import List, Dict
|
||||
from dataclasses import dataclass
|
||||
from operator import itemgetter
|
||||
|
||||
@dataclass
|
||||
class ModelCache:
|
||||
"""Cache structure for model data"""
|
||||
raw_data: List[Dict]
|
||||
sorted_by_name: List[Dict]
|
||||
sorted_by_date: List[Dict]
|
||||
folders: List[str]
|
||||
|
||||
def __post_init__(self):
|
||||
self._lock = asyncio.Lock()
|
||||
|
||||
async def resort(self, name_only: bool = False):
|
||||
"""Resort all cached data views"""
|
||||
async with self._lock:
|
||||
self.sorted_by_name = sorted(
|
||||
self.raw_data,
|
||||
key=lambda x: x['model_name'].lower() # Case-insensitive sort
|
||||
)
|
||||
if not name_only:
|
||||
self.sorted_by_date = sorted(
|
||||
self.raw_data,
|
||||
key=itemgetter('modified'),
|
||||
reverse=True
|
||||
)
|
||||
# Update folder list
|
||||
all_folders = set(l['folder'] for l in self.raw_data)
|
||||
self.folders = sorted(list(all_folders), key=lambda x: x.lower())
|
||||
|
||||
async def update_preview_url(self, file_path: str, preview_url: str) -> bool:
|
||||
"""Update preview_url for a specific model in all cached data
|
||||
|
||||
Args:
|
||||
file_path: The file path of the model to update
|
||||
preview_url: The new preview URL
|
||||
|
||||
Returns:
|
||||
bool: True if the update was successful, False if the model wasn't found
|
||||
"""
|
||||
async with self._lock:
|
||||
# Update in raw_data
|
||||
for item in self.raw_data:
|
||||
if item['file_path'] == file_path:
|
||||
item['preview_url'] = preview_url
|
||||
break
|
||||
else:
|
||||
return False # Model not found
|
||||
|
||||
# Update in sorted lists (references to the same dict objects)
|
||||
for item in self.sorted_by_name:
|
||||
if item['file_path'] == file_path:
|
||||
item['preview_url'] = preview_url
|
||||
break
|
||||
|
||||
for item in self.sorted_by_date:
|
||||
if item['file_path'] == file_path:
|
||||
item['preview_url'] = preview_url
|
||||
break
|
||||
|
||||
return True
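A short usage sketch for the cache above; the sample record is invented, but the field names (model_name, modified, folder, file_path, preview_url) are the ones resort and update_preview_url actually touch.

import asyncio

async def demo():
    cache = ModelCache(raw_data=[], sorted_by_name=[], sorted_by_date=[], folders=[])
    cache.raw_data.append({
        'model_name': 'Example', 'modified': 0.0, 'folder': 'sdxl',
        'file_path': '/loras/example.safetensors', 'preview_url': '',
    })
    await cache.resort()
    await cache.update_preview_url('/loras/example.safetensors', '/loras/example.webp')

asyncio.run(demo())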
|
||||
96
py/services/model_hash_index.py
Normal file
96
py/services/model_hash_index.py
Normal file
@@ -0,0 +1,96 @@
|
||||
from typing import Dict, Optional, Set
|
||||
import os
|
||||
|
||||
class ModelHashIndex:
|
||||
"""Index for looking up models by hash or path"""
|
||||
|
||||
def __init__(self):
|
||||
self._hash_to_path: Dict[str, str] = {}
|
||||
self._filename_to_hash: Dict[str, str] = {} # Changed from path_to_hash to filename_to_hash
|
||||
|
||||
def add_entry(self, sha256: str, file_path: str) -> None:
|
||||
"""Add or update hash index entry"""
|
||||
if not sha256 or not file_path:
|
||||
return
|
||||
|
||||
# Ensure hash is lowercase for consistency
|
||||
sha256 = sha256.lower()
|
||||
|
||||
# Extract filename without extension
|
||||
filename = self._get_filename_from_path(file_path)
|
||||
|
||||
# Remove old path mapping if hash exists
|
||||
if sha256 in self._hash_to_path:
|
||||
old_path = self._hash_to_path[sha256]
|
||||
old_filename = self._get_filename_from_path(old_path)
|
||||
if old_filename in self._filename_to_hash:
|
||||
del self._filename_to_hash[old_filename]
|
||||
|
||||
# Remove old hash mapping if filename exists
|
||||
if filename in self._filename_to_hash:
|
||||
old_hash = self._filename_to_hash[filename]
|
||||
if old_hash in self._hash_to_path:
|
||||
del self._hash_to_path[old_hash]
|
||||
|
||||
# Add new mappings
|
||||
self._hash_to_path[sha256] = file_path
|
||||
self._filename_to_hash[filename] = sha256
|
||||
|
||||
def _get_filename_from_path(self, file_path: str) -> str:
|
||||
"""Extract filename without extension from path"""
|
||||
return os.path.splitext(os.path.basename(file_path))[0]
|
||||
|
||||
def remove_by_path(self, file_path: str) -> None:
|
||||
"""Remove entry by file path"""
|
||||
filename = self._get_filename_from_path(file_path)
|
||||
if filename in self._filename_to_hash:
|
||||
hash_val = self._filename_to_hash[filename]
|
||||
if hash_val in self._hash_to_path:
|
||||
del self._hash_to_path[hash_val]
|
||||
del self._filename_to_hash[filename]
|
||||
|
||||
def remove_by_hash(self, sha256: str) -> None:
|
||||
"""Remove entry by hash"""
|
||||
sha256 = sha256.lower()
|
||||
if sha256 in self._hash_to_path:
|
||||
path = self._hash_to_path[sha256]
|
||||
filename = self._get_filename_from_path(path)
|
||||
if filename in self._filename_to_hash:
|
||||
del self._filename_to_hash[filename]
|
||||
del self._hash_to_path[sha256]
|
||||
|
||||
def has_hash(self, sha256: str) -> bool:
|
||||
"""Check if hash exists in index"""
|
||||
return sha256.lower() in self._hash_to_path
|
||||
|
||||
def get_path(self, sha256: str) -> Optional[str]:
|
||||
"""Get file path for a hash"""
|
||||
return self._hash_to_path.get(sha256.lower())
|
||||
|
||||
def get_hash(self, file_path: str) -> Optional[str]:
|
||||
"""Get hash for a file path"""
|
||||
filename = self._get_filename_from_path(file_path)
|
||||
return self._filename_to_hash.get(filename)
|
||||
|
||||
def get_hash_by_filename(self, filename: str) -> Optional[str]:
|
||||
"""Get hash for a filename without extension"""
|
||||
# Strip extension if present to make the function more flexible
|
||||
filename = os.path.splitext(filename)[0]
|
||||
return self._filename_to_hash.get(filename)
|
||||
|
||||
def clear(self) -> None:
|
||||
"""Clear all entries"""
|
||||
self._hash_to_path.clear()
|
||||
self._filename_to_hash.clear()
|
||||
|
||||
def get_all_hashes(self) -> Set[str]:
|
||||
"""Get all hashes in the index"""
|
||||
return set(self._hash_to_path.keys())
|
||||
|
||||
def get_all_filenames(self) -> Set[str]:
|
||||
"""Get all filenames in the index"""
|
||||
return set(self._filename_to_hash.keys())
|
||||
|
||||
def __len__(self) -> int:
|
||||
"""Get number of entries"""
|
||||
return len(self._hash_to_path)
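A usage sketch for the index above: lookups are keyed by lowercase hash and by filename without extension, so re-adding a hash under a new path replaces the old filename mapping.

idx = ModelHashIndex()
idx.add_entry('ABC123', '/loras/foo.safetensors')  # hash stored lowercase
idx.has_hash('abc123')                             # True
idx.get_path('abc123')                             # '/loras/foo.safetensors'
idx.get_hash_by_filename('foo')                    # 'abc123'
idx.remove_by_path('/loras/foo.safetensors')
len(idx)                                           # 0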
|
||||
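The index above keeps two maps, keyed by lowercase SHA256 and by filename without extension, and evicts stale entries whenever either key is reused. A minimal usage sketch (hashes and paths below are invented):

```python
# Illustrative only: hashes and paths are made up.
index = ModelHashIndex()
index.add_entry("ABC123", "/models/loras/style_v1.safetensors")

# Hashes are normalized to lowercase, so lookups are case-insensitive.
assert index.has_hash("abc123")
assert index.get_path("abc123") == "/models/loras/style_v1.safetensors"

# The reverse map is keyed by filename without extension, so any path ending
# in "style_v1.<ext>" resolves to the same hash.
assert index.get_hash("/elsewhere/style_v1.ckpt") == "abc123"

# Re-adding the same filename with a new hash evicts the old mapping.
index.add_entry("DEF456", "/models/loras/style_v1.safetensors")
assert len(index) == 1 and index.get_path("def456") is not None
```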
910
py/services/model_scanner.py
Normal file
@@ -0,0 +1,910 @@
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
import asyncio
|
||||
import time
|
||||
import shutil
|
||||
from typing import List, Dict, Optional, Type, Set, Any
|
||||
|
||||
from ..utils.models import BaseModelMetadata
|
||||
from ..config import config
|
||||
from ..utils.file_utils import load_metadata, get_file_info, find_preview_file, save_metadata
|
||||
from .model_cache import ModelCache
|
||||
from .model_hash_index import ModelHashIndex
|
||||
from ..utils.constants import PREVIEW_EXTENSIONS
|
||||
from .service_registry import ServiceRegistry
|
||||
from .websocket_manager import ws_manager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class ModelScanner:
|
||||
"""Base service for scanning and managing model files"""
|
||||
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
def __init__(self, model_type: str, model_class: Type[BaseModelMetadata], file_extensions: Set[str], hash_index: Optional[ModelHashIndex] = None):
|
||||
"""Initialize the scanner
|
||||
|
||||
Args:
|
||||
model_type: Type of model (lora, checkpoint, etc.)
|
||||
model_class: Class used to create metadata instances
|
||||
file_extensions: Set of supported file extensions including the dot (e.g. {'.safetensors'})
|
||||
hash_index: Hash index instance (optional)
|
||||
"""
|
||||
self.model_type = model_type
|
||||
self.model_class = model_class
|
||||
self.file_extensions = file_extensions
|
||||
self._cache = None
|
||||
self._hash_index = hash_index or ModelHashIndex()
|
||||
self._tags_count = {} # Dictionary to store tag counts
|
||||
self._is_initializing = False # Flag to track initialization state
|
||||
|
||||
# Register this service
|
||||
asyncio.create_task(self._register_service())
|
||||
|
||||
async def _register_service(self):
|
||||
"""Register this instance with the ServiceRegistry"""
|
||||
service_name = f"{self.model_type}_scanner"
|
||||
await ServiceRegistry.register_service(service_name, self)
|
||||
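ModelScanner leaves scan_all_models and get_model_roots to its subclasses and registers itself as "<model_type>_scanner". A hypothetical subclass might look like the sketch below (the class name, extensions and the direct reuse of config.loras_roots are assumptions for illustration, not the repo's actual LoraScanner):

```python
class ExampleLoraScanner(ModelScanner):
    """Sketch of a concrete scanner; not the actual LoraScanner in this repo."""

    def __init__(self):
        super().__init__(
            model_type="lora",                    # registered as "lora_scanner"
            model_class=BaseModelMetadata,        # a LoRA-specific metadata class would go here
            file_extensions={".safetensors", ".pt"},
        )

    def get_model_roots(self) -> List[str]:
        return config.loras_roots

    async def scan_all_models(self) -> List[Dict]:
        models = []
        for root in self.get_model_roots():
            models.extend(await self._scan_directory(root))
        return models
```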
|
||||
async def initialize_in_background(self) -> None:
|
||||
"""Initialize cache in background using thread pool"""
|
||||
try:
|
||||
# Set initial empty cache to avoid None reference errors
|
||||
if self._cache is None:
|
||||
self._cache = ModelCache(
|
||||
raw_data=[],
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[],
|
||||
folders=[]
|
||||
)
|
||||
|
||||
# Set initializing flag to true
|
||||
self._is_initializing = True
|
||||
|
||||
# Determine the page type based on model type
|
||||
page_type = 'loras' if self.model_type == 'lora' else 'checkpoints'
|
||||
|
||||
# First, count all model files to track progress
|
||||
await ws_manager.broadcast_init_progress({
|
||||
'stage': 'scan_folders',
|
||||
'progress': 0,
|
||||
'details': f"Scanning {self.model_type} folders...",
|
||||
'scanner_type': self.model_type,
|
||||
'pageType': page_type
|
||||
})
|
||||
|
||||
# Count files in a separate thread to avoid blocking
|
||||
loop = asyncio.get_event_loop()
|
||||
total_files = await loop.run_in_executor(
|
||||
None, # Use default thread pool
|
||||
self._count_model_files # Run file counting in thread
|
||||
)
|
||||
|
||||
await ws_manager.broadcast_init_progress({
|
||||
'stage': 'count_models',
|
||||
'progress': 1, # Changed from 10 to 1
|
||||
'details': f"Found {total_files} {self.model_type} files",
|
||||
'scanner_type': self.model_type,
|
||||
'pageType': page_type
|
||||
})
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
# Use thread pool to execute CPU-intensive operations with progress reporting
|
||||
await loop.run_in_executor(
|
||||
None, # Use default thread pool
|
||||
self._initialize_cache_sync, # Run synchronous version in thread
|
||||
total_files, # Pass the total file count for progress reporting
|
||||
page_type # Pass the page type for progress reporting
|
||||
)
|
||||
|
||||
# Send final progress update
|
||||
await ws_manager.broadcast_init_progress({
|
||||
'stage': 'finalizing',
|
||||
'progress': 99, # Changed from 95 to 99
|
||||
'details': f"Finalizing {self.model_type} cache...",
|
||||
'scanner_type': self.model_type,
|
||||
'pageType': page_type
|
||||
})
|
||||
|
||||
logger.info(f"{self.model_type.capitalize()} cache initialized in {time.time() - start_time:.2f} seconds. Found {len(self._cache.raw_data)} models")
|
||||
|
||||
# Send completion message
|
||||
await asyncio.sleep(0.5) # Small delay to ensure final progress message is sent
|
||||
await ws_manager.broadcast_init_progress({
|
||||
'stage': 'finalizing',
|
||||
'progress': 100,
|
||||
'status': 'complete',
|
||||
'details': f"Completed! Found {len(self._cache.raw_data)} {self.model_type} files.",
|
||||
'scanner_type': self.model_type,
|
||||
'pageType': page_type
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.model_type.capitalize()} Scanner: Error initializing cache in background: {e}")
|
||||
finally:
|
||||
# Always clear the initializing flag when done
|
||||
self._is_initializing = False
|
||||
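The progress numbers broadcast above follow a fixed scale: 0 while folders are scanned, 1 once files are counted, 1-99 while individual files are processed, and 100 only in the completion message. A small sketch of the per-file mapping used later in this file:

```python
def processing_progress(processed: int, total: int) -> int:
    """Map processed/total onto the 1-99 band reserved for file processing."""
    if total <= 0:
        return 1
    return min(99, int(1 + (processed / total) * 98))

assert processing_progress(0, 200) == 1
assert processing_progress(100, 200) == 50
assert processing_progress(200, 200) == 99
```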
|
||||
def _count_model_files(self) -> int:
|
||||
"""Count all model files with supported extensions in all roots
|
||||
|
||||
Returns:
|
||||
int: Total number of model files found
|
||||
"""
|
||||
total_files = 0
|
||||
visited_real_paths = set()
|
||||
|
||||
for root_path in self.get_model_roots():
|
||||
if not os.path.exists(root_path):
|
||||
continue
|
||||
|
||||
def count_recursive(path):
|
||||
nonlocal total_files
|
||||
try:
|
||||
real_path = os.path.realpath(path)
|
||||
if real_path in visited_real_paths:
|
||||
return
|
||||
visited_real_paths.add(real_path)
|
||||
|
||||
with os.scandir(path) as it:
|
||||
for entry in it:
|
||||
try:
|
||||
if entry.is_file(follow_symlinks=True):
|
||||
ext = os.path.splitext(entry.name)[1].lower()
|
||||
if ext in self.file_extensions:
|
||||
total_files += 1
|
||||
elif entry.is_dir(follow_symlinks=True):
|
||||
count_recursive(entry.path)
|
||||
except Exception as e:
|
||||
logger.error(f"Error counting files in entry {entry.path}: {e}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error counting files in {path}: {e}")
|
||||
|
||||
count_recursive(root_path)
|
||||
|
||||
return total_files
|
||||
|
||||
def _initialize_cache_sync(self, total_files=0, page_type='loras'):
|
||||
"""Synchronous version of cache initialization for thread pool execution"""
|
||||
try:
|
||||
# Create a new event loop for this thread
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
|
||||
# Create a synchronous method to bypass the async lock
|
||||
def sync_initialize_cache():
|
||||
# Track progress
|
||||
processed_files = 0
|
||||
last_progress_time = time.time()
|
||||
last_progress_percent = 0
|
||||
|
||||
# We need a wrapper around scan_all_models to track progress
|
||||
# This is a local function that will run in our thread's event loop
|
||||
async def scan_with_progress():
|
||||
nonlocal processed_files, last_progress_time, last_progress_percent
|
||||
|
||||
# For storing raw model data
|
||||
all_models = []
|
||||
|
||||
# Process each model root
|
||||
for root_path in self.get_model_roots():
|
||||
if not os.path.exists(root_path):
|
||||
continue
|
||||
|
||||
# Track visited paths to avoid symlink loops
|
||||
visited_paths = set()
|
||||
|
||||
# Recursively process directory
|
||||
async def scan_dir_with_progress(path):
|
||||
nonlocal processed_files, last_progress_time, last_progress_percent
|
||||
|
||||
try:
|
||||
real_path = os.path.realpath(path)
|
||||
if real_path in visited_paths:
|
||||
return
|
||||
visited_paths.add(real_path)
|
||||
|
||||
with os.scandir(path) as it:
|
||||
entries = list(it)
|
||||
for entry in entries:
|
||||
try:
|
||||
if entry.is_file(follow_symlinks=True):
|
||||
ext = os.path.splitext(entry.name)[1].lower()
|
||||
if ext in self.file_extensions:
|
||||
file_path = entry.path.replace(os.sep, "/")
|
||||
result = await self._process_model_file(file_path, root_path)
|
||||
if result:
|
||||
all_models.append(result)
|
||||
|
||||
# Update progress counter
|
||||
processed_files += 1
|
||||
|
||||
# Update progress periodically (not every file to avoid excessive updates)
|
||||
current_time = time.time()
|
||||
if total_files > 0 and (current_time - last_progress_time > 0.5 or processed_files == total_files):
|
||||
# Adjusted progress calculation
|
||||
progress_percent = min(99, int(1 + (processed_files / total_files) * 98))
|
||||
if progress_percent > last_progress_percent:
|
||||
last_progress_percent = progress_percent
|
||||
last_progress_time = current_time
|
||||
|
||||
# Send progress update through websocket
|
||||
await ws_manager.broadcast_init_progress({
|
||||
'stage': 'process_models',
|
||||
'progress': progress_percent,
|
||||
'details': f"Processing {self.model_type} files: {processed_files}/{total_files}",
|
||||
'scanner_type': self.model_type,
|
||||
'pageType': page_type
|
||||
})
|
||||
|
||||
elif entry.is_dir(follow_symlinks=True):
|
||||
await scan_dir_with_progress(entry.path)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing entry {entry.path}: {e}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning {path}: {e}")
|
||||
|
||||
# Process the root path
|
||||
await scan_dir_with_progress(root_path)
|
||||
|
||||
return all_models
|
||||
|
||||
# Run the progress-tracking scan function
|
||||
raw_data = loop.run_until_complete(scan_with_progress())
|
||||
|
||||
# Update hash index and tags count
|
||||
for model_data in raw_data:
|
||||
if 'sha256' in model_data and 'file_path' in model_data:
|
||||
self._hash_index.add_entry(model_data['sha256'].lower(), model_data['file_path'])
|
||||
|
||||
# Count tags
|
||||
if 'tags' in model_data and model_data['tags']:
|
||||
for tag in model_data['tags']:
|
||||
self._tags_count[tag] = self._tags_count.get(tag, 0) + 1
|
||||
|
||||
# Update cache
|
||||
self._cache.raw_data = raw_data
|
||||
loop.run_until_complete(self._cache.resort())
|
||||
|
||||
return self._cache
|
||||
|
||||
# Run our sync initialization that avoids lock conflicts
|
||||
return sync_initialize_cache()
|
||||
except Exception as e:
|
||||
logger.error(f"Error in thread-based {self.model_type} cache initialization: {e}")
|
||||
finally:
|
||||
# Clean up the event loop
|
||||
loop.close()
|
||||
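Because the heavy scan runs inside a worker thread (via run_in_executor) while the scan helpers themselves are coroutines, the thread creates its own event loop and drives them with run_until_complete. A stripped-down sketch of that pattern, with a stand-in coroutine:

```python
import asyncio

async def scan():                       # stands in for scan_with_progress()
    await asyncio.sleep(0)
    return ["model_a", "model_b"]

def initialize_cache_sync():
    loop = asyncio.new_event_loop()     # this worker thread has no running loop of its own
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(scan())
    finally:
        loop.close()

print(initialize_cache_sync())          # ['model_a', 'model_b']
```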
|
||||
async def get_cached_data(self, force_refresh: bool = False) -> ModelCache:
|
||||
"""Get cached model data, refresh if needed"""
|
||||
# If cache is not initialized, return an empty cache
|
||||
# Actual initialization should be done via initialize_in_background
|
||||
if self._cache is None and not force_refresh:
|
||||
return ModelCache(
|
||||
raw_data=[],
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[],
|
||||
folders=[]
|
||||
)
|
||||
|
||||
# If force refresh is requested, initialize the cache directly
|
||||
if (force_refresh):
|
||||
if self._cache is None:
|
||||
# For initial creation, do a full initialization
|
||||
await self._initialize_cache()
|
||||
else:
|
||||
# For subsequent refreshes, use fast reconciliation
|
||||
await self._reconcile_cache()
|
||||
|
||||
return self._cache
|
||||
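Callers are expected to tolerate an empty cache until the background initialization finishes; only force_refresh triggers synchronous work. A hypothetical call pattern:

```python
async def example(scanner: "ModelScanner"):
    # Returns an empty ModelCache instead of blocking while background init runs.
    cache = await scanner.get_cached_data()
    print(len(cache.raw_data))

    # After files changed on disk: full initialization if no cache exists yet,
    # otherwise the fast reconciliation path below.
    cache = await scanner.get_cached_data(force_refresh=True)
    print(cache.folders[:5])
```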
|
||||
async def _initialize_cache(self) -> None:
|
||||
"""Initialize or refresh the cache"""
|
||||
self._is_initializing = True # Set flag
|
||||
try:
|
||||
start_time = time.time()
|
||||
# Clear existing hash index
|
||||
self._hash_index.clear()
|
||||
|
||||
# Clear existing tags count
|
||||
self._tags_count = {}
|
||||
|
||||
# Determine the page type based on model type
|
||||
page_type = 'loras' if self.model_type == 'lora' else 'checkpoints'
|
||||
|
||||
# Scan for new data
|
||||
raw_data = await self.scan_all_models()
|
||||
|
||||
# Build hash index and tags count
|
||||
for model_data in raw_data:
|
||||
if 'sha256' in model_data and 'file_path' in model_data:
|
||||
self._hash_index.add_entry(model_data['sha256'].lower(), model_data['file_path'])
|
||||
|
||||
# Count tags
|
||||
if 'tags' in model_data and model_data['tags']:
|
||||
for tag in model_data['tags']:
|
||||
self._tags_count[tag] = self._tags_count.get(tag, 0) + 1
|
||||
|
||||
# Update cache
|
||||
self._cache = ModelCache(
|
||||
raw_data=raw_data,
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[],
|
||||
folders=[]
|
||||
)
|
||||
|
||||
# Resort cache
|
||||
await self._cache.resort()
|
||||
|
||||
logger.info(f"{self.model_type.capitalize()} Scanner: Cache initialization completed in {time.time() - start_time:.2f} seconds, found {len(raw_data)} models")
|
||||
except Exception as e:
|
||||
logger.error(f"{self.model_type.capitalize()} Scanner: Error initializing cache: {e}")
|
||||
# Ensure cache is at least an empty structure on error
|
||||
if self._cache is None:
|
||||
self._cache = ModelCache(
|
||||
raw_data=[],
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[],
|
||||
folders=[]
|
||||
)
|
||||
finally:
|
||||
self._is_initializing = False # Unset flag
|
||||
|
||||
async def _reconcile_cache(self) -> None:
|
||||
"""Fast cache reconciliation - only process differences between cache and filesystem"""
|
||||
self._is_initializing = True # Set flag for reconciliation duration
|
||||
try:
|
||||
start_time = time.time()
|
||||
logger.info(f"{self.model_type.capitalize()} Scanner: Starting fast cache reconciliation...")
|
||||
|
||||
# Get current cached file paths
|
||||
cached_paths = {item['file_path'] for item in self._cache.raw_data}
|
||||
path_to_item = {item['file_path']: item for item in self._cache.raw_data}
|
||||
|
||||
# Track found files and new files
|
||||
found_paths = set()
|
||||
new_files = []
|
||||
|
||||
# Scan all model roots
|
||||
for root_path in self.get_model_roots():
|
||||
if not os.path.exists(root_path):
|
||||
continue
|
||||
|
||||
# Track visited real paths to avoid symlink loops
|
||||
visited_real_paths = set()
|
||||
|
||||
# Recursively scan directory
|
||||
for root, _, files in os.walk(root_path, followlinks=True):
|
||||
real_root = os.path.realpath(root)
|
||||
if real_root in visited_real_paths:
|
||||
continue
|
||||
visited_real_paths.add(real_root)
|
||||
|
||||
for file in files:
|
||||
ext = os.path.splitext(file)[1].lower()
|
||||
if ext in self.file_extensions:
|
||||
# Construct paths exactly as they would be in cache
|
||||
file_path = os.path.join(root, file).replace(os.sep, '/')
|
||||
|
||||
# Check if this file is already in cache
|
||||
if file_path in cached_paths:
|
||||
found_paths.add(file_path)
|
||||
continue
|
||||
|
||||
# Try case-insensitive match on Windows
|
||||
if os.name == 'nt':
|
||||
lower_path = file_path.lower()
|
||||
matched = False
|
||||
for cached_path in cached_paths:
|
||||
if cached_path.lower() == lower_path:
|
||||
found_paths.add(cached_path)
|
||||
matched = True
|
||||
break
|
||||
if matched:
|
||||
continue
|
||||
|
||||
# This is a new file to process
|
||||
new_files.append(file_path)
|
||||
|
||||
# Yield control periodically
|
||||
await asyncio.sleep(0)
|
||||
|
||||
# Process new files in batches
|
||||
total_added = 0
|
||||
if new_files:
|
||||
logger.info(f"{self.model_type.capitalize()} Scanner: Found {len(new_files)} new files to process")
|
||||
batch_size = 50
|
||||
for i in range(0, len(new_files), batch_size):
|
||||
batch = new_files[i:i+batch_size]
|
||||
for path in batch:
|
||||
try:
|
||||
model_data = await self.scan_single_model(path)
|
||||
if model_data:
|
||||
# Add to cache
|
||||
self._cache.raw_data.append(model_data)
|
||||
|
||||
# Update hash index if available
|
||||
if 'sha256' in model_data and 'file_path' in model_data:
|
||||
self._hash_index.add_entry(model_data['sha256'].lower(), model_data['file_path'])
|
||||
|
||||
# Update tags count
|
||||
if 'tags' in model_data and model_data['tags']:
|
||||
for tag in model_data['tags']:
|
||||
self._tags_count[tag] = self._tags_count.get(tag, 0) + 1
|
||||
|
||||
total_added += 1
|
||||
except Exception as e:
|
||||
logger.error(f"Error adding {path} to cache: {e}")
|
||||
|
||||
# Yield control after each batch
|
||||
await asyncio.sleep(0)
|
||||
|
||||
# Find missing files (in cache but not in filesystem)
|
||||
missing_files = cached_paths - found_paths
|
||||
total_removed = 0
|
||||
|
||||
if missing_files:
|
||||
logger.info(f"{self.model_type.capitalize()} Scanner: Found {len(missing_files)} files to remove from cache")
|
||||
|
||||
# Process files to remove
|
||||
for path in missing_files:
|
||||
try:
|
||||
model_to_remove = path_to_item[path]
|
||||
|
||||
# Update tags count
|
||||
for tag in model_to_remove.get('tags', []):
|
||||
if tag in self._tags_count:
|
||||
self._tags_count[tag] = max(0, self._tags_count[tag] - 1)
|
||||
if self._tags_count[tag] == 0:
|
||||
del self._tags_count[tag]
|
||||
|
||||
# Remove from hash index
|
||||
self._hash_index.remove_by_path(path)
|
||||
total_removed += 1
|
||||
except Exception as e:
|
||||
logger.error(f"Error removing {path} from cache: {e}")
|
||||
|
||||
# Update cache data
|
||||
self._cache.raw_data = [item for item in self._cache.raw_data if item['file_path'] not in missing_files]
|
||||
|
||||
# Resort cache if changes were made
|
||||
if total_added > 0 or total_removed > 0:
|
||||
# Update folders list
|
||||
all_folders = set(item.get('folder', '') for item in self._cache.raw_data)
|
||||
self._cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
|
||||
|
||||
# Resort cache
|
||||
await self._cache.resort()
|
||||
|
||||
logger.info(f"{self.model_type.capitalize()} Scanner: Cache reconciliation completed in {time.time() - start_time:.2f} seconds. Added {total_added}, removed {total_removed} models.")
|
||||
except Exception as e:
|
||||
logger.error(f"{self.model_type.capitalize()} Scanner: Error reconciling cache: {e}", exc_info=True)
|
||||
finally:
|
||||
self._is_initializing = False # Unset flag
|
||||
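The reconciliation reduces to two set differences between cached paths and what is currently on disk; a minimal sketch with invented paths:

```python
cached = {"/loras/a.safetensors", "/loras/b.safetensors"}
on_disk = {"/loras/b.safetensors", "/loras/c.safetensors"}

new_files = on_disk - cached   # {"/loras/c.safetensors"}: scanned and appended to the cache
missing = cached - on_disk     # {"/loras/a.safetensors"}: dropped from cache, tags and hash index
print(new_files, missing)
```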
|
||||
# These methods should be implemented in child classes
|
||||
async def scan_all_models(self) -> List[Dict]:
|
||||
"""Scan all model directories and return metadata"""
|
||||
raise NotImplementedError("Subclasses must implement scan_all_models")
|
||||
|
||||
def get_model_roots(self) -> List[str]:
|
||||
"""Get model root directories"""
|
||||
raise NotImplementedError("Subclasses must implement get_model_roots")
|
||||
|
||||
async def scan_single_model(self, file_path: str) -> Optional[Dict]:
|
||||
"""Scan a single model file and return its metadata"""
|
||||
try:
|
||||
if not os.path.exists(os.path.realpath(file_path)):
|
||||
return None
|
||||
|
||||
# Get basic file info
|
||||
metadata = await self._get_file_info(file_path)
|
||||
if not metadata:
|
||||
return None
|
||||
|
||||
folder = self._calculate_folder(file_path)
|
||||
|
||||
# Ensure folder field exists
|
||||
metadata_dict = metadata.to_dict()
|
||||
metadata_dict['folder'] = folder or ''
|
||||
|
||||
return metadata_dict
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning {file_path}: {e}")
|
||||
return None
|
||||
|
||||
async def _get_file_info(self, file_path: str) -> Optional[BaseModelMetadata]:
|
||||
"""Get model file info and metadata (extensible for different model types)"""
|
||||
return await get_file_info(file_path, self.model_class)
|
||||
|
||||
def _calculate_folder(self, file_path: str) -> str:
|
||||
"""Calculate the folder path for a model file"""
|
||||
for root in self.get_model_roots():
|
||||
if file_path.startswith(root):
|
||||
rel_path = os.path.relpath(file_path, root)
|
||||
return os.path.dirname(rel_path).replace(os.path.sep, '/')
|
||||
return ''
|
||||
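The folder stored with each model is its directory relative to the first matching root, with forward slashes; an example with made-up paths:

```python
import os

root = "/data/models/loras"
file_path = "/data/models/loras/characters/fantasy/elf_v2.safetensors"
rel = os.path.relpath(file_path, root)
print(os.path.dirname(rel).replace(os.path.sep, "/"))   # characters/fantasy
```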
|
||||
# Common methods shared between scanners
|
||||
async def _process_model_file(self, file_path: str, root_path: str) -> Dict:
|
||||
"""Process a single model file and return its metadata"""
|
||||
metadata = await load_metadata(file_path, self.model_class)
|
||||
|
||||
if metadata is None:
|
||||
civitai_info_path = f"{os.path.splitext(file_path)[0]}.civitai.info"
|
||||
if os.path.exists(civitai_info_path):
|
||||
try:
|
||||
with open(civitai_info_path, 'r', encoding='utf-8') as f:
|
||||
version_info = json.load(f)
|
||||
|
||||
file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
|
||||
if file_info:
|
||||
file_name = os.path.splitext(os.path.basename(file_path))[0]
|
||||
file_info['name'] = file_name
|
||||
|
||||
metadata = self.model_class.from_civitai_info(version_info, file_info, file_path)
|
||||
metadata.preview_url = find_preview_file(file_name, os.path.dirname(file_path))
|
||||
await save_metadata(file_path, metadata)
|
||||
logger.debug(f"Created metadata from .civitai.info for {file_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error creating metadata from .civitai.info for {file_path}: {e}")
|
||||
else:
|
||||
# Check if metadata exists but civitai field is empty - try to restore from civitai.info
|
||||
if metadata.civitai is None or metadata.civitai == {}:
|
||||
civitai_info_path = f"{os.path.splitext(file_path)[0]}.civitai.info"
|
||||
if os.path.exists(civitai_info_path):
|
||||
try:
|
||||
with open(civitai_info_path, 'r', encoding='utf-8') as f:
|
||||
version_info = json.load(f)
|
||||
|
||||
logger.debug(f"Restoring missing civitai data from .civitai.info for {file_path}")
|
||||
metadata.civitai = version_info
|
||||
|
||||
# Ensure tags are also updated if they're missing
|
||||
if (not metadata.tags or len(metadata.tags) == 0) and 'model' in version_info:
|
||||
if 'tags' in version_info['model']:
|
||||
metadata.tags = version_info['model']['tags']
|
||||
|
||||
# Also restore description if missing
|
||||
if (not metadata.modelDescription or metadata.modelDescription == "") and 'model' in version_info:
|
||||
if 'description' in version_info['model']:
|
||||
metadata.modelDescription = version_info['model']['description']
|
||||
|
||||
# Save the updated metadata
|
||||
await save_metadata(file_path, metadata)
|
||||
logger.debug(f"Updated metadata with civitai info for {file_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error restoring civitai data from .civitai.info for {file_path}: {e}")
|
||||
|
||||
if metadata is None:
|
||||
metadata = await self._get_file_info(file_path)
|
||||
|
||||
model_data = metadata.to_dict()
|
||||
|
||||
await self._fetch_missing_metadata(file_path, model_data)
|
||||
rel_path = os.path.relpath(file_path, root_path)
|
||||
folder = os.path.dirname(rel_path)
|
||||
model_data['folder'] = folder.replace(os.path.sep, '/')
|
||||
|
||||
return model_data
|
||||
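For a given model file the metadata is resolved from sibling files in a fixed order; the sketch below only lists the candidates, while the real logic above additionally repairs metadata whose civitai field is empty:

```python
import os

def metadata_sources(model_path: str) -> list:
    base = os.path.splitext(model_path)[0]
    return [
        f"{base}.metadata.json",   # previously saved metadata, preferred
        f"{base}.civitai.info",    # used to (re)build metadata when missing or incomplete
        model_path,                # last resort: bare file info derived from the model itself
    ]

print(metadata_sources("loras/style_v1.safetensors"))
```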
|
||||
async def _fetch_missing_metadata(self, file_path: str, model_data: Dict) -> None:
|
||||
"""Fetch missing description and tags from Civitai if needed"""
|
||||
try:
|
||||
if model_data.get('civitai_deleted', False):
|
||||
logger.debug(f"Skipping metadata fetch for {file_path}: marked as deleted on Civitai")
|
||||
return
|
||||
|
||||
needs_metadata_update = False
|
||||
model_id = None
|
||||
|
||||
if model_data.get('civitai'):
|
||||
model_id = model_data['civitai'].get('modelId')
|
||||
|
||||
if model_id:
|
||||
model_id = str(model_id)
|
||||
tags_missing = not model_data.get('tags') or len(model_data.get('tags', [])) == 0
|
||||
desc_missing = not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")
|
||||
needs_metadata_update = tags_missing or desc_missing
|
||||
|
||||
if needs_metadata_update and model_id:
|
||||
logger.debug(f"Fetching missing metadata for {file_path} with model ID {model_id}")
|
||||
from ..services.civitai_client import CivitaiClient
|
||||
client = CivitaiClient()
|
||||
|
||||
model_metadata, status_code = await client.get_model_metadata(model_id)
|
||||
await client.close()
|
||||
|
||||
if status_code == 404:
|
||||
logger.warning(f"Model {model_id} appears to be deleted from Civitai (404 response)")
|
||||
model_data['civitai_deleted'] = True
|
||||
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(model_data, f, indent=2, ensure_ascii=False)
|
||||
|
||||
elif model_metadata:
|
||||
logger.debug(f"Updating metadata for {file_path} with model ID {model_id}")
|
||||
|
||||
if model_metadata.get('tags') and (not model_data.get('tags') or len(model_data.get('tags', [])) == 0):
|
||||
model_data['tags'] = model_metadata['tags']
|
||||
|
||||
if model_metadata.get('description') and (not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")):
|
||||
model_data['modelDescription'] = model_metadata['description']
|
||||
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(model_data, f, indent=2, ensure_ascii=False)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update metadata from Civitai for {file_path}: {e}")
|
||||
|
||||
async def _scan_directory(self, root_path: str) -> List[Dict]:
|
||||
"""Base implementation for directory scanning"""
|
||||
models = []
|
||||
original_root = root_path
|
||||
|
||||
async def scan_recursive(path: str, visited_paths: set):
|
||||
try:
|
||||
real_path = os.path.realpath(path)
|
||||
if real_path in visited_paths:
|
||||
logger.debug(f"Skipping already visited path: {path}")
|
||||
return
|
||||
visited_paths.add(real_path)
|
||||
|
||||
with os.scandir(path) as it:
|
||||
entries = list(it)
|
||||
for entry in entries:
|
||||
try:
|
||||
if entry.is_file(follow_symlinks=True):
|
||||
ext = os.path.splitext(entry.name)[1].lower()
|
||||
if ext in self.file_extensions:
|
||||
file_path = entry.path.replace(os.sep, "/")
|
||||
await self._process_single_file(file_path, original_root, models)
|
||||
await asyncio.sleep(0)
|
||||
elif entry.is_dir(follow_symlinks=True):
|
||||
await scan_recursive(entry.path, visited_paths)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing entry {entry.path}: {e}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning {path}: {e}")
|
||||
|
||||
await scan_recursive(root_path, set())
|
||||
return models
|
||||
|
||||
async def _process_single_file(self, file_path: str, root_path: str, models_list: list):
|
||||
"""Process a single file and add to results list"""
|
||||
try:
|
||||
result = await self._process_model_file(file_path, root_path)
|
||||
if result:
|
||||
models_list.append(result)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing {file_path}: {e}")
|
||||
|
||||
async def move_model(self, source_path: str, target_path: str) -> bool:
|
||||
"""Move a model and its associated files to a new location"""
|
||||
try:
|
||||
source_path = source_path.replace(os.sep, '/')
|
||||
target_path = target_path.replace(os.sep, '/')
|
||||
|
||||
file_ext = os.path.splitext(source_path)[1]
|
||||
|
||||
if not file_ext or file_ext.lower() not in self.file_extensions:
|
||||
logger.error(f"Invalid file extension for model: {file_ext}")
|
||||
return False
|
||||
|
||||
base_name = os.path.splitext(os.path.basename(source_path))[0]
|
||||
source_dir = os.path.dirname(source_path)
|
||||
|
||||
os.makedirs(target_path, exist_ok=True)
|
||||
|
||||
target_file = os.path.join(target_path, f"{base_name}{file_ext}").replace(os.sep, '/')
|
||||
|
||||
real_source = os.path.realpath(source_path)
|
||||
real_target = os.path.realpath(target_file)
|
||||
|
||||
file_size = os.path.getsize(real_source)
|
||||
|
||||
# Get the appropriate file monitor through ServiceRegistry
|
||||
if self.model_type == "lora":
|
||||
monitor = await ServiceRegistry.get_lora_monitor()
|
||||
elif self.model_type == "checkpoint":
|
||||
monitor = await ServiceRegistry.get_checkpoint_monitor()
|
||||
else:
|
||||
monitor = None
|
||||
|
||||
if monitor:
|
||||
monitor.handler.add_ignore_path(
|
||||
real_source,
|
||||
file_size
|
||||
)
|
||||
monitor.handler.add_ignore_path(
|
||||
real_target,
|
||||
file_size
|
||||
)
|
||||
|
||||
shutil.move(real_source, real_target)
|
||||
|
||||
source_metadata = os.path.join(source_dir, f"{base_name}.metadata.json")
|
||||
metadata = None
|
||||
if os.path.exists(source_metadata):
|
||||
target_metadata = os.path.join(target_path, f"{base_name}.metadata.json")
|
||||
shutil.move(source_metadata, target_metadata)
|
||||
metadata = await self._update_metadata_paths(target_metadata, target_file)
|
||||
|
||||
for ext in PREVIEW_EXTENSIONS:
|
||||
source_preview = os.path.join(source_dir, f"{base_name}{ext}")
|
||||
if os.path.exists(source_preview):
|
||||
target_preview = os.path.join(target_path, f"{base_name}{ext}")
|
||||
shutil.move(source_preview, target_preview)
|
||||
break
|
||||
|
||||
await self.update_single_model_cache(source_path, target_file, metadata)
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error moving model: {e}", exc_info=True)
|
||||
return False
|
||||
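A hypothetical call to move_model; note that both real paths are pushed onto the file monitor's ignore list first, so the watcher does not treat the manager's own move as an external change:

```python
async def relocate(scanner: "ModelScanner") -> None:
    ok = await scanner.move_model(
        source_path="/data/loras/old/style_v1.safetensors",  # example path
        target_path="/data/loras/styles",                    # target directory, created if missing
    )
    if not ok:
        print("move failed; see log for details")
```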
|
||||
async def _update_metadata_paths(self, metadata_path: str, model_path: str) -> Dict:
|
||||
"""Update file paths in metadata file"""
|
||||
try:
|
||||
with open(metadata_path, 'r', encoding='utf-8') as f:
|
||||
metadata = json.load(f)
|
||||
|
||||
metadata['file_path'] = model_path.replace(os.sep, '/')
|
||||
|
||||
if 'preview_url' in metadata:
|
||||
preview_dir = os.path.dirname(model_path)
|
||||
preview_name = os.path.splitext(os.path.basename(metadata['preview_url']))[0]
|
||||
preview_ext = os.path.splitext(metadata['preview_url'])[1]
|
||||
new_preview_path = os.path.join(preview_dir, f"{preview_name}{preview_ext}")
|
||||
metadata['preview_url'] = new_preview_path.replace(os.sep, '/')
|
||||
|
||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
||||
|
||||
return metadata
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating metadata paths: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
async def update_single_model_cache(self, original_path: str, new_path: str, metadata: Dict) -> bool:
|
||||
"""Update cache after a model has been moved or modified"""
|
||||
cache = await self.get_cached_data()
|
||||
|
||||
existing_item = next((item for item in cache.raw_data if item['file_path'] == original_path), None)
|
||||
if existing_item and 'tags' in existing_item:
|
||||
for tag in existing_item.get('tags', []):
|
||||
if tag in self._tags_count:
|
||||
self._tags_count[tag] = max(0, self._tags_count[tag] - 1)
|
||||
if self._tags_count[tag] == 0:
|
||||
del self._tags_count[tag]
|
||||
|
||||
self._hash_index.remove_by_path(original_path)
|
||||
|
||||
cache.raw_data = [
|
||||
item for item in cache.raw_data
|
||||
if item['file_path'] != original_path
|
||||
]
|
||||
|
||||
if metadata:
|
||||
if original_path == new_path:
|
||||
existing_folder = next((item['folder'] for item in cache.raw_data
|
||||
if item['file_path'] == original_path), None)
|
||||
if existing_folder:
|
||||
metadata['folder'] = existing_folder
|
||||
else:
|
||||
metadata['folder'] = self._calculate_folder(new_path)
|
||||
else:
|
||||
metadata['folder'] = self._calculate_folder(new_path)
|
||||
|
||||
cache.raw_data.append(metadata)
|
||||
|
||||
if 'sha256' in metadata:
|
||||
self._hash_index.add_entry(metadata['sha256'].lower(), new_path)
|
||||
|
||||
all_folders = set(item['folder'] for item in cache.raw_data)
|
||||
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
|
||||
|
||||
if 'tags' in metadata:
|
||||
for tag in metadata.get('tags', []):
|
||||
self._tags_count[tag] = self._tags_count.get(tag, 0) + 1
|
||||
|
||||
await cache.resort()
|
||||
|
||||
return True
|
||||
|
||||
def has_hash(self, sha256: str) -> bool:
|
||||
"""Check if a model with given hash exists"""
|
||||
return self._hash_index.has_hash(sha256.lower())
|
||||
|
||||
def get_path_by_hash(self, sha256: str) -> Optional[str]:
|
||||
"""Get file path for a model by its hash"""
|
||||
return self._hash_index.get_path(sha256.lower())
|
||||
|
||||
def get_hash_by_path(self, file_path: str) -> Optional[str]:
|
||||
"""Get hash for a model by its file path"""
|
||||
return self._hash_index.get_hash(file_path)
|
||||
|
||||
def get_hash_by_filename(self, filename: str) -> Optional[str]:
|
||||
"""Get hash for a model by its filename without path"""
|
||||
return self._hash_index.get_hash_by_filename(filename)
|
||||
|
||||
# TODO: Adjust this method to use metadata instead of finding the file
|
||||
def get_preview_url_by_hash(self, sha256: str) -> Optional[str]:
|
||||
"""Get preview static URL for a model by its hash"""
|
||||
file_path = self._hash_index.get_path(sha256.lower())
|
||||
if not file_path:
|
||||
return None
|
||||
|
||||
base_name = os.path.splitext(file_path)[0]
|
||||
|
||||
for ext in PREVIEW_EXTENSIONS:
|
||||
preview_path = f"{base_name}{ext}"
|
||||
if os.path.exists(preview_path):
|
||||
return config.get_preview_static_url(preview_path)
|
||||
|
||||
return None
|
||||
|
||||
async def get_top_tags(self, limit: int = 20) -> List[Dict[str, Any]]:
|
||||
"""Get top tags sorted by count"""
|
||||
await self.get_cached_data()
|
||||
|
||||
sorted_tags = sorted(
|
||||
[{"tag": tag, "count": count} for tag, count in self._tags_count.items()],
|
||||
key=lambda x: x['count'],
|
||||
reverse=True
|
||||
)
|
||||
|
||||
return sorted_tags[:limit]
|
||||
|
||||
async def get_base_models(self, limit: int = 20) -> List[Dict[str, Any]]:
|
||||
"""Get base models sorted by frequency"""
|
||||
cache = await self.get_cached_data()
|
||||
|
||||
base_model_counts = {}
|
||||
for model in cache.raw_data:
|
||||
if 'base_model' in model and model['base_model']:
|
||||
base_model = model['base_model']
|
||||
base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1
|
||||
|
||||
sorted_models = [{'name': model, 'count': count} for model, count in base_model_counts.items()]
|
||||
sorted_models.sort(key=lambda x: x['count'], reverse=True)
|
||||
|
||||
return sorted_models[:limit]
|
||||
|
||||
async def get_model_info_by_name(self, name):
|
||||
"""Get model information by name"""
|
||||
try:
|
||||
cache = await self.get_cached_data()
|
||||
|
||||
for model in cache.raw_data:
|
||||
if model.get("file_name") == name:
|
||||
return model
|
||||
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting model info by name: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
async def update_preview_in_cache(self, file_path: str, preview_url: str) -> bool:
|
||||
"""Update preview URL in cache for a specific lora
|
||||
|
||||
Args:
|
||||
file_path: The file path of the model to update
|
||||
preview_url: The new preview URL
|
||||
|
||||
Returns:
|
||||
bool: True if the update was successful, False if the cache doesn't exist or the model wasn't found
|
||||
"""
|
||||
if self._cache is None:
|
||||
return False
|
||||
|
||||
return await self._cache.update_preview_url(file_path, preview_url)
|
||||
@@ -2,11 +2,12 @@ import os
|
||||
import logging
|
||||
import asyncio
|
||||
import json
|
||||
import time
|
||||
from typing import List, Dict, Optional, Any, Tuple
|
||||
from ..config import config
|
||||
from .recipe_cache import RecipeCache
|
||||
from .service_registry import ServiceRegistry
|
||||
from .lora_scanner import LoraScanner
|
||||
from .civitai_client import CivitaiClient
|
||||
from ..utils.utils import fuzzy_match
|
||||
import sys
|
||||
|
||||
@@ -18,11 +19,22 @@ class RecipeScanner:
|
||||
_instance = None
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls, lora_scanner: Optional[LoraScanner] = None):
|
||||
"""Get singleton instance of RecipeScanner"""
|
||||
async with cls._lock:
|
||||
if cls._instance is None:
|
||||
if not lora_scanner:
|
||||
# Get lora scanner from service registry if not provided
|
||||
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||
cls._instance = cls(lora_scanner)
|
||||
return cls._instance
|
||||
|
||||
def __new__(cls, lora_scanner: Optional[LoraScanner] = None):
|
||||
if cls._instance is None:
|
||||
cls._instance = super().__new__(cls)
|
||||
cls._instance._lora_scanner = lora_scanner
|
||||
cls._instance._civitai_client = CivitaiClient()
|
||||
cls._instance._civitai_client = None # Will be lazily initialized
|
||||
return cls._instance
|
||||
|
||||
def __init__(self, lora_scanner: Optional[LoraScanner] = None):
|
||||
@@ -35,9 +47,148 @@ class RecipeScanner:
|
||||
if lora_scanner:
|
||||
self._lora_scanner = lora_scanner
|
||||
self._initialized = True
|
||||
|
||||
# Initialization will be scheduled by LoraManager
|
||||
|
||||
async def _get_civitai_client(self):
|
||||
"""Lazily initialize CivitaiClient from registry"""
|
||||
if self._civitai_client is None:
|
||||
self._civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
return self._civitai_client
|
||||
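The client is no longer constructed in __new__; it is fetched once from the ServiceRegistry on first use. The same lazy-lookup shape could be reused for any registry-managed dependency, e.g. (illustrative only, not a method that exists in this file):

```python
async def _get_download_manager(self):
    """Sketch of the same lazy pattern for another registry-managed service."""
    if getattr(self, "_download_manager", None) is None:
        self._download_manager = await ServiceRegistry.get_download_manager()
    return self._download_manager
```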
|
||||
async def initialize_in_background(self) -> None:
|
||||
"""Initialize cache in background using thread pool"""
|
||||
try:
|
||||
# Set initial empty cache to avoid None reference errors
|
||||
if self._cache is None:
|
||||
self._cache = RecipeCache(
|
||||
raw_data=[],
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[]
|
||||
)
|
||||
|
||||
# Mark as initializing to prevent concurrent initializations
|
||||
self._is_initializing = True
|
||||
|
||||
try:
|
||||
# Start timer
|
||||
start_time = time.time()
|
||||
|
||||
# Use thread pool to execute CPU-intensive operations
|
||||
loop = asyncio.get_event_loop()
|
||||
cache = await loop.run_in_executor(
|
||||
None, # Use default thread pool
|
||||
self._initialize_recipe_cache_sync # Run synchronous version in thread
|
||||
)
|
||||
|
||||
# Calculate elapsed time and log it
|
||||
elapsed_time = time.time() - start_time
|
||||
recipe_count = len(cache.raw_data) if cache and hasattr(cache, 'raw_data') else 0
|
||||
logger.info(f"Recipe cache initialized in {elapsed_time:.2f} seconds. Found {recipe_count} recipes")
|
||||
finally:
|
||||
# Mark initialization as complete regardless of outcome
|
||||
self._is_initializing = False
|
||||
except Exception as e:
|
||||
logger.error(f"Recipe Scanner: Error initializing cache in background: {e}")
|
||||
|
||||
def _initialize_recipe_cache_sync(self):
|
||||
"""Synchronous version of recipe cache initialization for thread pool execution"""
|
||||
try:
|
||||
# Create a new event loop for this thread
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
|
||||
# Create a synchronous method to bypass the async lock
|
||||
def sync_initialize_cache():
|
||||
# We need to implement scan_all_recipes logic synchronously here
|
||||
# instead of calling the async method to avoid event loop issues
|
||||
recipes = []
|
||||
recipes_dir = self.recipes_dir
|
||||
|
||||
if not recipes_dir or not os.path.exists(recipes_dir):
|
||||
logger.warning(f"Recipes directory not found: {recipes_dir}")
|
||||
return recipes
|
||||
|
||||
# Get all recipe JSON files in the recipes directory
|
||||
recipe_files = []
|
||||
for root, _, files in os.walk(recipes_dir):
|
||||
recipe_count = sum(1 for f in files if f.lower().endswith('.recipe.json'))
|
||||
if recipe_count > 0:
|
||||
for file in files:
|
||||
if file.lower().endswith('.recipe.json'):
|
||||
recipe_files.append(os.path.join(root, file))
|
||||
|
||||
# Process each recipe file
|
||||
for recipe_path in recipe_files:
|
||||
try:
|
||||
with open(recipe_path, 'r', encoding='utf-8') as f:
|
||||
recipe_data = json.load(f)
|
||||
|
||||
# Validate recipe data
|
||||
if not recipe_data or not isinstance(recipe_data, dict):
|
||||
logger.warning(f"Invalid recipe data in {recipe_path}")
|
||||
continue
|
||||
|
||||
# Ensure required fields exist
|
||||
required_fields = ['id', 'file_path', 'title']
|
||||
if not all(field in recipe_data for field in required_fields):
|
||||
logger.warning(f"Missing required fields in {recipe_path}")
|
||||
continue
|
||||
|
||||
# Ensure the image file exists
|
||||
image_path = recipe_data.get('file_path')
|
||||
if not os.path.exists(image_path):
|
||||
recipe_dir = os.path.dirname(recipe_path)
|
||||
image_filename = os.path.basename(image_path)
|
||||
alternative_path = os.path.join(recipe_dir, image_filename)
|
||||
if os.path.exists(alternative_path):
|
||||
recipe_data['file_path'] = alternative_path
|
||||
|
||||
# Ensure loras array exists
|
||||
if 'loras' not in recipe_data:
|
||||
recipe_data['loras'] = []
|
||||
|
||||
# Ensure gen_params exists
|
||||
if 'gen_params' not in recipe_data:
|
||||
recipe_data['gen_params'] = {}
|
||||
|
||||
# Add to list without async operations
|
||||
recipes.append(recipe_data)
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading recipe file {recipe_path}: {e}")
|
||||
import traceback
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
|
||||
# Update cache with the collected data
|
||||
self._cache.raw_data = recipes
|
||||
|
||||
# Create a simplified resort function that doesn't use await
|
||||
if hasattr(self._cache, "resort"):
|
||||
try:
|
||||
# Sort by name
|
||||
self._cache.sorted_by_name = sorted(
|
||||
self._cache.raw_data,
|
||||
key=lambda x: x.get('title', '').lower()
|
||||
)
|
||||
|
||||
# Sort by date (modified or created)
|
||||
self._cache.sorted_by_date = sorted(
|
||||
self._cache.raw_data,
|
||||
key=lambda x: x.get('modified', x.get('created_date', 0)),
|
||||
reverse=True
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error sorting recipe cache: {e}")
|
||||
|
||||
return self._cache
|
||||
|
||||
# Run our sync initialization that avoids lock conflicts
|
||||
return sync_initialize_cache()
|
||||
except Exception as e:
|
||||
logger.error(f"Error in thread-based recipe cache initialization: {e}")
|
||||
return self._cache if hasattr(self, '_cache') else None
|
||||
finally:
|
||||
# Clean up the event loop
|
||||
loop.close()
|
||||
|
||||
@property
|
||||
def recipes_dir(self) -> str:
|
||||
"""Get path to recipes directory"""
|
||||
@@ -60,49 +211,48 @@ class RecipeScanner:
|
||||
if self._is_initializing and not force_refresh:
|
||||
return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
|
||||
|
||||
# Try to acquire the lock with a timeout to prevent deadlocks
|
||||
try:
|
||||
async with self._initialization_lock:
|
||||
# Check again after acquiring the lock
|
||||
if self._cache is not None and not force_refresh:
|
||||
return self._cache
|
||||
|
||||
# Mark as initializing to prevent concurrent initializations
|
||||
self._is_initializing = True
|
||||
|
||||
try:
|
||||
# Remove dependency on lora scanner initialization
|
||||
# Scan for recipe data directly
|
||||
raw_data = await self.scan_all_recipes()
|
||||
# If force refresh is requested, initialize the cache directly
|
||||
if force_refresh:
|
||||
# Try to acquire the lock with a timeout to prevent deadlocks
|
||||
try:
|
||||
async with self._initialization_lock:
|
||||
# Mark as initializing to prevent concurrent initializations
|
||||
self._is_initializing = True
|
||||
|
||||
# Update cache
|
||||
self._cache = RecipeCache(
|
||||
raw_data=raw_data,
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[]
|
||||
)
|
||||
try:
|
||||
# Scan for recipe data directly
|
||||
raw_data = await self.scan_all_recipes()
|
||||
|
||||
# Update cache
|
||||
self._cache = RecipeCache(
|
||||
raw_data=raw_data,
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[]
|
||||
)
|
||||
|
||||
# Resort cache
|
||||
await self._cache.resort()
|
||||
|
||||
return self._cache
|
||||
|
||||
# Resort cache
|
||||
await self._cache.resort()
|
||||
|
||||
return self._cache
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Recipe Manager: Error initializing cache: {e}", exc_info=True)
|
||||
# Create empty cache on error
|
||||
self._cache = RecipeCache(
|
||||
raw_data=[],
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[]
|
||||
)
|
||||
return self._cache
|
||||
finally:
|
||||
# Mark initialization as complete
|
||||
self._is_initializing = False
|
||||
except Exception as e:
|
||||
logger.error(f"Recipe Manager: Error initializing cache: {e}", exc_info=True)
|
||||
# Create empty cache on error
|
||||
self._cache = RecipeCache(
|
||||
raw_data=[],
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[]
|
||||
)
|
||||
return self._cache
|
||||
finally:
|
||||
# Mark initialization as complete
|
||||
self._is_initializing = False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error in get_cached_data: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error in get_cached_data: {e}")
|
||||
return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
|
||||
# Return the cache (may be empty or partially initialized)
|
||||
return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
|
||||
|
||||
async def scan_all_recipes(self) -> List[Dict]:
|
||||
"""Scan all recipe JSON files and return metadata"""
|
||||
@@ -191,6 +341,10 @@ class RecipeScanner:
|
||||
metadata_updated = False
|
||||
|
||||
for lora in recipe_data['loras']:
|
||||
# Skip deleted loras that were already marked
|
||||
if lora.get('isDeleted', False):
|
||||
continue
|
||||
|
||||
# Skip if already has complete information
|
||||
if 'hash' in lora and 'file_name' in lora and lora['file_name']:
|
||||
continue
|
||||
@@ -206,10 +360,17 @@ class RecipeScanner:
|
||||
metadata_updated = True
|
||||
else:
|
||||
# If not in cache, fetch from Civitai
|
||||
hash_from_civitai = await self._get_hash_from_civitai(model_version_id)
|
||||
if hash_from_civitai:
|
||||
lora['hash'] = hash_from_civitai
|
||||
metadata_updated = True
|
||||
result = await self._get_hash_from_civitai(model_version_id)
|
||||
if isinstance(result, tuple):
|
||||
hash_from_civitai, is_deleted = result
|
||||
if hash_from_civitai:
|
||||
lora['hash'] = hash_from_civitai
|
||||
metadata_updated = True
|
||||
elif is_deleted:
|
||||
# Mark the lora as deleted if it was not found on Civitai
|
||||
lora['isDeleted'] = True
|
||||
logger.warning(f"Marked lora with modelVersionId {model_version_id} as deleted")
|
||||
metadata_updated = True
|
||||
else:
|
||||
logger.debug(f"Could not get hash for modelVersionId {model_version_id}")
|
||||
|
||||
@@ -255,42 +416,32 @@ class RecipeScanner:
|
||||
async def _get_hash_from_civitai(self, model_version_id: str) -> Tuple[Optional[str], bool]:
|
||||
"""Get hash from Civitai API"""
|
||||
try:
|
||||
if not self._civitai_client:
|
||||
# Get CivitaiClient from ServiceRegistry
|
||||
civitai_client = await self._get_civitai_client()
|
||||
if not civitai_client:
|
||||
logger.error("Failed to get CivitaiClient from ServiceRegistry")
|
||||
return None
|
||||
|
||||
version_info = await self._civitai_client.get_model_version_info(model_version_id)
|
||||
version_info, error_msg = await civitai_client.get_model_version_info(model_version_id)
|
||||
|
||||
if not version_info or not version_info.get('files'):
|
||||
logger.debug(f"No files found in version info for ID: {model_version_id}")
|
||||
return None
|
||||
|
||||
if not version_info:
|
||||
if error_msg and "model not found" in error_msg.lower():
|
||||
logger.warning(f"Model with version ID {model_version_id} was not found on Civitai - marking as deleted")
|
||||
return None, True # Return None hash and True for isDeleted flag
|
||||
else:
|
||||
logger.debug(f"Could not get hash for modelVersionId {model_version_id}: {error_msg}")
|
||||
return None, False # Return None hash but not marked as deleted
|
||||
|
||||
# Get hash from the first file
|
||||
for file_info in version_info.get('files', []):
|
||||
if file_info.get('hashes', {}).get('SHA256'):
|
||||
return file_info['hashes']['SHA256']
|
||||
return file_info['hashes']['SHA256'], False # Return hash with False for isDeleted flag
|
||||
|
||||
logger.debug(f"No SHA256 hash found in version info for ID: {model_version_id}")
|
||||
return None
|
||||
return None, False
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting hash from Civitai: {e}")
|
||||
return None
|
||||
|
||||
async def _get_model_version_name(self, model_version_id: str) -> Optional[str]:
|
||||
"""Get model version name from Civitai API"""
|
||||
try:
|
||||
if not self._civitai_client:
|
||||
return None
|
||||
|
||||
version_info = await self._civitai_client.get_model_version_info(model_version_id)
|
||||
|
||||
if version_info and 'name' in version_info:
|
||||
return version_info['name']
|
||||
|
||||
logger.debug(f"No version name found for modelVersionId {model_version_id}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting model version name from Civitai: {e}")
|
||||
return None
|
||||
return None, False
|
||||
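_get_hash_from_civitai now reports a (hash, is_deleted) pair so callers can distinguish a temporarily unavailable version from one removed from Civitai. A sketch of the caller-side handling (the version id is invented):

```python
async def resolve_lora_hash(scanner: "RecipeScanner", lora: dict) -> None:
    result = await scanner._get_hash_from_civitai("123456")   # example modelVersionId
    if isinstance(result, tuple):
        sha256, is_deleted = result
        if sha256:
            lora["hash"] = sha256
        elif is_deleted:
            lora["isDeleted"] = True    # version no longer exists on Civitai
```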
|
||||
async def _determine_base_model(self, loras: List[Dict]) -> Optional[str]:
|
||||
"""Determine the most common base model among LoRAs"""
|
||||
|
||||
124
py/services/service_registry.py
Normal file
@@ -0,0 +1,124 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Optional, Dict, Any, TypeVar, Type
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
T = TypeVar('T') # Define a type variable for service types
|
||||
|
||||
class ServiceRegistry:
|
||||
"""Centralized registry for service singletons"""
|
||||
|
||||
_instance = None
|
||||
_services: Dict[str, Any] = {}
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
@classmethod
|
||||
def get_instance(cls):
|
||||
"""Get singleton instance of the registry"""
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
@classmethod
|
||||
async def register_service(cls, service_name: str, service_instance: Any) -> None:
|
||||
"""Register a service instance with the registry"""
|
||||
registry = cls.get_instance()
|
||||
async with cls._lock:
|
||||
registry._services[service_name] = service_instance
|
||||
logger.debug(f"Registered service: {service_name}")
|
||||
|
||||
@classmethod
|
||||
async def get_service(cls, service_name: str) -> Any:
|
||||
"""Get a service instance by name"""
|
||||
registry = cls.get_instance()
|
||||
async with cls._lock:
|
||||
if service_name not in registry._services:
|
||||
logger.debug(f"Service {service_name} not found in registry")
|
||||
return None
|
||||
return registry._services[service_name]
|
||||
|
||||
# Convenience methods for common services
|
||||
@classmethod
|
||||
async def get_lora_scanner(cls):
|
||||
"""Get the LoraScanner instance"""
|
||||
from .lora_scanner import LoraScanner
|
||||
scanner = await cls.get_service("lora_scanner")
|
||||
if scanner is None:
|
||||
scanner = await LoraScanner.get_instance()
|
||||
await cls.register_service("lora_scanner", scanner)
|
||||
return scanner
|
||||
|
||||
@classmethod
|
||||
async def get_checkpoint_scanner(cls):
|
||||
"""Get the CheckpointScanner instance"""
|
||||
from .checkpoint_scanner import CheckpointScanner
|
||||
scanner = await cls.get_service("checkpoint_scanner")
|
||||
if scanner is None:
|
||||
scanner = await CheckpointScanner.get_instance()
|
||||
await cls.register_service("checkpoint_scanner", scanner)
|
||||
return scanner
|
||||
|
||||
@classmethod
|
||||
async def get_lora_monitor(cls):
|
||||
"""Get the LoraFileMonitor instance"""
|
||||
from .file_monitor import LoraFileMonitor
|
||||
monitor = await cls.get_service("lora_monitor")
|
||||
if monitor is None:
|
||||
monitor = await LoraFileMonitor.get_instance()
|
||||
await cls.register_service("lora_monitor", monitor)
|
||||
return monitor
|
||||
|
||||
@classmethod
|
||||
async def get_checkpoint_monitor(cls):
|
||||
"""Get the CheckpointFileMonitor instance"""
|
||||
from .file_monitor import CheckpointFileMonitor
|
||||
monitor = await cls.get_service("checkpoint_monitor")
|
||||
if monitor is None:
|
||||
monitor = await CheckpointFileMonitor.get_instance()
|
||||
await cls.register_service("checkpoint_monitor", monitor)
|
||||
return monitor
|
||||
|
||||
@classmethod
|
||||
async def get_civitai_client(cls):
|
||||
"""Get the CivitaiClient instance"""
|
||||
from .civitai_client import CivitaiClient
|
||||
client = await cls.get_service("civitai_client")
|
||||
if client is None:
|
||||
client = await CivitaiClient.get_instance()
|
||||
await cls.register_service("civitai_client", client)
|
||||
return client
|
||||
|
||||
@classmethod
|
||||
async def get_download_manager(cls):
|
||||
"""Get the DownloadManager instance"""
|
||||
from .download_manager import DownloadManager
|
||||
manager = await cls.get_service("download_manager")
|
||||
if manager is None:
|
||||
# We'll let DownloadManager.get_instance handle file_monitor parameter
|
||||
manager = await DownloadManager.get_instance()
|
||||
await cls.register_service("download_manager", manager)
|
||||
return manager
|
||||
|
||||
@classmethod
|
||||
async def get_recipe_scanner(cls):
|
||||
"""Get the RecipeScanner instance"""
|
||||
from .recipe_scanner import RecipeScanner
|
||||
scanner = await cls.get_service("recipe_scanner")
|
||||
if scanner is None:
|
||||
lora_scanner = await cls.get_lora_scanner()
|
||||
scanner = RecipeScanner(lora_scanner)
|
||||
await cls.register_service("recipe_scanner", scanner)
|
||||
return scanner
|
||||
|
||||
@classmethod
|
||||
async def get_websocket_manager(cls):
|
||||
"""Get the WebSocketManager instance"""
|
||||
from .websocket_manager import ws_manager
|
||||
manager = await cls.get_service("websocket_manager")
|
||||
if manager is None:
|
||||
# ws_manager is already a global instance in websocket_manager.py
|
||||
from .websocket_manager import ws_manager
|
||||
await cls.register_service("websocket_manager", ws_manager)
|
||||
manager = ws_manager
|
||||
return manager
|
||||
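A hypothetical startup sequence using the registry: the convenience getters create and register a service on first use, after which any component can fetch the same instance by name.

```python
import asyncio

async def startup():
    lora_scanner = await ServiceRegistry.get_lora_scanner()        # created + registered on first call
    ckpt_scanner = await ServiceRegistry.get_checkpoint_scanner()

    await asyncio.gather(
        lora_scanner.initialize_in_background(),
        ckpt_scanner.initialize_in_background(),
    )

    same = await ServiceRegistry.get_service("lora_scanner")
    assert same is lora_scanner
```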
@@ -9,6 +9,8 @@ class WebSocketManager:

def __init__(self):
self._websockets: Set[web.WebSocketResponse] = set()
self._init_websockets: Set[web.WebSocketResponse] = set() # New set for initialization progress clients
self._checkpoint_websockets: Set[web.WebSocketResponse] = set() # New set for checkpoint download progress

async def handle_connection(self, request: web.Request) -> web.WebSocketResponse:
"""Handle new WebSocket connection"""
@@ -23,6 +25,34 @@ class WebSocketManager:
finally:
self._websockets.discard(ws)
return ws

async def handle_init_connection(self, request: web.Request) -> web.WebSocketResponse:
"""Handle new WebSocket connection for initialization progress"""
ws = web.WebSocketResponse()
await ws.prepare(request)
self._init_websockets.add(ws)

try:
async for msg in ws:
if msg.type == web.WSMsgType.ERROR:
logger.error(f'Init WebSocket error: {ws.exception()}')
finally:
self._init_websockets.discard(ws)
return ws

async def handle_checkpoint_connection(self, request: web.Request) -> web.WebSocketResponse:
"""Handle new WebSocket connection for checkpoint download progress"""
ws = web.WebSocketResponse()
await ws.prepare(request)
self._checkpoint_websockets.add(ws)

try:
async for msg in ws:
if msg.type == web.WSMsgType.ERROR:
logger.error(f'Checkpoint WebSocket error: {ws.exception()}')
finally:
self._checkpoint_websockets.discard(ws)
return ws

async def broadcast(self, data: Dict):
"""Broadcast message to all connected clients"""
@@ -34,10 +64,48 @@ class WebSocketManager:
await ws.send_json(data)
except Exception as e:
logger.error(f"Error sending progress: {e}")

async def broadcast_init_progress(self, data: Dict):
"""Broadcast initialization progress to connected clients"""
if not self._init_websockets:
return

# Ensure data has all required fields
if 'stage' not in data:
data['stage'] = 'processing'
if 'progress' not in data:
data['progress'] = 0
if 'details' not in data:
data['details'] = 'Processing...'

for ws in self._init_websockets:
try:
await ws.send_json(data)
except Exception as e:
logger.error(f"Error sending initialization progress: {e}")

async def broadcast_checkpoint_progress(self, data: Dict):
"""Broadcast checkpoint download progress to connected clients"""
if not self._checkpoint_websockets:
return

for ws in self._checkpoint_websockets:
try:
await ws.send_json(data)
except Exception as e:
logger.error(f"Error sending checkpoint progress: {e}")

def get_connected_clients_count(self) -> int:
"""Get number of connected clients"""
return len(self._websockets)

def get_init_clients_count(self) -> int:
"""Get number of initialization progress clients"""
return len(self._init_websockets)

def get_checkpoint_clients_count(self) -> int:
"""Get number of checkpoint progress clients"""
return len(self._checkpoint_websockets)

# Global instance
ws_manager = WebSocketManager()
ws_manager = WebSocketManager()
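A minimal sketch (not part of the commit) of how the three connection handlers above could be registered with an aiohttp application; the URL paths and the helper name are illustrative assumptions.

from aiohttp import web
from .websocket_manager import ws_manager  # the global instance defined above

def setup_websocket_routes(app: web.Application) -> None:
    # One endpoint per client set tracked by WebSocketManager
    app.router.add_get('/ws/fetch-progress', ws_manager.handle_connection)
    app.router.add_get('/ws/init-progress', ws_manager.handle_init_connection)
    app.router.add_get('/ws/checkpoint-progress', ws_manager.handle_checkpoint_connection)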
@@ -5,4 +5,21 @@ NSFW_LEVELS = {
"X": 8,
"XXX": 16,
"Blocked": 32, # Probably not actually visible through the API without being logged in on model owner account?
}
}

# preview extensions
PREVIEW_EXTENSIONS = [
'.webp',
'.preview.webp',
'.preview.png',
'.preview.jpeg',
'.preview.jpg',
'.preview.mp4',
'.png',
'.jpeg',
'.jpg',
'.mp4'
]

# Card preview image width
CARD_PREVIEW_WIDTH = 480
@@ -203,7 +203,7 @@ class ExifUtils:
return user_comment[:recipe_marker_index] + user_comment[next_line_index:]

@staticmethod
def optimize_image(image_data, target_width=250, format='webp', quality=85, preserve_metadata=True):
def optimize_image(image_data, target_width=250, format='webp', quality=85, preserve_metadata=False):
"""
Optimize an image by resizing and converting to WebP format

@@ -218,98 +218,144 @@ class ExifUtils:
Tuple of (optimized_image_data, extension)
"""
try:
# Extract metadata if needed
# First validate the image data is usable
img = None
if isinstance(image_data, str) and os.path.exists(image_data):
# It's a file path - validate file
try:
with Image.open(image_data) as test_img:
# Verify the image can be fully loaded by accessing its size
width, height = test_img.size
# If we got here, the image is valid
img = Image.open(image_data)
except (IOError, OSError) as e:
logger.error(f"Invalid or corrupt image file: {image_data}: {e}")
raise ValueError(f"Cannot process corrupt image: {e}")
else:
# It's binary data - validate data
try:
with BytesIO(image_data) as temp_buf:
test_img = Image.open(temp_buf)
# Verify the image can be fully loaded
width, height = test_img.size
# If successful, reopen for processing
img = Image.open(BytesIO(image_data))
except Exception as e:
logger.error(f"Invalid binary image data: {e}")
raise ValueError(f"Cannot process corrupt image data: {e}")

# Extract metadata if needed and valid
metadata = None
if preserve_metadata:
if isinstance(image_data, str) and os.path.exists(image_data):
# It's a file path
metadata = ExifUtils.extract_image_metadata(image_data)
img = Image.open(image_data)
else:
# It's binary data
temp_img = BytesIO(image_data)
img = Image.open(temp_img)
# Save to a temporary file to extract metadata
import tempfile
with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp_file:
temp_path = temp_file.name
temp_file.write(image_data)
metadata = ExifUtils.extract_image_metadata(temp_path)
os.unlink(temp_path)
else:
# Just open the image without extracting metadata
if isinstance(image_data, str) and os.path.exists(image_data):
img = Image.open(image_data)
else:
img = Image.open(BytesIO(image_data))

try:
if isinstance(image_data, str) and os.path.exists(image_data):
# For file path, extract directly
metadata = ExifUtils.extract_image_metadata(image_data)
else:
# For binary data, save to temp file first
import tempfile
with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp_file:
temp_path = temp_file.name
temp_file.write(image_data)
try:
metadata = ExifUtils.extract_image_metadata(temp_path)
except Exception as e:
logger.warning(f"Failed to extract metadata from temp file: {e}")
finally:
# Clean up temp file
try:
os.unlink(temp_path)
except Exception:
pass
except Exception as e:
logger.warning(f"Failed to extract metadata, continuing without it: {e}")
# Continue without metadata

# Calculate new height to maintain aspect ratio
width, height = img.size
new_height = int(height * (target_width / width))

# Resize the image
resized_img = img.resize((target_width, new_height), Image.LANCZOS)
# Resize the image with error handling
try:
resized_img = img.resize((target_width, new_height), Image.LANCZOS)
except Exception as e:
logger.error(f"Failed to resize image: {e}")
# Return original image if resize fails
return image_data, '.jpg' if not isinstance(image_data, str) else os.path.splitext(image_data)[1]

# Save to BytesIO in the specified format
output = BytesIO()

# WebP format
# Set format and extension
if format.lower() == 'webp':
resized_img.save(output, format='WEBP', quality=quality)
extension = '.webp'
# JPEG format
save_format, extension = 'WEBP', '.webp'
elif format.lower() in ('jpg', 'jpeg'):
resized_img.save(output, format='JPEG', quality=quality)
extension = '.jpg'
# PNG format
save_format, extension = 'JPEG', '.jpg'
elif format.lower() == 'png':
resized_img.save(output, format='PNG', optimize=True)
extension = '.png'
save_format, extension = 'PNG', '.png'
else:
# Default to WebP
resized_img.save(output, format='WEBP', quality=quality)
extension = '.webp'
save_format, extension = 'WEBP', '.webp'

# Save with error handling
try:
if save_format == 'PNG':
resized_img.save(output, format=save_format, optimize=True)
else:
resized_img.save(output, format=save_format, quality=quality)
except Exception as e:
logger.error(f"Failed to save optimized image: {e}")
# Return original image if save fails
return image_data, '.jpg' if not isinstance(image_data, str) else os.path.splitext(image_data)[1]

# Get the optimized image data
optimized_data = output.getvalue()

# If we need to preserve metadata, write it to a temporary file
# Handle metadata preservation if requested and available
if preserve_metadata and metadata:
# For WebP format, we'll directly save with metadata
if format.lower() == 'webp':
# Create a new BytesIO with metadata
output_with_metadata = BytesIO()

# Create EXIF data with user comment
exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
exif_bytes = piexif.dump(exif_dict)

# Save with metadata
resized_img.save(output_with_metadata, format='WEBP', exif=exif_bytes, quality=quality)
optimized_data = output_with_metadata.getvalue()
else:
# For other formats, use the temporary file approach
import tempfile
with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as temp_file:
temp_path = temp_file.name
temp_file.write(optimized_data)

# Add the metadata back
ExifUtils.update_image_metadata(temp_path, metadata)

# Read the file with metadata
with open(temp_path, 'rb') as f:
optimized_data = f.read()

# Clean up
os.unlink(temp_path)
try:
if save_format == 'WEBP':
# For WebP format, directly save with metadata
try:
output_with_metadata = BytesIO()
exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
exif_bytes = piexif.dump(exif_dict)
resized_img.save(output_with_metadata, format='WEBP', exif=exif_bytes, quality=quality)
optimized_data = output_with_metadata.getvalue()
except Exception as e:
logger.warning(f"Failed to add metadata to WebP, continuing without it: {e}")
else:
# For other formats, use temporary file
import tempfile
with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as temp_file:
temp_path = temp_file.name
temp_file.write(optimized_data)

try:
# Add metadata
ExifUtils.update_image_metadata(temp_path, metadata)
# Read back the file
with open(temp_path, 'rb') as f:
optimized_data = f.read()
except Exception as e:
logger.warning(f"Failed to add metadata to image, continuing without it: {e}")
finally:
# Clean up temp file
try:
os.unlink(temp_path)
except Exception:
pass
except Exception as e:
logger.warning(f"Failed to preserve metadata: {e}, continuing with unmodified output")

return optimized_data, extension

except Exception as e:
logger.error(f"Error optimizing image: {e}", exc_info=True)
# Return original data if optimization fails
# Return original data if optimization completely fails
if isinstance(image_data, str) and os.path.exists(image_data):
with open(image_data, 'rb') as f:
return f.read(), os.path.splitext(image_data)[1]
try:
with open(image_data, 'rb') as f:
return f.read(), os.path.splitext(image_data)[1]
except Exception:
return image_data, '.jpg' # Last resort fallback
return image_data, '.jpg'
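A minimal usage sketch (not part of the commit): shrinking an existing preview to the card width and converting it to WebP with ExifUtils.optimize_image as defined above; the file paths are illustrative, the imports mirror the ones used elsewhere in the repository.

from .constants import CARD_PREVIEW_WIDTH
from .exif_utils import ExifUtils

def convert_preview_to_webp(src_path: str, dst_path: str) -> None:
    # Read the original image bytes, let optimize_image resize and re-encode them
    with open(src_path, 'rb') as f:
        image_data = f.read()
    optimized_data, ext = ExifUtils.optimize_image(
        image_data=image_data,
        target_width=CARD_PREVIEW_WIDTH,
        format='webp',
        quality=85,
        preserve_metadata=False,
    )
    # ext is '.webp' here; dst_path is expected to carry that extension already
    with open(dst_path, 'wb') as f:
        f.write(optimized_data)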
@@ -2,12 +2,14 @@ import logging
import os
import hashlib
import json
from typing import Dict, Optional
import time
from typing import Dict, Optional, Type

from .model_utils import determine_base_model

from .lora_metadata import extract_lora_metadata
from .models import LoraMetadata
from .lora_metadata import extract_lora_metadata, extract_checkpoint_metadata
from .models import BaseModelMetadata, LoraMetadata, CheckpointMetadata
from .constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
from .exif_utils import ExifUtils

logger = logging.getLogger(__name__)

@@ -15,35 +17,56 @@ async def calculate_sha256(file_path: str) -> str:
"""Calculate SHA256 hash of a file"""
sha256_hash = hashlib.sha256()
with open(file_path, "rb") as f:
for byte_block in iter(lambda: f.read(4096), b""):
for byte_block in iter(lambda: f.read(128 * 1024), b""):
sha256_hash.update(byte_block)
return sha256_hash.hexdigest()

def find_preview_file(base_name: str, dir_path: str) -> str:
"""Find preview file for given base name in directory"""
preview_patterns = [
f"{base_name}.preview.png",
f"{base_name}.preview.jpg",
f"{base_name}.preview.jpeg",
f"{base_name}.preview.mp4",
f"{base_name}.png",
f"{base_name}.jpg",
f"{base_name}.jpeg",
f"{base_name}.mp4"
]

for pattern in preview_patterns:
full_pattern = os.path.join(dir_path, pattern)
for ext in PREVIEW_EXTENSIONS:
full_pattern = os.path.join(dir_path, f"{base_name}{ext}")
if os.path.exists(full_pattern):
# Check if this is an image and not already webp
if ext.lower().endswith(('.jpg', '.jpeg', '.png')) and not ext.lower().endswith('.webp'):
try:
# Optimize the image to webp format
webp_path = os.path.join(dir_path, f"{base_name}.webp")

# Use ExifUtils to optimize the image
with open(full_pattern, 'rb') as f:
image_data = f.read()

optimized_data, _ = ExifUtils.optimize_image(
image_data=image_data,
target_width=CARD_PREVIEW_WIDTH,
format='webp',
quality=85,
preserve_metadata=False # Changed from True to False
)

# Save the optimized webp file
with open(webp_path, 'wb') as f:
f.write(optimized_data)

logger.debug(f"Optimized preview image from {full_pattern} to {webp_path}")
return webp_path.replace(os.sep, "/")
except Exception as e:
logger.error(f"Error optimizing preview image {full_pattern}: {e}")
# Fall back to original file if optimization fails
return full_pattern.replace(os.sep, "/")

# Return the original path for webp images or non-image files
return full_pattern.replace(os.sep, "/")

return ""

def normalize_path(path: str) -> str:
"""Normalize file path to use forward slashes"""
return path.replace(os.sep, "/") if path else path

async def get_file_info(file_path: str) -> Optional[LoraMetadata]:
"""Get basic file information as LoraMetadata object"""
async def get_file_info(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
"""Get basic file information as a model metadata object"""
# First check if file actually exists and resolve symlinks
try:
real_path = os.path.realpath(file_path)
@@ -70,31 +93,67 @@ async def get_file_info(file_path: str) -> Optional[LoraMetadata]:
logger.debug(f"Using SHA256 from .json file for {file_path}")
except Exception as e:
logger.error(f"Error reading .json file for {file_path}: {e}")

# If SHA256 is still not found, check for a .sha256 file
if sha256 is None:
sha256_file = f"{os.path.splitext(file_path)[0]}.sha256"
if os.path.exists(sha256_file):
try:
with open(sha256_file, 'r', encoding='utf-8') as f:
sha256 = f.read().strip().lower()
logger.debug(f"Using SHA256 from .sha256 file for {file_path}")
except Exception as e:
logger.error(f"Error reading .sha256 file for {file_path}: {e}")

try:
# If we didn't get SHA256 from the .json file, calculate it
if not sha256:
start_time = time.time()
sha256 = await calculate_sha256(real_path)
logger.debug(f"Calculated SHA256 for {file_path} in {time.time() - start_time:.2f} seconds")

# Create default metadata based on model class
if model_class == CheckpointMetadata:
metadata = CheckpointMetadata(
file_name=base_name,
model_name=base_name,
file_path=normalize_path(file_path),
size=os.path.getsize(real_path),
modified=os.path.getmtime(real_path),
sha256=sha256,
base_model="Unknown", # Will be updated later
preview_url=normalize_path(preview_url),
tags=[],
modelDescription="",
model_type="checkpoint"
)

metadata = LoraMetadata(
file_name=base_name,
model_name=base_name,
file_path=normalize_path(file_path),
size=os.path.getsize(real_path),
modified=os.path.getmtime(real_path),
sha256=sha256,
base_model="Unknown", # Will be updated later
usage_tips="",
notes="",
from_civitai=True,
preview_url=normalize_path(preview_url),
tags=[],
modelDescription=""
)
# Extract checkpoint-specific metadata
# model_info = await extract_checkpoint_metadata(real_path)
# metadata.base_model = model_info['base_model']
# if 'model_type' in model_info:
# metadata.model_type = model_info['model_type']

else: # Default to LoraMetadata
metadata = LoraMetadata(
file_name=base_name,
model_name=base_name,
file_path=normalize_path(file_path),
size=os.path.getsize(real_path),
modified=os.path.getmtime(real_path),
sha256=sha256,
base_model="Unknown", # Will be updated later
usage_tips="{}",
preview_url=normalize_path(preview_url),
tags=[],
modelDescription=""
)

# Extract lora-specific metadata
model_info = await extract_lora_metadata(real_path)
metadata.base_model = model_info['base_model']

# create metadata file
base_model_info = await extract_lora_metadata(real_path)
metadata.base_model = base_model_info['base_model']
# Save metadata to file
await save_metadata(file_path, metadata)

return metadata
@@ -102,7 +161,7 @@ async def get_file_info(file_path: str) -> Optional[LoraMetadata]:
logger.error(f"Error getting file info for {file_path}: {e}")
return None

async def save_metadata(file_path: str, metadata: LoraMetadata) -> None:
async def save_metadata(file_path: str, metadata: BaseModelMetadata) -> None:
"""Save metadata to .metadata.json file"""
metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
try:
@@ -115,7 +174,7 @@ async def save_metadata(file_path: str, metadata: LoraMetadata) -> None:
except Exception as e:
print(f"Error saving metadata to {metadata_path}: {str(e)}")

async def load_metadata(file_path: str) -> Optional[LoraMetadata]:
async def load_metadata(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
"""Load metadata from .metadata.json file"""
metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
try:
@@ -138,6 +197,7 @@ async def load_metadata(file_path: str) -> Optional[LoraMetadata]:
data['file_path'] = normalize_path(file_path)
needs_update = True

# TODO: optimize preview image to webp format if not already done
preview_url = data.get('preview_url', '')
if not preview_url or not os.path.exists(preview_url):
base_name = os.path.splitext(os.path.basename(file_path))[0]
@@ -162,12 +222,22 @@ async def load_metadata(file_path: str) -> Optional[LoraMetadata]:
if 'modelDescription' not in data:
data['modelDescription'] = ""
needs_update = True

# For checkpoint metadata
if model_class == CheckpointMetadata and 'model_type' not in data:
data['model_type'] = "checkpoint"
needs_update = True

# For lora metadata
if model_class == LoraMetadata and 'usage_tips' not in data:
data['usage_tips'] = "{}"
needs_update = True

if needs_update:
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)

return LoraMetadata.from_dict(data)
return model_class.from_dict(data)

except Exception as e:
print(f"Error loading metadata from {metadata_path}: {str(e)}")
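A minimal usage sketch (not part of the commit): the helpers above now serve both model types through the model_class parameter, so a checkpoint scanner could reuse them as below; the wrapper function itself is an illustrative assumption.

async def scan_checkpoint_file(file_path: str):
    # Prefer the cached .metadata.json; fall back to a fresh scan that also writes it
    metadata = await load_metadata(file_path, model_class=CheckpointMetadata)
    if metadata is None:
        metadata = await get_file_info(file_path, model_class=CheckpointMetadata)
    return metadata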
@@ -1,6 +1,7 @@
from safetensors import safe_open
from typing import Dict
from .model_utils import determine_base_model
import os

async def extract_lora_metadata(file_path: str) -> Dict:
"""Extract essential metadata from safetensors file"""
@@ -13,4 +14,67 @@ async def extract_lora_metadata(file_path: str) -> Dict:
return {"base_model": base_model}
except Exception as e:
print(f"Error reading metadata from {file_path}: {str(e)}")
return {"base_model": "Unknown"}
return {"base_model": "Unknown"}

async def extract_checkpoint_metadata(file_path: str) -> dict:
"""Extract metadata from a checkpoint file to determine model type and base model"""
try:
# Analyze filename for clues about the model
filename = os.path.basename(file_path).lower()

model_info = {
'base_model': 'Unknown',
'model_type': 'checkpoint'
}

# Detect base model from filename
if 'xl' in filename or 'sdxl' in filename:
model_info['base_model'] = 'SDXL'
elif 'sd3' in filename:
model_info['base_model'] = 'SD3'
elif 'sd2' in filename or 'v2' in filename:
model_info['base_model'] = 'SD2.x'
elif 'sd1' in filename or 'v1' in filename:
model_info['base_model'] = 'SD1.5'

# Detect model type from filename
if 'inpaint' in filename:
model_info['model_type'] = 'inpainting'
elif 'anime' in filename:
model_info['model_type'] = 'anime'
elif 'realistic' in filename:
model_info['model_type'] = 'realistic'

# Try to peek at the safetensors file structure if available
if file_path.endswith('.safetensors'):
import json
import struct

with open(file_path, 'rb') as f:
header_size = struct.unpack('<Q', f.read(8))[0]
header_json = f.read(header_size)
header = json.loads(header_json)

# Look for specific keys to identify model type
metadata = header.get('__metadata__', {})
if metadata:
# Try to determine if it's SDXL
if any(key.startswith('conditioner.embedders.1') for key in header):
model_info['base_model'] = 'SDXL'

# Look for model type info
if metadata.get('modelspec.architecture') == 'SD-XL':
model_info['base_model'] = 'SDXL'
elif metadata.get('modelspec.architecture') == 'SD-3':
model_info['base_model'] = 'SD3'

# Check for specific use case
if metadata.get('modelspec.purpose') == 'inpainting':
model_info['model_type'] = 'inpainting'

return model_info

except Exception as e:
print(f"Error extracting checkpoint metadata for {file_path}: {str(e)}")
# Return default values
return {'base_model': 'Unknown', 'model_type': 'checkpoint'}
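For reference, a self-contained sketch (not part of the commit) of the safetensors header layout that extract_checkpoint_metadata peeks at: an unsigned 64-bit little-endian length, followed by that many bytes of JSON whose optional "__metadata__" entry carries keys such as modelspec.architecture.

import json
import struct

def read_safetensors_header(file_path: str) -> dict:
    # 8-byte little-endian header length, then the JSON header itself
    with open(file_path, 'rb') as f:
        header_size = struct.unpack('<Q', f.read(8))[0]
        return json.loads(f.read(header_size))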
@@ -5,23 +5,23 @@ import os
from .model_utils import determine_base_model

@dataclass
class LoraMetadata:
"""Represents the metadata structure for a Lora model"""
file_name: str # The filename without extension of the lora
model_name: str # The lora's name defined by the creator, initially same as file_name
file_path: str # Full path to the safetensors file
class BaseModelMetadata:
"""Base class for all model metadata structures"""
file_name: str # The filename without extension
model_name: str # The model's name defined by the creator
file_path: str # Full path to the model file
size: int # File size in bytes
modified: float # Last modified timestamp
sha256: str # SHA256 hash of the file
base_model: str # Base model (SD1.5/SD2.1/SDXL/etc.)
base_model: str # Base model type (SD1.5/SD2.1/SDXL/etc.)
preview_url: str # Preview image URL
preview_nsfw_level: int = 0 # NSFW level of the preview image
usage_tips: str = "{}" # Usage tips for the model, json string
notes: str = "" # Additional notes
from_civitai: bool = True # Whether the lora is from Civitai
from_civitai: bool = True # Whether from Civitai
civitai: Optional[Dict] = None # Civitai API data if available
tags: List[str] = None # Model tags
modelDescription: str = "" # Full model description
civitai_deleted: bool = False # Whether deleted from Civitai

def __post_init__(self):
# Initialize empty lists to avoid mutable default parameter issue
@@ -29,32 +29,11 @@ class LoraMetadata:
self.tags = []

@classmethod
def from_dict(cls, data: Dict) -> 'LoraMetadata':
"""Create LoraMetadata instance from dictionary"""
# Create a copy of the data to avoid modifying the input
def from_dict(cls, data: Dict) -> 'BaseModelMetadata':
"""Create instance from dictionary"""
data_copy = data.copy()
return cls(**data_copy)

@classmethod
def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'LoraMetadata':
"""Create LoraMetadata instance from Civitai version info"""
file_name = file_info['name']
base_model = determine_base_model(version_info.get('baseModel', ''))

return cls(
file_name=os.path.splitext(file_name)[0],
model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
file_path=save_path.replace(os.sep, '/'),
size=file_info.get('sizeKB', 0) * 1024,
modified=datetime.now().timestamp(),
sha256=file_info['hashes'].get('SHA256', '').lower(),
base_model=base_model,
preview_url=None, # Will be updated after preview download
preview_nsfw_level=0, # Will be updated after preview download, it is decided by the nsfw level of the preview image
from_civitai=True,
civitai=version_info
)

def to_dict(self) -> Dict:
"""Convert to dictionary for JSON serialization"""
return asdict(self)
@@ -76,30 +55,76 @@ class LoraMetadata:
self.file_path = file_path.replace(os.sep, '/')

@dataclass
class CheckpointMetadata:
"""Represents the metadata structure for a Checkpoint model"""
file_name: str # The filename without extension
model_name: str # The checkpoint's name defined by the creator
file_path: str # Full path to the model file
size: int # File size in bytes
modified: float # Last modified timestamp
sha256: str # SHA256 hash of the file
base_model: str # Base model type (SD1.5/SD2.1/SDXL/etc.)
preview_url: str # Preview image URL
preview_nsfw_level: int = 0 # NSFW level of the preview image
model_type: str = "checkpoint" # Model type (checkpoint, inpainting, etc.)
notes: str = "" # Additional notes
from_civitai: bool = True # Whether from Civitai
civitai: Optional[Dict] = None # Civitai API data if available
tags: List[str] = None # Model tags
modelDescription: str = "" # Full model description

# Additional checkpoint-specific fields
resolution: Optional[str] = None # Native resolution (e.g., 512x512, 1024x1024)
vae_included: bool = False # Whether VAE is included in the checkpoint
architecture: str = "" # Model architecture (if known)

def __post_init__(self):
if self.tags is None:
self.tags = []
class LoraMetadata(BaseModelMetadata):
"""Represents the metadata structure for a Lora model"""
usage_tips: str = "{}" # Usage tips for the model, json string

@classmethod
def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'LoraMetadata':
"""Create LoraMetadata instance from Civitai version info"""
file_name = file_info['name']
base_model = determine_base_model(version_info.get('baseModel', ''))

# Extract tags and description if available
tags = []
description = ""
if 'model' in version_info:
if 'tags' in version_info['model']:
tags = version_info['model']['tags']
if 'description' in version_info['model']:
description = version_info['model']['description']

return cls(
file_name=os.path.splitext(file_name)[0],
model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
file_path=save_path.replace(os.sep, '/'),
size=file_info.get('sizeKB', 0) * 1024,
modified=datetime.now().timestamp(),
sha256=file_info['hashes'].get('SHA256', '').lower(),
base_model=base_model,
preview_url=None, # Will be updated after preview download
preview_nsfw_level=0, # Will be updated after preview download
from_civitai=True,
civitai=version_info,
tags=tags,
modelDescription=description
)

@dataclass
class CheckpointMetadata(BaseModelMetadata):
"""Represents the metadata structure for a Checkpoint model"""
model_type: str = "checkpoint" # Model type (checkpoint, inpainting, etc.)

@classmethod
def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'CheckpointMetadata':
"""Create CheckpointMetadata instance from Civitai version info"""
file_name = file_info['name']
base_model = determine_base_model(version_info.get('baseModel', ''))
model_type = version_info.get('type', 'checkpoint')

# Extract tags and description if available
tags = []
description = ""
if 'model' in version_info:
if 'tags' in version_info['model']:
tags = version_info['model']['tags']
if 'description' in version_info['model']:
description = version_info['model']['description']

return cls(
file_name=os.path.splitext(file_name)[0],
model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
file_path=save_path.replace(os.sep, '/'),
size=file_info.get('sizeKB', 0) * 1024,
modified=datetime.now().timestamp(),
sha256=file_info['hashes'].get('SHA256', '').lower(),
base_model=base_model,
preview_url=None, # Will be updated after preview download
preview_nsfw_level=0,
from_civitai=True,
civitai=version_info,
model_type=model_type,
tags=tags,
modelDescription=description
)
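A minimal sketch (not part of the commit) of building a CheckpointMetadata instance from a Civitai version payload and serializing it; the payload literal and the save path are illustrative, while the field names mirror the ones read by from_civitai_info above.

from .models import CheckpointMetadata  # assumed import location of the dataclasses above

version_info = {
    'baseModel': 'SDXL 1.0',
    'type': 'Checkpoint',
    'model': {'name': 'Example Checkpoint', 'tags': ['base model'], 'description': ''},
}
file_info = {
    'name': 'example_checkpoint.safetensors',
    'sizeKB': 6500000,
    'hashes': {'SHA256': 'ABC123'},
}

meta = CheckpointMetadata.from_civitai_info(version_info, file_info, 'checkpoints/example_checkpoint.safetensors')
print(meta.to_dict())  # plain dict ready to be written to a .metadata.json file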
503
py/utils/routes_common.py
Normal file
503
py/utils/routes_common.py
Normal file
@@ -0,0 +1,503 @@
import os
import json
import logging
from typing import Dict, List, Callable, Awaitable
from aiohttp import web

from .model_utils import determine_base_model
from .constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
from ..config import config
from ..services.civitai_client import CivitaiClient
from ..utils.exif_utils import ExifUtils
from ..services.download_manager import DownloadManager

logger = logging.getLogger(__name__)


class ModelRouteUtils:
"""Shared utilities for model routes (LoRAs, Checkpoints, etc.)"""

@staticmethod
async def load_local_metadata(metadata_path: str) -> Dict:
"""Load local metadata file"""
if os.path.exists(metadata_path):
try:
with open(metadata_path, 'r', encoding='utf-8') as f:
return json.load(f)
except Exception as e:
logger.error(f"Error loading metadata from {metadata_path}: {e}")
return {}

@staticmethod
async def handle_not_found_on_civitai(metadata_path: str, local_metadata: Dict) -> None:
"""Handle case when model is not found on CivitAI"""
local_metadata['from_civitai'] = False
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(local_metadata, f, indent=2, ensure_ascii=False)

@staticmethod
async def update_model_metadata(metadata_path: str, local_metadata: Dict,
civitai_metadata: Dict, client: CivitaiClient) -> None:
"""Update local metadata with CivitAI data"""
local_metadata['civitai'] = civitai_metadata

# Update model name if available
if 'model' in civitai_metadata:
if civitai_metadata.get('model', {}).get('name'):
local_metadata['model_name'] = civitai_metadata['model']['name']

# Fetch additional model metadata (description and tags) if we have model ID
model_id = civitai_metadata['modelId']
if model_id:
model_metadata, _ = await client.get_model_metadata(str(model_id))
if model_metadata:
local_metadata['modelDescription'] = model_metadata.get('description', '')
local_metadata['tags'] = model_metadata.get('tags', [])

# Update base model
local_metadata['base_model'] = determine_base_model(civitai_metadata.get('baseModel'))

# Update preview if needed
if not local_metadata.get('preview_url') or not os.path.exists(local_metadata['preview_url']):
first_preview = next((img for img in civitai_metadata.get('images', [])), None)
if first_preview:
# Determine if content is video or image
is_video = first_preview['type'] == 'video'

if is_video:
# For videos use .mp4 extension
preview_ext = '.mp4'
else:
# For images use .webp extension
preview_ext = '.webp'

base_name = os.path.splitext(os.path.splitext(os.path.basename(metadata_path))[0])[0]
preview_filename = base_name + preview_ext
preview_path = os.path.join(os.path.dirname(metadata_path), preview_filename)

if is_video:
# Download video as is
if await client.download_preview_image(first_preview['url'], preview_path):
local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
else:
# For images, download and then optimize to WebP
temp_path = preview_path + ".temp"
if await client.download_preview_image(first_preview['url'], temp_path):
try:
# Read the downloaded image
with open(temp_path, 'rb') as f:
image_data = f.read()

# Optimize and convert to WebP
optimized_data, _ = ExifUtils.optimize_image(
image_data=image_data,
target_width=CARD_PREVIEW_WIDTH,
format='webp',
quality=85,
preserve_metadata=False
)

# Save the optimized WebP image
with open(preview_path, 'wb') as f:
f.write(optimized_data)

# Update metadata
local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)

# Remove the temporary file
if os.path.exists(temp_path):
os.remove(temp_path)

except Exception as e:
logger.error(f"Error optimizing preview image: {e}")
# If optimization fails, try to use the downloaded image directly
if os.path.exists(temp_path):
os.rename(temp_path, preview_path)
local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)

# Save updated metadata
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(local_metadata, f, indent=2, ensure_ascii=False)

@staticmethod
async def fetch_and_update_model(
sha256: str,
file_path: str,
model_data: dict,
update_cache_func: Callable[[str, str, Dict], Awaitable[bool]]
) -> bool:
"""Fetch and update metadata for a single model

Args:
sha256: SHA256 hash of the model file
file_path: Path to the model file
model_data: The model object in cache to update
update_cache_func: Function to update the cache with new metadata

Returns:
bool: True if successful, False otherwise
"""
client = CivitaiClient()
try:
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'

# Check if model metadata exists
local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)

# Fetch metadata from Civitai
civitai_metadata = await client.get_model_by_hash(sha256)
if not civitai_metadata:
# Mark as not from CivitAI if not found
local_metadata['from_civitai'] = False
model_data['from_civitai'] = False
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(local_metadata, f, indent=2, ensure_ascii=False)
return False

# Update metadata
await ModelRouteUtils.update_model_metadata(
metadata_path,
local_metadata,
civitai_metadata,
client
)

# Update cache object directly
model_data.update({
'model_name': local_metadata.get('model_name'),
'preview_url': local_metadata.get('preview_url'),
'from_civitai': True,
'civitai': civitai_metadata
})

# Update cache using the provided function
await update_cache_func(file_path, file_path, local_metadata)

return True

except Exception as e:
logger.error(f"Error fetching CivitAI data: {e}")
return False
finally:
await client.close()

@staticmethod
def filter_civitai_data(data: Dict) -> Dict:
"""Filter relevant fields from CivitAI data"""
if not data:
return {}

fields = [
"id", "modelId", "name", "createdAt", "updatedAt",
"publishedAt", "trainedWords", "baseModel", "description",
"model", "images"
]
return {k: data[k] for k in fields if k in data}

@staticmethod
async def delete_model_files(target_dir: str, file_name: str, file_monitor=None) -> List[str]:
"""Delete model and associated files

Args:
target_dir: Directory containing the model files
file_name: Base name of the model file without extension
file_monitor: Optional file monitor to ignore delete events

Returns:
List of deleted file paths
"""
patterns = [
f"{file_name}.safetensors", # Required
f"{file_name}.metadata.json",
]

# Add all preview file extensions
for ext in PREVIEW_EXTENSIONS:
patterns.append(f"{file_name}{ext}")

deleted = []
main_file = patterns[0]
main_path = os.path.join(target_dir, main_file).replace(os.sep, '/')

if os.path.exists(main_path):
# Notify file monitor to ignore delete event if available
if file_monitor:
file_monitor.handler.add_ignore_path(main_path, 0)

# Delete file
os.remove(main_path)
deleted.append(main_path)
else:
logger.warning(f"Model file not found: {main_file}")

# Delete optional files
for pattern in patterns[1:]:
path = os.path.join(target_dir, pattern)
if os.path.exists(path):
try:
os.remove(path)
deleted.append(pattern)
except Exception as e:
logger.warning(f"Failed to delete {pattern}: {e}")

return deleted

@staticmethod
def get_multipart_ext(filename):
"""Get extension that may have multiple parts like .metadata.json"""
parts = filename.split(".")
if len(parts) > 2: # If contains multi-part extension
return "." + ".".join(parts[-2:]) # Take the last two parts, like ".metadata.json"
return os.path.splitext(filename)[1] # Otherwise take the regular extension, like ".safetensors"

# New common endpoint handlers

@staticmethod
async def handle_delete_model(request: web.Request, scanner) -> web.Response:
"""Handle model deletion request

Args:
request: The aiohttp request
scanner: The model scanner instance with cache management methods

Returns:
web.Response: The HTTP response
"""
try:
data = await request.json()
file_path = data.get('file_path')
if not file_path:
return web.Response(text='Model path is required', status=400)

target_dir = os.path.dirname(file_path)
file_name = os.path.splitext(os.path.basename(file_path))[0]

# Get the file monitor from the scanner if available
file_monitor = getattr(scanner, 'file_monitor', None)

deleted_files = await ModelRouteUtils.delete_model_files(
target_dir,
file_name,
file_monitor
)

# Remove from cache
cache = await scanner.get_cached_data()
cache.raw_data = [item for item in cache.raw_data if item['file_path'] != file_path]
await cache.resort()

# Update hash index if available
if hasattr(scanner, '_hash_index') and scanner._hash_index:
scanner._hash_index.remove_by_path(file_path)

return web.json_response({
'success': True,
'deleted_files': deleted_files
})

except Exception as e:
logger.error(f"Error deleting model: {e}", exc_info=True)
return web.Response(text=str(e), status=500)

@staticmethod
async def handle_fetch_civitai(request: web.Request, scanner) -> web.Response:
"""Handle CivitAI metadata fetch request

Args:
request: The aiohttp request
scanner: The model scanner instance with cache management methods

Returns:
web.Response: The HTTP response
"""
try:
data = await request.json()
metadata_path = os.path.splitext(data['file_path'])[0] + '.metadata.json'

# Check if model metadata exists
local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
if not local_metadata or not local_metadata.get('sha256'):
return web.json_response({"success": False, "error": "No SHA256 hash found"}, status=400)

# Create a client for fetching from Civitai
client = CivitaiClient()
try:
# Fetch and update metadata
civitai_metadata = await client.get_model_by_hash(local_metadata["sha256"])
if not civitai_metadata:
await ModelRouteUtils.handle_not_found_on_civitai(metadata_path, local_metadata)
return web.json_response({"success": False, "error": "Not found on CivitAI"}, status=404)

await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, client)

# Update the cache
await scanner.update_single_model_cache(data['file_path'], data['file_path'], local_metadata)

return web.json_response({"success": True})
finally:
await client.close()

except Exception as e:
logger.error(f"Error fetching from CivitAI: {e}", exc_info=True)
return web.json_response({"success": False, "error": str(e)}, status=500)

@staticmethod
async def handle_replace_preview(request: web.Request, scanner) -> web.Response:
"""Handle preview image replacement request

Args:
request: The aiohttp request
scanner: The model scanner instance with methods to update cache

Returns:
web.Response: The HTTP response
"""
try:
reader = await request.multipart()

# Read preview file data
field = await reader.next()
if field.name != 'preview_file':
raise ValueError("Expected 'preview_file' field")
content_type = field.headers.get('Content-Type', 'image/png')
preview_data = await field.read()

# Read model path
field = await reader.next()
if field.name != 'model_path':
raise ValueError("Expected 'model_path' field")
model_path = (await field.read()).decode()

# Save preview file
base_name = os.path.splitext(os.path.basename(model_path))[0]
folder = os.path.dirname(model_path)

# Determine if content is video or image
if content_type.startswith('video/'):
# For videos, keep original format and use .mp4 extension
extension = '.mp4'
optimized_data = preview_data
else:
# For images, optimize and convert to WebP
optimized_data, _ = ExifUtils.optimize_image(
image_data=preview_data,
target_width=CARD_PREVIEW_WIDTH,
format='webp',
quality=85,
preserve_metadata=False
)
extension = '.webp' # Use .webp without .preview part

preview_path = os.path.join(folder, base_name + extension).replace(os.sep, '/')

with open(preview_path, 'wb') as f:
f.write(optimized_data)

# Update preview path in metadata
metadata_path = os.path.splitext(model_path)[0] + '.metadata.json'
if os.path.exists(metadata_path):
try:
with open(metadata_path, 'r', encoding='utf-8') as f:
metadata = json.load(f)

# Update preview_url directly in the metadata dict
metadata['preview_url'] = preview_path

with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata, f, indent=2, ensure_ascii=False)
except Exception as e:
logger.error(f"Error updating metadata: {e}")

# Update preview URL in scanner cache
if hasattr(scanner, 'update_preview_in_cache'):
await scanner.update_preview_in_cache(model_path, preview_path)

return web.json_response({
"success": True,
"preview_url": config.get_preview_static_url(preview_path)
})

except Exception as e:
logger.error(f"Error replacing preview: {e}", exc_info=True)
return web.Response(text=str(e), status=500)

@staticmethod
async def handle_download_model(request: web.Request, download_manager: DownloadManager, model_type="lora") -> web.Response:
"""Handle model download request

Args:
request: The aiohttp request
download_manager: Instance of DownloadManager
model_type: Type of model ('lora' or 'checkpoint')

Returns:
web.Response: The HTTP response
"""
try:
data = await request.json()

# Create progress callback
async def progress_callback(progress):
from ..services.websocket_manager import ws_manager
await ws_manager.broadcast({
'status': 'progress',
'progress': progress
})

# Check which identifier is provided
download_url = data.get('download_url')
model_hash = data.get('model_hash')
model_version_id = data.get('model_version_id')

# Validate that at least one identifier is provided
if not any([download_url, model_hash, model_version_id]):
return web.Response(
status=400,
text="Missing required parameter: Please provide either 'download_url', 'hash', or 'modelVersionId'"
)

# Use the correct root directory based on model type
root_key = 'checkpoint_root' if model_type == 'checkpoint' else 'lora_root'
save_dir = data.get(root_key)

result = await download_manager.download_from_civitai(
download_url=download_url,
model_hash=model_hash,
model_version_id=model_version_id,
save_dir=save_dir,
relative_path=data.get('relative_path', ''),
progress_callback=progress_callback,
model_type=model_type
)

if not result.get('success', False):
error_message = result.get('error', 'Unknown error')

# Return 401 for early access errors
if 'early access' in error_message.lower():
logger.warning(f"Early access download failed: {error_message}")
return web.Response(
status=401, # Use 401 status code to match Civitai's response
text=f"Early Access Restriction: {error_message}"
)

return web.Response(status=500, text=error_message)

return web.json_response(result)

except Exception as e:
error_message = str(e)

# Check if this might be an early access error
if '401' in error_message:
logger.warning(f"Early access error (401): {error_message}")
return web.Response(
status=401,
text="Early Access Restriction: This model requires purchase. Please buy early access on Civitai.com."
)

logger.error(f"Error downloading {model_type}: {error_message}")
return web.Response(status=500, text=error_message)
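A minimal sketch (not part of the commit) of how a per-model-type routes class could reuse these shared handlers, wiring them to aiohttp endpoints and passing a scanner whose update_single_model_cache method matches the cache-update callback expected by fetch_and_update_model; the class shape and URL paths are illustrative assumptions.

from aiohttp import web

class CheckpointRouteBindings:
    """Illustrative glue between ModelRouteUtils and an aiohttp application."""

    def __init__(self, scanner, download_manager):
        self.scanner = scanner
        self.download_manager = download_manager

    def setup_routes(self, app: web.Application) -> None:
        app.router.add_post('/api/checkpoints/delete', self.delete_model)
        app.router.add_post('/api/checkpoints/fetch-civitai', self.fetch_civitai)
        app.router.add_post('/api/checkpoints/replace-preview', self.replace_preview)
        app.router.add_post('/api/checkpoints/download', self.download_model)

    async def delete_model(self, request: web.Request) -> web.Response:
        return await ModelRouteUtils.handle_delete_model(request, self.scanner)

    async def fetch_civitai(self, request: web.Request) -> web.Response:
        return await ModelRouteUtils.handle_fetch_civitai(request, self.scanner)

    async def replace_preview(self, request: web.Request) -> web.Response:
        return await ModelRouteUtils.handle_replace_preview(request, self.scanner)

    async def download_model(self, request: web.Request) -> web.Response:
        return await ModelRouteUtils.handle_download_model(request, self.download_manager, model_type='checkpoint')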