diff --git a/__init__.py b/__init__.py index 4adea9d1..008219c8 100644 --- a/__init__.py +++ b/__init__.py @@ -2,15 +2,17 @@ from .py.lora_manager import LoraManager from .py.nodes.lora_loader import LoraManagerLoader from .py.nodes.trigger_word_toggle import TriggerWordToggle from .py.nodes.lora_stacker import LoraStacker +from .py.nodes.save_image import SaveImage NODE_CLASS_MAPPINGS = { LoraManagerLoader.NAME: LoraManagerLoader, TriggerWordToggle.NAME: TriggerWordToggle, - LoraStacker.NAME: LoraStacker + LoraStacker.NAME: LoraStacker, + SaveImage.NAME: SaveImage } WEB_DIRECTORY = "./web/comfyui" # Register routes on import LoraManager.add_routes() -__all__ = ['NODE_CLASS_MAPPINGS', 'WEB_DIRECTORY'] \ No newline at end of file +__all__ = ['NODE_CLASS_MAPPINGS', 'WEB_DIRECTORY'] diff --git a/py/config.py b/py/config.py index 0de5bdb8..bae54b63 100644 --- a/py/config.py +++ b/py/config.py @@ -17,6 +17,7 @@ class Config: # 静态路由映射字典, target to route mapping self._route_mappings = {} self.loras_roots = self._init_lora_paths() + self.temp_directory = folder_paths.get_temp_directory() # 在初始化时扫描符号链接 self._scan_symbolic_links() @@ -87,9 +88,9 @@ class Config: def _init_lora_paths(self) -> List[str]: """Initialize and validate LoRA paths from ComfyUI settings""" - paths = list(set(path.replace(os.sep, "/") + paths = sorted(set(path.replace(os.sep, "/") for path in folder_paths.get_folder_paths("loras") - if os.path.exists(path))) + if os.path.exists(path)), key=lambda p: p.lower()) print("Found LoRA roots:", "\n - " + "\n - ".join(paths)) if not paths: diff --git a/py/lora_manager.py b/py/lora_manager.py index 51655b45..a7ab3fb8 100644 --- a/py/lora_manager.py +++ b/py/lora_manager.py @@ -4,9 +4,13 @@ from server import PromptServer # type: ignore from .config import config from .routes.lora_routes import LoraRoutes from .routes.api_routes import ApiRoutes +from .routes.recipe_routes import RecipeRoutes +from .routes.checkpoints_routes import CheckpointsRoutes from 
@classmethod
async def _schedule_cache_init(cls, scanner: "LoraScanner", recipe_scanner: "RecipeScanner"):
    """Schedule cache initialization in the running event loop.

    Spawns one background task for the lora cache and one delayed task for
    the recipe cache (the delay lets the lora scanner come up first).

    The task objects are kept in a class-level set: the event loop only
    holds weak references to tasks, so a task with no other reference may
    be garbage-collected before it completes (see asyncio.create_task docs).
    """
    try:
        # Lazily create the holder so the method works even on a class
        # body that does not declare it.
        tasks = getattr(cls, '_cache_init_tasks', None)
        if tasks is None:
            tasks = cls._cache_init_tasks = set()

        lora_task = asyncio.create_task(
            cls._initialize_lora_cache(scanner),
            name='lora_cache_init'
        )
        # Delay recipe-cache init so the lora scanner initializes first.
        recipe_task = asyncio.create_task(
            cls._initialize_recipe_cache(recipe_scanner, delay=2),
            name='recipe_cache_init'
        )

        for task in (lora_task, recipe_task):
            tasks.add(task)
            task.add_done_callback(tasks.discard)  # drop ref once done
    except Exception as e:
        logger.error(f"LoRA Manager: Error scheduling cache initialization: {e}")
class SaveImage:
    """Experimental pass-through node.

    Returns the incoming image unchanged while printing the workflow
    prompt and extra PNG metadata to the console for debugging.
    """

    NAME = "Save Image (LoraManager)"
    CATEGORY = "Lora Manager/utils"
    DESCRIPTION = "Experimental node to display image preview and print prompt and extra_pnginfo"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
            },
            "hidden": {
                "prompt": "PROMPT",
                "extra_pnginfo": "EXTRA_PNGINFO",
            },
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = "process_image"

    def process_image(self, image, prompt=None, extra_pnginfo=None):
        """Print prompt/extra_pnginfo (when present) and return the image unchanged.

        Args:
            image: the incoming IMAGE tensor; passed through untouched.
            prompt: hidden PROMPT payload injected by the runtime, or None.
            extra_pnginfo: hidden EXTRA_PNGINFO payload, or None.

        Returns:
            A one-tuple containing the original image object.
        """
        # Print the prompt information
        print("SaveImage Node - Prompt:")
        if prompt:
            # default=str keeps the debug dump from raising TypeError when
            # the prompt graph contains non-JSON-serializable objects.
            print(json.dumps(prompt, indent=2, default=str))
        else:
            print("No prompt information available")

        # Print the extra_pnginfo
        print("\nSaveImage Node - Extra PNG Info:")
        if extra_pnginfo:
            print(json.dumps(extra_pnginfo, indent=2, default=str))
        else:
            print("No extra PNG info available")

        # Return the image unchanged
        return (image,)
async def get_folders(self, request: web.Request) -> web.Response:
    """Handle GET /api/folders: list every folder known to the lora cache."""
    cache = await self.scanner.get_cached_data()
    payload = {'folders': cache.folders}
    return web.json_response(payload)
class CheckpointsRoutes:
    """Route handlers for Checkpoints management endpoints"""

    def __init__(self):
        # Jinja2 environment rooted at the shared templates directory,
        # with autoescaping enabled for safe HTML rendering.
        loader = jinja2.FileSystemLoader(config.templates_path)
        self.template_env = jinja2.Environment(loader=loader, autoescape=True)

    async def handle_checkpoints_page(self, request: web.Request) -> web.Response:
        """Handle GET /checkpoints request"""
        try:
            page = self.template_env.get_template('checkpoints.html').render(
                is_initializing=False,
                settings=settings,
                request=request,
            )
            return web.Response(text=page, content_type='text/html')
        except Exception as e:
            logger.error(f"Error handling checkpoints request: {e}", exc_info=True)
            return web.Response(
                text="Error loading checkpoints page",
                status=500
            )

    def setup_routes(self, app: web.Application):
        """Register routes with the application"""
        app.router.add_get('/checkpoints', self.handle_checkpoints_page)
async def handle_recipes_page(self, request: web.Request) -> web.Response:
    """Handle GET /loras/recipes request"""
    try:
        # The page renders a loading state while the recipe cache is
        # still being built in the background.
        init_task = self.recipe_scanner._initialization_task
        is_initializing = (
            self.recipe_scanner._cache is None
            and init_task is not None
            and not init_task.done()
        )

        template = self.template_env.get_template('recipes.html')

        if is_initializing:
            rendered = template.render(
                is_initializing=True,
                settings=settings,
                request=request,  # Pass the request object to the template
            )
        else:
            # Normal flow - same formatting as the API endpoint.
            cache = await self.recipe_scanner.get_cached_data()
            recipes_data = cache.sorted_by_name[:20]  # first 20 by name

            # Attach static URLs for file paths - same as in recipe_routes.
            for item in recipes_data:
                # Always ensure file_url is set
                if 'file_path' in item:
                    item['file_url'] = self._format_recipe_file_url(item['file_path'])
                else:
                    item['file_url'] = '/loras_static/images/no-preview.png'

                # Ensure loras array exists
                if 'loras' not in item:
                    item['loras'] = []

                # Ensure base_model field exists
                if 'base_model' not in item:
                    item['base_model'] = ""

            rendered = template.render(
                recipes=recipes_data,
                is_initializing=False,
                settings=settings,
                request=request,  # Pass the request object to the template
            )

        return web.Response(text=rendered, content_type='text/html')

    except Exception as e:
        logger.error(f"Error handling recipes request: {e}", exc_info=True)
        return web.Response(
            text="Error loading recipes page",
            status=500
        )

def _format_recipe_file_url(self, file_path: str) -> str:
    """Format file path for recipe image as a URL - same as in recipe_routes"""
    try:
        # Paths under <first lora root>/recipes map onto the root1 preview URL.
        root = config.loras_roots[0]
        normalized = file_path.replace(os.sep, '/')
        recipes_dir = os.path.join(root, "recipes").replace(os.sep, '/')
        if normalized.startswith(recipes_dir):
            relative_path = os.path.relpath(file_path, root).replace(os.sep, '/')
            return f"/loras_static/root1/preview/{relative_path}"

        # Otherwise fall back to a URL built from the bare file name.
        file_name = os.path.basename(file_path)
        return f"/loras_static/root1/preview/recipes/{file_name}"
    except Exception as e:
        logger.error(f"Error formatting recipe file URL: {e}", exc_info=True)
        return '/loras_static/images/no-preview.png'  # Return default image on error

def setup_routes(self, app: web.Application):
    """Register routes with the application"""
    app.router.add_get('/loras', self.handle_loras_page)
    app.router.add_get('/loras/recipes', self.handle_recipes_page)
def __init__(self):
    # Each routes instance owns a recipe scanner (backed by a lora
    # scanner) and a Civitai client for metadata lookups.
    self.recipe_scanner = RecipeScanner(LoraScanner())
    self.civitai_client = CivitaiClient()

    # Handle used by the cache pre-warm scheduled at startup.
    self._init_cache_task = None

@classmethod
def setup_routes(cls, app: web.Application):
    """Register API routes"""
    routes = cls()

    # Core recipe CRUD endpoints
    app.router.add_get('/api/recipes', routes.get_recipes)
    app.router.add_get('/api/recipe/{recipe_id}', routes.get_recipe_detail)
    app.router.add_post('/api/recipes/analyze-image', routes.analyze_recipe_image)
    app.router.add_post('/api/recipes/save', routes.save_recipe)
    app.router.add_delete('/api/recipe/{recipe_id}', routes.delete_recipe)

    # Filter-related endpoints
    app.router.add_get('/api/recipes/top-tags', routes.get_top_tags)
    app.router.add_get('/api/recipes/base-models', routes.get_base_models)

    # Sharing endpoints
    app.router.add_get('/api/recipe/{recipe_id}/share', routes.share_recipe)
    app.router.add_get('/api/recipe/{recipe_id}/share/download', routes.download_shared_recipe)

    # Pre-warm the caches once the application starts.
    app.on_startup.append(routes._init_cache)

    app.router.add_post('/api/recipes/save-from-widget', routes.save_recipe_from_widget)

async def _init_cache(self, app):
    """Pre-warm the lora and recipe caches on application startup.

    The lora cache must be populated first because recipe scanning
    resolves loras through the lora scanner's hash index.
    """
    try:
        lora_scanner = self.recipe_scanner._lora_scanner
        # Ensure the lora cache (and with it the hash index) is built.
        await lora_scanner.get_cached_data()
        # Now refresh the recipe cache against the populated index.
        await self.recipe_scanner.get_cached_data(force_refresh=True)
    except Exception as e:
        logger.error(f"Error pre-warming recipe cache: {e}", exc_info=True)
async def get_recipes(self, request: web.Request) -> web.Response:
    """API endpoint for getting paginated recipes.

    Supports sorting, free-text search with per-field toggles, and
    base-model / tag filters.  Returns the paginated result as JSON.
    """
    try:
        # Get query parameters with defaults; malformed numbers fall back
        # to their defaults instead of surfacing as a 500 error.
        try:
            page = int(request.query.get('page', '1'))
        except ValueError:
            page = 1
        try:
            page_size = int(request.query.get('page_size', '20'))
        except ValueError:
            page_size = 20
        sort_by = request.query.get('sort_by', 'date')
        search = request.query.get('search', None)

        # Get search options (renamed for better clarity)
        search_title = request.query.get('search_title', 'true').lower() == 'true'
        search_tags = request.query.get('search_tags', 'true').lower() == 'true'
        search_lora_name = request.query.get('search_lora_name', 'true').lower() == 'true'
        search_lora_model = request.query.get('search_lora_model', 'true').lower() == 'true'

        # Get filter parameters
        base_models = request.query.get('base_models', None)
        tags = request.query.get('tags', None)

        # Parse filter parameters
        filters = {}
        if base_models:
            filters['base_model'] = base_models.split(',')
        if tags:
            filters['tags'] = tags.split(',')

        # Add search options to filters
        search_options = {
            'title': search_title,
            'tags': search_tags,
            'lora_name': search_lora_name,
            'lora_model': search_lora_model
        }

        # Get paginated data
        result = await self.recipe_scanner.get_paginated_data(
            page=page,
            page_size=page_size,
            sort_by=sort_by,
            search=search,
            filters=filters,
            search_options=search_options
        )

        # Format the response data with static URLs for file paths
        for item in result['items']:
            # Always ensure file_url is set
            if 'file_path' in item:
                item['file_url'] = self._format_recipe_file_url(item['file_path'])
            else:
                item['file_url'] = '/loras_static/images/no-preview.png'

            # Ensure the loras array exists
            if 'loras' not in item:
                item['loras'] = []

            # Ensure a base_model field exists
            if 'base_model' not in item:
                item['base_model'] = ""

        return web.json_response(result)
    except Exception as e:
        logger.error(f"Error retrieving recipes: {e}", exc_info=True)
        return web.json_response({"error": str(e)}, status=500)

async def get_recipe_detail(self, request: web.Request) -> web.Response:
    """Get detailed information about a specific recipe"""
    try:
        recipe_id = request.match_info['recipe_id']

        # Get all recipes from cache
        cache = await self.recipe_scanner.get_cached_data()

        # Find the specific recipe by its string id
        recipe = next((r for r in cache.raw_data if str(r.get('id', '')) == recipe_id), None)

        if not recipe:
            return web.json_response({"error": "Recipe not found"}, status=404)

        # Format recipe data
        formatted_recipe = self._format_recipe_data(recipe)

        return web.json_response(formatted_recipe)
    except Exception as e:
        logger.error(f"Error retrieving recipe details: {e}", exc_info=True)
        return web.json_response({"error": str(e)}, status=500)
def _format_recipe_data(self, recipe: Dict) -> Dict:
    """Format recipe data for API response.

    Returns a shallow copy of *recipe* with `file_url` derived from
    `file_path` and human-readable `*_formatted` date fields added.
    """
    formatted = {**recipe}  # Copy all fields

    # Format file paths to URLs
    if 'file_path' in formatted:
        formatted['file_url'] = self._format_recipe_file_url(formatted['file_path'])

    # Format dates for display
    for date_field in ('created_date', 'modified'):
        if date_field in formatted:
            formatted[f"{date_field}_formatted"] = self._format_timestamp(formatted[date_field])

    return formatted

def _format_timestamp(self, timestamp: float) -> str:
    """Format a Unix timestamp for display as 'YYYY-MM-DD HH:MM:SS' (local time)."""
    from datetime import datetime
    return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')

@staticmethod
def _encode_image_base64(image_path: str) -> str:
    """Read an image file and return its contents base64-encoded as ASCII text."""
    import base64
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')

async def analyze_recipe_image(self, request: web.Request) -> web.Response:
    """Analyze an uploaded image or URL for recipe metadata.

    Accepts either multipart form data (an 'image' field) or a JSON body
    with a 'url' key.  Extracts the EXIF user comment, picks a parser via
    RecipeParserFactory and returns the parsed recipe data.  In URL mode
    the downloaded image is echoed back base64-encoded.
    """
    temp_path = None
    try:
        # Check if request contains multipart data (image) or JSON data (url)
        content_type = request.headers.get('Content-Type', '')
        is_url_mode = False

        if 'multipart/form-data' in content_type:
            # Handle image upload
            reader = await request.multipart()
            field = await reader.next()

            if field.name != 'image':
                return web.json_response({
                    "error": "No image field found",
                    "loras": []
                }, status=400)

            # Spool the upload into a temporary file for EXIF extraction.
            with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
                while True:
                    chunk = await field.read_chunk()
                    if not chunk:
                        break
                    temp_file.write(chunk)
                temp_path = temp_file.name

        elif 'application/json' in content_type:
            # Handle URL input
            data = await request.json()
            url = data.get('url')
            is_url_mode = True

            if not url:
                return web.json_response({
                    "error": "No URL provided",
                    "loras": []
                }, status=400)

            # Download image from URL
            from ..utils.utils import download_twitter_image
            temp_path = download_twitter_image(url)

            if not temp_path:
                return web.json_response({
                    "error": "Failed to download image from URL",
                    "loras": []
                }, status=400)

        # Extract metadata from the image using ExifUtils
        user_comment = ExifUtils.extract_user_comment(temp_path)

        # If no metadata found, return a more specific error
        if not user_comment:
            result = {
                "error": "No metadata found in this image",
                "loras": []  # Empty loras array prevents client-side errors
            }
            if is_url_mode and temp_path:
                result["image_base64"] = self._encode_image_base64(temp_path)
            return web.json_response(result, status=200)

        # Use the parser factory to get the appropriate parser
        parser = RecipeParserFactory.create_parser(user_comment)

        if parser is None:
            result = {
                "error": "No parser found for this image",
                "loras": []  # Empty loras array prevents client-side errors
            }
            if is_url_mode and temp_path:
                result["image_base64"] = self._encode_image_base64(temp_path)
            return web.json_response(result, status=200)

        # Parse the metadata
        result = await parser.parse_metadata(
            user_comment,
            recipe_scanner=self.recipe_scanner,
            civitai_client=self.civitai_client
        )

        # For URL mode, include the image data as base64
        if is_url_mode and temp_path:
            result["image_base64"] = self._encode_image_base64(temp_path)

        # Check for errors
        if "error" in result and not result.get("loras"):
            return web.json_response(result, status=200)

        return web.json_response(result)

    except Exception as e:
        logger.error(f"Error analyzing recipe image: {e}", exc_info=True)
        return web.json_response({
            "error": str(e),
            "loras": []  # Empty loras array prevents client-side errors
        }, status=500)
    finally:
        # Clean up the temporary file in the finally block
        if temp_path and os.path.exists(temp_path):
            try:
                os.unlink(temp_path)
            except Exception as e:
                logger.error(f"Error deleting temporary file: {e}")
async def save_recipe(self, request: web.Request) -> web.Response:
    """Save a recipe to the recipes folder.

    Accepts multipart form data with a name, optional tags, recipe
    metadata and an image supplied either as raw bytes ('image'),
    base64 text ('image_base64') or a URL ('image_url').  Persists an
    optimized WebP image plus a `<uuid>.recipe.json` sidecar and updates
    the in-memory recipe cache.
    """
    try:
        reader = await request.multipart()

        # Process form data
        image = None
        image_base64 = None
        image_url = None
        name = None
        tags = []
        metadata = None

        while True:
            field = await reader.next()
            if field is None:
                break

            if field.name == 'image':
                # Read image data
                image_data = b''
                while True:
                    chunk = await field.read_chunk()
                    if not chunk:
                        break
                    image_data += chunk
                image = image_data

            elif field.name == 'image_base64':
                # Get base64 image data
                image_base64 = await field.text()

            elif field.name == 'image_url':
                # Get image URL
                image_url = await field.text()

            elif field.name == 'name':
                name = await field.text()

            elif field.name == 'tags':
                tags_text = await field.text()
                try:
                    tags = json.loads(tags_text)
                except (json.JSONDecodeError, TypeError):
                    # Malformed tags payload - treat as no tags.
                    tags = []

            elif field.name == 'metadata':
                metadata_text = await field.text()
                try:
                    metadata = json.loads(metadata_text)
                except (json.JSONDecodeError, TypeError):
                    # Malformed metadata payload - fall through to the
                    # missing-fields check below.
                    metadata = {}

        missing_fields = []
        if not name:
            missing_fields.append("name")
        if not metadata:
            missing_fields.append("metadata")
        if missing_fields:
            return web.json_response({"error": f"Missing required fields: {', '.join(missing_fields)}"}, status=400)

        # Handle different image sources
        if not image:
            if image_base64:
                # Convert base64 to binary
                import base64
                try:
                    # Remove potential data URL prefix
                    if ',' in image_base64:
                        image_base64 = image_base64.split(',', 1)[1]
                    image = base64.b64decode(image_base64)
                except Exception as e:
                    return web.json_response({"error": f"Invalid base64 image data: {str(e)}"}, status=400)
            elif image_url:
                # Download image from URL
                from ..utils.utils import download_twitter_image
                temp_path = download_twitter_image(image_url)
                if not temp_path:
                    return web.json_response({"error": "Failed to download image from URL"}, status=400)

                # Read the downloaded image
                with open(temp_path, 'rb') as f:
                    image = f.read()

                # Clean up temp file; a leftover temp file is harmless.
                try:
                    os.unlink(temp_path)
                except OSError:
                    pass
            else:
                return web.json_response({"error": "No image data provided"}, status=400)

        # Create recipes directory if it doesn't exist
        recipes_dir = self.recipe_scanner.recipes_dir
        os.makedirs(recipes_dir, exist_ok=True)

        # Generate UUID for the recipe
        import uuid
        recipe_id = str(uuid.uuid4())

        # Optimize the image (resize and convert to WebP)
        optimized_image, extension = ExifUtils.optimize_image(
            image_data=image,
            target_width=480,
            format='webp',
            quality=85,
            preserve_metadata=True
        )

        # Save the optimized image
        image_filename = f"{recipe_id}{extension}"
        image_path = os.path.join(recipes_dir, image_filename)
        with open(image_path, 'wb') as f:
            f.write(optimized_image)

        # Create the recipe JSON
        current_time = time.time()

        # Format loras data according to the recipe.json format
        loras_data = []
        for lora in metadata.get("loras", []):
            # Skip deleted LoRAs if they're marked to be excluded
            if lora.get("isDeleted", False) and lora.get("exclude", False):
                continue

            # Convert frontend lora format to recipe format
            lora_entry = {
                "file_name": lora.get("file_name", "") or os.path.splitext(os.path.basename(lora.get("localPath", "")))[0],
                "hash": lora.get("hash", "").lower() if lora.get("hash") else "",
                "strength": float(lora.get("weight", 1.0)),
                "modelVersionId": lora.get("id", ""),
                "modelName": lora.get("name", ""),
                "modelVersionName": lora.get("version", ""),
                "isDeleted": lora.get("isDeleted", False)  # Preserve deletion status in saved recipe
            }
            loras_data.append(lora_entry)

        # Format gen_params according to the recipe.json format
        gen_params = metadata.get("gen_params", {})
        if not gen_params and "raw_metadata" in metadata:
            # Extract from raw metadata if available
            raw_metadata = metadata.get("raw_metadata", {})
            gen_params = {
                "prompt": raw_metadata.get("prompt", ""),
                "negative_prompt": raw_metadata.get("negative_prompt", ""),
                "checkpoint": raw_metadata.get("checkpoint", {}),
                "steps": raw_metadata.get("steps", ""),
                "sampler": raw_metadata.get("sampler", ""),
                "cfg_scale": raw_metadata.get("cfg_scale", ""),
                "seed": raw_metadata.get("seed", ""),
                "size": raw_metadata.get("size", ""),
                "clip_skip": raw_metadata.get("clip_skip", "")
            }

        # Create the recipe data structure
        recipe_data = {
            "id": recipe_id,
            "file_path": image_path,
            "title": name,
            "modified": current_time,
            "created_date": current_time,
            "base_model": metadata.get("base_model", ""),
            "loras": loras_data,
            "gen_params": gen_params
        }

        # Add tags if provided
        if tags:
            recipe_data["tags"] = tags

        # Save the recipe JSON
        json_filename = f"{recipe_id}.recipe.json"
        json_path = os.path.join(recipes_dir, json_filename)
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(recipe_data, f, indent=4, ensure_ascii=False)

        # Add recipe metadata to the image
        ExifUtils.append_recipe_metadata(image_path, recipe_data)

        # Simplified cache update: append directly to raw_data and let a
        # background task resort, instead of locking the whole cache.
        if self.recipe_scanner._cache is not None:
            self.recipe_scanner._cache.raw_data.append(recipe_data)
            asyncio.create_task(self.recipe_scanner._cache.resort())
            logger.info(f"Added recipe {recipe_id} to cache")

        return web.json_response({
            'success': True,
            'recipe_id': recipe_id,
            'image_path': image_path,
            'json_path': json_path
        })

    except Exception as e:
        logger.error(f"Error saving recipe: {e}", exc_info=True)
        return web.json_response({"error": str(e)}, status=500)
async def delete_recipe(self, request: web.Request) -> web.Response:
    """Delete a recipe by ID"""
    try:
        recipe_id = request.match_info['recipe_id']

        # Locate the recipes directory.
        recipes_dir = self.recipe_scanner.recipes_dir
        if not recipes_dir or not os.path.exists(recipes_dir):
            return web.json_response({"error": "Recipes directory not found"}, status=404)

        # Locate the recipe's JSON sidecar.
        recipe_json_path = os.path.join(recipes_dir, f"{recipe_id}.recipe.json")
        if not os.path.exists(recipe_json_path):
            return web.json_response({"error": "Recipe not found"}, status=404)

        # Read the recipe to learn where its image lives.
        with open(recipe_json_path, 'r', encoding='utf-8') as f:
            recipe_data = json.load(f)
        image_path = recipe_data.get('file_path')

        # Remove the JSON file, then the image (if present).
        os.remove(recipe_json_path)
        logger.info(f"Deleted recipe JSON file: {recipe_json_path}")

        if image_path and os.path.exists(image_path):
            os.remove(image_path)
            logger.info(f"Deleted recipe image: {image_path}")

        # Drop the recipe from the in-memory cache and resort in the background.
        cache = self.recipe_scanner._cache
        if cache is not None:
            cache.raw_data = [
                entry for entry in cache.raw_data
                if str(entry.get('id', '')) != recipe_id
            ]
            asyncio.create_task(cache.resort())
            logger.info(f"Removed recipe {recipe_id} from cache")

        return web.json_response({"success": True, "message": "Recipe deleted successfully"})
    except Exception as e:
        logger.error(f"Error deleting recipe: {e}", exc_info=True)
        return web.json_response({"error": str(e)}, status=500)
recipes""" + try: + # Get limit parameter with default + limit = int(request.query.get('limit', '20')) + + # Get all recipes from cache + cache = await self.recipe_scanner.get_cached_data() + + # Count tag occurrences + tag_counts = {} + for recipe in cache.raw_data: + if 'tags' in recipe and recipe['tags']: + for tag in recipe['tags']: + tag_counts[tag] = tag_counts.get(tag, 0) + 1 + + # Sort tags by count and limit results + sorted_tags = [{'tag': tag, 'count': count} for tag, count in tag_counts.items()] + sorted_tags.sort(key=lambda x: x['count'], reverse=True) + top_tags = sorted_tags[:limit] + + return web.json_response({ + 'success': True, + 'tags': top_tags + }) + except Exception as e: + logger.error(f"Error retrieving top tags: {e}", exc_info=True) + return web.json_response({ + 'success': False, + 'error': str(e) + }, status=500) + + async def get_base_models(self, request: web.Request) -> web.Response: + """Get base models used in recipes""" + try: + # Get all recipes from cache + cache = await self.recipe_scanner.get_cached_data() + + # Count base model occurrences + base_model_counts = {} + for recipe in cache.raw_data: + if 'base_model' in recipe and recipe['base_model']: + base_model = recipe['base_model'] + base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1 + + # Sort base models by count + sorted_models = [{'name': model, 'count': count} for model, count in base_model_counts.items()] + sorted_models.sort(key=lambda x: x['count'], reverse=True) + + return web.json_response({ + 'success': True, + 'base_models': sorted_models + }) + except Exception as e: + logger.error(f"Error retrieving base models: {e}", exc_info=True) + return web.json_response({ + 'success': False, + 'error': str(e) + }, status=500) + + async def share_recipe(self, request: web.Request) -> web.Response: + """Process a recipe image for sharing by adding metadata to EXIF""" + try: + recipe_id = request.match_info['recipe_id'] + + # Get all recipes from cache 
+ cache = await self.recipe_scanner.get_cached_data() + + # Find the specific recipe + recipe = next((r for r in cache.raw_data if str(r.get('id', '')) == recipe_id), None) + + if not recipe: + return web.json_response({"error": "Recipe not found"}, status=404) + + # Get the image path + image_path = recipe.get('file_path') + if not image_path or not os.path.exists(image_path): + return web.json_response({"error": "Recipe image not found"}, status=404) + + # Create a temporary copy of the image to modify + import tempfile + import shutil + + # Create temp file with same extension + ext = os.path.splitext(image_path)[1] + with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as temp_file: + temp_path = temp_file.name + + # Copy the original image to temp file + shutil.copy2(image_path, temp_path) + processed_path = temp_path + + # Create a URL for the processed image + # Use a timestamp to prevent caching + timestamp = int(time.time()) + url_path = f"/api/recipe/{recipe_id}/share/download?t={timestamp}" + + # Store the temp path in a dictionary to serve later + if not hasattr(self, '_shared_recipes'): + self._shared_recipes = {} + + self._shared_recipes[recipe_id] = { + 'path': processed_path, + 'timestamp': timestamp, + 'expires': time.time() + 300 # Expire after 5 minutes + } + + # Clean up old entries + self._cleanup_shared_recipes() + + return web.json_response({ + 'success': True, + 'download_url': url_path, + 'filename': f"recipe_{recipe.get('title', '').replace(' ', '_').lower()}{ext}" + }) + except Exception as e: + logger.error(f"Error sharing recipe: {e}", exc_info=True) + return web.json_response({"error": str(e)}, status=500) + + async def download_shared_recipe(self, request: web.Request) -> web.Response: + """Serve a processed recipe image for download""" + try: + recipe_id = request.match_info['recipe_id'] + + # Check if we have this shared recipe + if not hasattr(self, '_shared_recipes') or recipe_id not in self._shared_recipes: + return 
web.json_response({"error": "Shared recipe not found or expired"}, status=404) + + shared_info = self._shared_recipes[recipe_id] + file_path = shared_info['path'] + + if not os.path.exists(file_path): + return web.json_response({"error": "Shared recipe file not found"}, status=404) + + # Get recipe to determine filename + cache = await self.recipe_scanner.get_cached_data() + recipe = next((r for r in cache.raw_data if str(r.get('id', '')) == recipe_id), None) + + # Set filename for download + filename = f"recipe_{recipe.get('title', '').replace(' ', '_').lower() if recipe else recipe_id}" + ext = os.path.splitext(file_path)[1] + download_filename = f"{filename}{ext}" + + # Serve the file + return web.FileResponse( + file_path, + headers={ + 'Content-Disposition': f'attachment; filename="{download_filename}"' + } + ) + except Exception as e: + logger.error(f"Error downloading shared recipe: {e}", exc_info=True) + return web.json_response({"error": str(e)}, status=500) + + def _cleanup_shared_recipes(self): + """Clean up expired shared recipes""" + if not hasattr(self, '_shared_recipes'): + return + + current_time = time.time() + expired_ids = [rid for rid, info in self._shared_recipes.items() + if current_time > info.get('expires', 0)] + + for rid in expired_ids: + try: + # Delete the temporary file + file_path = self._shared_recipes[rid]['path'] + if os.path.exists(file_path): + os.unlink(file_path) + + # Remove from dictionary + del self._shared_recipes[rid] + except Exception as e: + logger.error(f"Error cleaning up shared recipe {rid}: {e}") + + async def save_recipe_from_widget(self, request: web.Request) -> web.Response: + """Save a recipe from the LoRAs widget""" + try: + reader = await request.multipart() + + # Process form data + workflow_json = None + + while True: + field = await reader.next() + if field is None: + break + + if field.name == 'workflow_json': + workflow_text = await field.text() + try: + workflow_json = json.loads(workflow_text) + except:
+ return web.json_response({"error": "Invalid workflow JSON"}, status=400) + + if not workflow_json: + return web.json_response({"error": "Missing required workflow_json field"}, status=400) + + # Find the latest image in the temp directory + temp_dir = config.temp_directory + image_files = [] + + for file in os.listdir(temp_dir): + if file.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')): + file_path = os.path.join(temp_dir, file) + image_files.append((file_path, os.path.getmtime(file_path))) + + if not image_files: + return web.json_response({"error": "No recent images found to use for recipe"}, status=400) + + # Sort by modification time (newest first) + image_files.sort(key=lambda x: x[1], reverse=True) + latest_image_path = image_files[0][0] + + # Parse the workflow to extract generation parameters and loras + from ..workflow_params.workflow_parser import parse_workflow + # load_extensions=False to avoid loading extensions for now + parsed_workflow = parse_workflow(workflow_json, load_extensions=False) + + logger.info(f"Parsed workflow: {parsed_workflow}") + + if not parsed_workflow or not parsed_workflow.get("gen_params"): + return web.json_response({"error": "Could not extract generation parameters from workflow"}, status=400) + + # Get the lora stack from the parsed workflow + lora_stack = parsed_workflow.get("loras", "") + + # Parse the lora stack format: "<lora:name:strength> <lora:name2:strength2> ..."
+ import re + lora_matches = re.findall(r'<lora:([^:]+):([^>]+)>', lora_stack) + + # Check if any loras were found + if not lora_matches: + return web.json_response({"error": "No LoRAs found in the workflow"}, status=400) + + # Generate recipe name from the first 3 loras (or less if fewer are available) + loras_for_name = lora_matches[:3] # Take at most 3 loras for the name + + recipe_name_parts = [] + for lora_name, lora_strength in loras_for_name: + # Get the basename without path or extension + basename = os.path.basename(lora_name) + basename = os.path.splitext(basename)[0] + recipe_name_parts.append(f"{basename}:{lora_strength}") + + recipe_name = " ".join(recipe_name_parts) + + # Read the image + with open(latest_image_path, 'rb') as f: + image = f.read() + + # Create recipes directory if it doesn't exist + recipes_dir = self.recipe_scanner.recipes_dir + os.makedirs(recipes_dir, exist_ok=True) + + # Generate UUID for the recipe + import uuid + recipe_id = str(uuid.uuid4()) + + # Optimize the image (resize and convert to WebP) + optimized_image, extension = ExifUtils.optimize_image( + image_data=image, + target_width=480, + format='webp', + quality=85, + preserve_metadata=True + ) + + # Save the optimized image + image_filename = f"{recipe_id}{extension}" + image_path = os.path.join(recipes_dir, image_filename) + with open(image_path, 'wb') as f: + f.write(optimized_image) + + # Format loras data from the lora stack + loras_data = [] + + for lora_name, lora_strength in lora_matches: + try: + # Get lora info from scanner + lora_info = await self.recipe_scanner._lora_scanner.get_lora_info_by_name(lora_name) + + # Create lora entry + lora_entry = { + "file_name": lora_name, + "hash": lora_info.get("sha256", "").lower() if lora_info else "", + "strength": float(lora_strength), + "modelVersionId": lora_info.get("civitai", {}).get("id", "") if lora_info else "", + "modelName": lora_info.get("civitai", {}).get("model", {}).get("name", "") if lora_info else lora_name, +
"modelVersionName": lora_info.get("civitai", {}).get("name", "") if lora_info else "", + "isDeleted": False + } + loras_data.append(lora_entry) + except Exception as e: + logger.warning(f"Error processing LoRA {lora_name}: {e}") + + # Get base model from lora scanner for the available loras + base_model_counts = {} + for lora in loras_data: + lora_info = await self.recipe_scanner._lora_scanner.get_lora_info_by_name(lora.get("file_name", "")) + if lora_info and "base_model" in lora_info: + base_model = lora_info["base_model"] + base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1 + + # Get most common base model + most_common_base_model = "" + if base_model_counts: + most_common_base_model = max(base_model_counts.items(), key=lambda x: x[1])[0] + + # Create the recipe data structure + recipe_data = { + "id": recipe_id, + "file_path": image_path, + "title": recipe_name, # Use generated recipe name + "modified": time.time(), + "created_date": time.time(), + "base_model": most_common_base_model, + "loras": loras_data, + "gen_params": parsed_workflow.get("gen_params", {}), # Use the parsed workflow parameters + "loras_stack": lora_stack # Include the original lora stack string + } + + # Save the recipe JSON + json_filename = f"{recipe_id}.recipe.json" + json_path = os.path.join(recipes_dir, json_filename) + with open(json_path, 'w', encoding='utf-8') as f: + json.dump(recipe_data, f, indent=4, ensure_ascii=False) + + # Add recipe metadata to the image + ExifUtils.append_recipe_metadata(image_path, recipe_data) + + # Update cache + if self.recipe_scanner._cache is not None: + # Add the recipe to the raw data if the cache exists + self.recipe_scanner._cache.raw_data.append(recipe_data) + # Schedule a background task to resort the cache + asyncio.create_task(self.recipe_scanner._cache.resort()) + logger.info(f"Added recipe {recipe_id} to cache") + + return web.json_response({ + 'success': True, + 'recipe_id': recipe_id, + 'image_path': image_path, + 
'json_path': json_path, + 'recipe_name': recipe_name # Include the generated recipe name in the response + }) + + except Exception as e: + logger.error(f"Error saving recipe from widget: {e}", exc_info=True) + return web.json_response({"error": str(e)}, status=500) diff --git a/py/routes/update_routes.py b/py/routes/update_routes.py index 41037aae..5b488d55 100644 --- a/py/routes/update_routes.py +++ b/py/routes/update_routes.py @@ -24,11 +24,9 @@ class UpdateRoutes: try: # Read local version from pyproject.toml local_version = UpdateRoutes._get_local_version() - logger.info(f"Local version: {local_version}") - + # Fetch remote version from GitHub remote_version, changelog = await UpdateRoutes._get_remote_version() - logger.info(f"Remote version: {remote_version}") # Compare versions update_available = UpdateRoutes._compare_versions( @@ -36,8 +34,6 @@ class UpdateRoutes: remote_version.replace('v', '') ) - logger.info(f"Update available: {update_available}") - return web.json_response({ 'success': True, 'current_version': local_version, diff --git a/py/services/civitai_client.py b/py/services/civitai_client.py index a39be20f..91387ebe 100644 --- a/py/services/civitai_client.py +++ b/py/services/civitai_client.py @@ -214,4 +214,30 @@ class CivitaiClient: """Close the session if it exists""" if self._session is not None: await self._session.close() - self._session = None \ No newline at end of file + self._session = None + + async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]: + """Get hash from Civitai API""" + try: + if not self._session: + return None + + logger.info(f"Fetching model version info from Civitai for ID: {model_version_id}") + version_info = await self._session.get(f"{self.base_url}/model-versions/{model_version_id}") + + if not version_info or not version_info.json().get('files'): + logger.warning(f"No files found in version info for ID: {model_version_id}") + return None + + # Get hash from the first file + for file_info 
in version_info.json().get('files', []): + if file_info.get('hashes', {}).get('SHA256'): + # Convert hash to lowercase to standardize + hash_value = file_info['hashes']['SHA256'].lower() + return hash_value + + logger.warning(f"No SHA256 hash found in version info for ID: {model_version_id}") + return None + except Exception as e: + logger.error(f"Error getting hash from Civitai: {e}") + return None \ No newline at end of file diff --git a/py/services/download_manager.py b/py/services/download_manager.py index 2571ffb2..66330a68 100644 --- a/py/services/download_manager.py +++ b/py/services/download_manager.py @@ -42,11 +42,18 @@ class DownloadManager: save_path = os.path.join(save_dir, file_name) file_size = file_info.get('sizeKB', 0) * 1024 - # 4. 通知文件监控系统 - self.file_monitor.handler.add_ignore_path( - save_path.replace(os.sep, '/'), - file_size - ) + # 4. 通知文件监控系统 - 使用规范化路径和文件大小 + if self.file_monitor and self.file_monitor.handler: + # Add both the normalized path and potential alternative paths + normalized_path = save_path.replace(os.sep, '/') + self.file_monitor.handler.add_ignore_path(normalized_path, file_size) + + # Also add the path with file extension variations (.safetensors) + if not normalized_path.endswith('.safetensors'): + safetensors_path = os.path.splitext(normalized_path)[0] + '.safetensors' + self.file_monitor.handler.add_ignore_path(safetensors_path, file_size) + + logger.debug(f"Added download path to ignore list: {normalized_path} (size: {file_size} bytes)") # 5. 
准备元数据 metadata = LoraMetadata.from_civitai_info(version_info, file_info, save_path) @@ -135,6 +142,9 @@ class DownloadManager: all_folders = set(cache.folders) all_folders.add(relative_path) cache.folders = sorted(list(all_folders), key=lambda x: x.lower()) + + # Update the hash index with the new LoRA entry + self.file_monitor.scanner._hash_index.add_entry(metadata_dict['sha256'], metadata_dict['file_path']) # Update the hash index with the new LoRA entry self.file_monitor.scanner._hash_index.add_entry(metadata_dict['sha256'], metadata_dict['file_path']) diff --git a/py/services/file_monitor.py b/py/services/file_monitor.py index 33b53448..849ba2a0 100644 --- a/py/services/file_monitor.py +++ b/py/services/file_monitor.py @@ -20,29 +20,76 @@ class LoraFileHandler(FileSystemEventHandler): self.pending_changes = set() # 待处理的变更 self.lock = Lock() # 线程安全锁 self.update_task = None # 异步更新任务 - self._ignore_paths = set() # Add ignore paths set + self._ignore_paths = {} # Change to dictionary to store expiration times self._min_ignore_timeout = 5 # minimum timeout in seconds self._download_speed = 1024 * 1024 # assume 1MB/s as base speed def _should_ignore(self, path: str) -> bool: """Check if path should be ignored""" real_path = os.path.realpath(path) # Resolve any symbolic links - return real_path.replace(os.sep, '/') in self._ignore_paths + normalized_path = real_path.replace(os.sep, '/') + + # Also check with backslashes for Windows compatibility + alt_path = real_path.replace('/', '\\') + + current_time = asyncio.get_event_loop().time() + + # Check if path is in ignore list and not expired + if normalized_path in self._ignore_paths and self._ignore_paths[normalized_path] > current_time: + return True + + # Also check alternative path format + if alt_path in self._ignore_paths and self._ignore_paths[alt_path] > current_time: + return True + + return False def add_ignore_path(self, path: str, file_size: int = 0): """Add path to ignore list with dynamic timeout based on 
file size""" real_path = os.path.realpath(path) # Resolve any symbolic links - self._ignore_paths.add(real_path.replace(os.sep, '/')) + normalized_path = real_path.replace(os.sep, '/') - # Short timeout (e.g. 5 seconds) is sufficient to ignore the CREATE event - timeout = 5 + # Calculate timeout based on file size + # For small files, use minimum timeout + # For larger files, estimate download time + buffer + if file_size > 0: + # Estimate download time in seconds (size / speed) + buffer + estimated_time = (file_size / self._download_speed) + 10 + timeout = max(self._min_ignore_timeout, estimated_time) + else: + timeout = self._min_ignore_timeout + # Store expiration time instead of just the path + current_time = asyncio.get_event_loop().time() + expiration_time = current_time + timeout + + # Store both normalized and alternative path formats + self._ignore_paths[normalized_path] = expiration_time + + # Also store with backslashes for Windows compatibility + alt_path = real_path.replace('/', '\\') + self._ignore_paths[alt_path] = expiration_time + + logger.debug(f"Added ignore path: {normalized_path} (expires in {timeout:.1f}s)") + + # Schedule cleanup after timeout asyncio.get_event_loop().call_later( timeout, - self._ignore_paths.discard, - real_path.replace(os.sep, '/') + self._remove_ignore_path, + normalized_path ) + + def _remove_ignore_path(self, path: str): + """Remove path from ignore list after timeout""" + if path in self._ignore_paths: + del self._ignore_paths[path] + logger.debug(f"Removed ignore path: {path}") + # Also remove alternative path format + alt_path = path.replace('/', '\\') + if alt_path in self._ignore_paths: + del self._ignore_paths[alt_path] + def on_created(self, event): if event.is_directory or not event.src_path.endswith('.safetensors'): return diff --git a/py/services/lora_hash_index.py b/py/services/lora_hash_index.py index f5b6c4e7..8285d293 100644 --- a/py/services/lora_hash_index.py +++ b/py/services/lora_hash_index.py @@ -15,11 
+15,13 @@ class LoraHashIndex: """Add or update a hash -> path mapping""" if not sha256 or not file_path: return - self._hash_to_path[sha256] = file_path + # Always store lowercase hashes for consistency + self._hash_to_path[sha256.lower()] = file_path def remove_entry(self, sha256: str) -> None: """Remove a hash entry""" - self._hash_to_path.pop(sha256, None) + if sha256: + self._hash_to_path.pop(sha256.lower(), None) def remove_by_path(self, file_path: str) -> None: """Remove entry by file path""" @@ -30,7 +32,9 @@ class LoraHashIndex: def get_path(self, sha256: str) -> Optional[str]: """Get file path for a given hash""" - return self._hash_to_path.get(sha256) + if not sha256: + return None + return self._hash_to_path.get(sha256.lower()) def get_hash(self, file_path: str) -> Optional[str]: """Get hash for a given file path""" @@ -41,7 +45,9 @@ class LoraHashIndex: def has_hash(self, sha256: str) -> bool: """Check if hash exists in index""" - return sha256 in self._hash_to_path + if not sha256: + return False + return sha256.lower() in self._hash_to_path def clear(self) -> None: """Clear all entries""" diff --git a/py/services/lora_scanner.py b/py/services/lora_scanner.py index ca44cb19..bf58029c 100644 --- a/py/services/lora_scanner.py +++ b/py/services/lora_scanner.py @@ -9,10 +9,11 @@ from operator import itemgetter from ..config import config from ..utils.file_utils import load_metadata, get_file_info from .lora_cache import LoraCache -from difflib import SequenceMatcher from .lora_hash_index import LoraHashIndex from .settings_manager import settings from ..utils.constants import NSFW_LEVELS +from ..utils.utils import fuzzy_match +import sys logger = logging.getLogger(__name__) @@ -131,45 +132,9 @@ class LoraScanner: folders=[] ) - def fuzzy_match(self, text: str, pattern: str, threshold: float = 0.7) -> bool: - """ - Check if text matches pattern using fuzzy matching. - Returns True if similarity ratio is above threshold. 
- """ - if not pattern or not text: - return False - - # Convert both to lowercase for case-insensitive matching - text = text.lower() - pattern = pattern.lower() - - # Split pattern into words - search_words = pattern.split() - - # Check each word - for word in search_words: - # First check if word is a substring (faster) - if word in text: - continue - - # If not found as substring, try fuzzy matching - # Check if any part of the text matches this word - found_match = False - for text_part in text.split(): - ratio = SequenceMatcher(None, text_part, word).ratio() - if ratio >= threshold: - found_match = True - break - - if not found_match: - return False - - # All words found either as substrings or fuzzy matches - return True - async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'name', folder: str = None, search: str = None, fuzzy: bool = False, - recursive: bool = False, base_models: list = None, tags: list = None, + base_models: list = None, tags: list = None, search_options: dict = None) -> Dict: """Get paginated and filtered lora data @@ -180,10 +145,9 @@ class LoraScanner: folder: Filter by folder path search: Search term fuzzy: Use fuzzy matching for search - recursive: Include subfolders when folder filter is applied base_models: List of base models to filter by tags: List of tags to filter by - search_options: Dictionary with search options (filename, modelname, tags) + search_options: Dictionary with search options (filename, modelname, tags, recursive) """ cache = await self.get_cached_data() @@ -192,7 +156,8 @@ class LoraScanner: search_options = { 'filename': True, 'modelname': True, - 'tags': False + 'tags': False, + 'recursive': False } # Get the base data set @@ -207,7 +172,7 @@ class LoraScanner: # Apply folder filtering if folder is not None: - if recursive: + if search_options.get('recursive', False): # Recursive mode: match all paths starting with this folder filtered_data = [ item for item in filtered_data @@ -236,16 
+201,47 @@ class LoraScanner: # Apply search filtering if search: - if fuzzy: - filtered_data = [ - item for item in filtered_data - if self._fuzzy_search_match(item, search, search_options) - ] - else: - filtered_data = [ - item for item in filtered_data - if self._exact_search_match(item, search, search_options) - ] + search_results = [] + for item in filtered_data: + # Check filename if enabled + if search_options.get('filename', True): + if fuzzy: + if fuzzy_match(item.get('file_name', ''), search): + search_results.append(item) + continue + else: + if search.lower() in item.get('file_name', '').lower(): + search_results.append(item) + continue + + # Check model name if enabled + if search_options.get('modelname', True): + if fuzzy: + if fuzzy_match(item.get('model_name', ''), search): + search_results.append(item) + continue + else: + if search.lower() in item.get('model_name', '').lower(): + search_results.append(item) + continue + + # Check tags if enabled + if search_options.get('tags', False) and item.get('tags'): + found_tag = False + for tag in item['tags']: + if fuzzy: + if fuzzy_match(tag, search): + found_tag = True + break + else: + if search.lower() in tag.lower(): + found_tag = True + break + if found_tag: + search_results.append(item) + continue + + filtered_data = search_results # Calculate pagination total_items = len(filtered_data) @@ -262,44 +258,6 @@ class LoraScanner: return result - def _fuzzy_search_match(self, item: Dict, search: str, search_options: Dict) -> bool: - """Check if an item matches the search term using fuzzy matching with search options""" - # Check filename if enabled - if search_options.get('filename', True) and self.fuzzy_match(item.get('file_name', ''), search): - return True - - # Check model name if enabled - if search_options.get('modelname', True) and self.fuzzy_match(item.get('model_name', ''), search): - return True - - # Check tags if enabled - if search_options.get('tags', False) and item.get('tags'): - for tag 
in item['tags']: - if self.fuzzy_match(tag, search): - return True - - return False - - def _exact_search_match(self, item: Dict, search: str, search_options: Dict) -> bool: - """Check if an item matches the search term using exact matching with search options""" - search = search.lower() - - # Check filename if enabled - if search_options.get('filename', True) and search in item.get('file_name', '').lower(): - return True - - # Check model name if enabled - if search_options.get('modelname', True) and search in item.get('model_name', '').lower(): - return True - - # Check tags if enabled - if search_options.get('tags', False) and item.get('tags'): - for tag in item['tags']: - if search in tag.lower(): - return True - - return False - def invalidate_cache(self): """Invalidate the current cache""" self._cache = None @@ -604,7 +562,7 @@ class LoraScanner: # Update hash index with new path if 'sha256' in metadata: - self._hash_index.add_entry(metadata['sha256'], new_path) + self._hash_index.add_entry(metadata['sha256'].lower(), new_path) # Update folders list all_folders = set(item['folder'] for item in cache.raw_data) @@ -659,6 +617,26 @@ class LoraScanner: """Get hash for a LoRA by its file path""" return self._hash_index.get_hash(file_path) + def get_preview_url_by_hash(self, sha256: str) -> Optional[str]: + """Get preview static URL for a LoRA by its hash""" + # Get the file path first + file_path = self._hash_index.get_path(sha256.lower()) + if not file_path: + return None + + # Determine the preview file path (typically same name with different extension) + base_name = os.path.splitext(file_path)[0] + preview_extensions = ['.preview.png', '.preview.jpeg', '.preview.jpg', '.preview.mp4', + '.png', '.jpeg', '.jpg', '.mp4'] + + for ext in preview_extensions: + preview_path = f"{base_name}{ext}" + if os.path.exists(preview_path): + # Convert to static URL using config + return config.get_preview_static_url(preview_path) + + return None + # Add new method to get top 
tags async def get_top_tags(self, limit: int = 20) -> List[Dict[str, any]]: """Get top tags sorted by count @@ -681,4 +659,81 @@ class LoraScanner: # Return limited number return sorted_tags[:limit] + + async def get_base_models(self, limit: int = 20) -> List[Dict[str, any]]: + """Get base models used in loras sorted by frequency + + Args: + limit: Maximum number of base models to return + + Returns: + List of dictionaries with base model name and count, sorted by count + """ + # Make sure cache is initialized + cache = await self.get_cached_data() + + # Count base model occurrences + base_model_counts = {} + for lora in cache.raw_data: + if 'base_model' in lora and lora['base_model']: + base_model = lora['base_model'] + base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1 + + # Sort base models by count + sorted_models = [{'name': model, 'count': count} for model, count in base_model_counts.items()] + sorted_models.sort(key=lambda x: x['count'], reverse=True) + + # Return limited number + return sorted_models[:limit] + + async def diagnose_hash_index(self): + """Diagnostic method to verify hash index functionality""" + print("\n\n*** DIAGNOSING LORA HASH INDEX ***\n\n", file=sys.stderr) + + # First check if the hash index has any entries + if hasattr(self, '_hash_index'): + index_entries = len(self._hash_index._hash_to_path) + print(f"Hash index has {index_entries} entries", file=sys.stderr) + + # Print a few example entries if available + if index_entries > 0: + print("\nSample hash index entries:", file=sys.stderr) + count = 0 + for hash_val, path in self._hash_index._hash_to_path.items(): + if count < 5: # Just show the first 5 + print(f"Hash: {hash_val[:8]}... 
-> Path: {path}", file=sys.stderr) + count += 1 + else: + break + else: + print("Hash index not initialized", file=sys.stderr) + + # Try looking up by a known hash for testing + if not hasattr(self, '_hash_index') or not self._hash_index._hash_to_path: + print("No hash entries to test lookup with", file=sys.stderr) + return + + test_hash = next(iter(self._hash_index._hash_to_path.keys())) + test_path = self._hash_index.get_path(test_hash) + print(f"\nTest lookup by hash: {test_hash[:8]}... -> {test_path}", file=sys.stderr) + + # Also test reverse lookup + test_hash_result = self._hash_index.get_hash(test_path) + print(f"Test reverse lookup: {test_path} -> {test_hash_result[:8]}...\n\n", file=sys.stderr) + + async def get_lora_info_by_name(self, name): + """Get LoRA information by name""" + try: + # Get cached data + cache = await self.get_cached_data() + + # Find the LoRA by name + for lora in cache.raw_data: + if lora.get("file_name") == name: + return lora + + return None + except Exception as e: + logger.error(f"Error getting LoRA info by name: {e}", exc_info=True) + return None diff --git a/py/services/recipe_cache.py b/py/services/recipe_cache.py new file mode 100644 index 00000000..b9cd2219 --- /dev/null +++ b/py/services/recipe_cache.py @@ -0,0 +1,85 @@ +import asyncio +from typing import List, Dict +from dataclasses import dataclass +from operator import itemgetter + +@dataclass +class RecipeCache: + """Cache structure for Recipe data""" + raw_data: List[Dict] + sorted_by_name: List[Dict] + sorted_by_date: List[Dict] + + def __post_init__(self): + self._lock = asyncio.Lock() + + async def resort(self, name_only: bool = False): + """Resort all cached data views""" + async with self._lock: + self.sorted_by_name = sorted( + self.raw_data, + key=lambda x: x.get('title', '').lower() # Case-insensitive sort + ) + if not name_only: + self.sorted_by_date = sorted( + self.raw_data, + key=itemgetter('created_date', 'file_path'), + reverse=True + ) + + async def 
update_recipe_metadata(self, recipe_id: str, metadata: Dict) -> bool: + """Update metadata for a specific recipe in all cached data + + Args: + recipe_id: The ID of the recipe to update + metadata: The new metadata + + Returns: + bool: True if the update was successful, False if the recipe wasn't found + """ + async with self._lock: + # Update in raw_data + for item in self.raw_data: + if item.get('id') == recipe_id: + item.update(metadata) + break + else: + return False # Recipe not found + + # Resort to reflect changes + await self.resort() + return True + + async def add_recipe(self, recipe_data: Dict) -> None: + """Add a new recipe to the cache + + Args: + recipe_data: The recipe data to add + """ + async with self._lock: + self.raw_data.append(recipe_data) + await self.resort() + + async def remove_recipe(self, recipe_id: str) -> bool: + """Remove a recipe from the cache by ID + + Args: + recipe_id: The ID of the recipe to remove + + Returns: + bool: True if the recipe was found and removed, False otherwise + """ + # Find the recipe in raw_data + recipe_index = next((i for i, recipe in enumerate(self.raw_data) + if recipe.get('id') == recipe_id), None) + + if recipe_index is None: + return False + + # Remove from raw_data + self.raw_data.pop(recipe_index) + + # Resort to update sorted lists + await self.resort() + + return True \ No newline at end of file diff --git a/py/services/recipe_scanner.py b/py/services/recipe_scanner.py new file mode 100644 index 00000000..29b93561 --- /dev/null +++ b/py/services/recipe_scanner.py @@ -0,0 +1,451 @@ +import os +import logging +import asyncio +import json +import re +from typing import List, Dict, Optional, Any +from datetime import datetime +from ..config import config +from .recipe_cache import RecipeCache +from .lora_scanner import LoraScanner +from .civitai_client import CivitaiClient +from ..utils.utils import fuzzy_match +import sys + +logger = logging.getLogger(__name__) + +class RecipeScanner: + """Service for 
scanning and managing recipe images""" + + _instance = None + _lock = asyncio.Lock() + + def __new__(cls, lora_scanner: Optional[LoraScanner] = None): + if cls._instance is None: + cls._instance = super().__new__(cls) + cls._instance._lora_scanner = lora_scanner + cls._instance._civitai_client = CivitaiClient() + return cls._instance + + def __init__(self, lora_scanner: Optional[LoraScanner] = None): + # Ensure initialization only happens once + if not hasattr(self, '_initialized'): + self._cache: Optional[RecipeCache] = None + self._initialization_lock = asyncio.Lock() + self._initialization_task: Optional[asyncio.Task] = None + self._is_initializing = False + if lora_scanner: + self._lora_scanner = lora_scanner + self._initialized = True + + # Initialization will be scheduled by LoraManager + + @property + def recipes_dir(self) -> str: + """Get path to recipes directory""" + if not config.loras_roots: + return "" + + # config.loras_roots already sorted case-insensitively, use the first one + recipes_dir = os.path.join(config.loras_roots[0], "recipes") + os.makedirs(recipes_dir, exist_ok=True) + + return recipes_dir + + async def get_cached_data(self, force_refresh: bool = False) -> RecipeCache: + """Get cached recipe data, refresh if needed""" + # If cache is already initialized and no refresh is needed, return it immediately + if self._cache is not None and not force_refresh: + return self._cache + + # If another initialization is already in progress, wait for it to complete + if self._is_initializing and not force_refresh: + return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[]) + + # Try to acquire the lock with a timeout to prevent deadlocks + try: + # Use a timeout for acquiring the lock + async with asyncio.timeout(1.0): + async with self._initialization_lock: + # Check again after acquiring the lock + if self._cache is not None and not force_refresh: + return self._cache + + # Mark as initializing to prevent concurrent 
initializations + self._is_initializing = True + + try: + # First ensure the lora scanner is initialized + if self._lora_scanner: + try: + lora_cache = await asyncio.wait_for( + self._lora_scanner.get_cached_data(), + timeout=10.0 + ) + except asyncio.TimeoutError: + logger.error("Timeout waiting for lora scanner initialization") + except Exception as e: + logger.error(f"Error waiting for lora scanner: {e}") + + # Scan for recipe data + raw_data = await self.scan_all_recipes() + + # Update cache + self._cache = RecipeCache( + raw_data=raw_data, + sorted_by_name=[], + sorted_by_date=[] + ) + + # Resort cache + await self._cache.resort() + + return self._cache + + except Exception as e: + logger.error(f"Recipe Manager: Error initializing cache: {e}", exc_info=True) + # Create empty cache on error + self._cache = RecipeCache( + raw_data=[], + sorted_by_name=[], + sorted_by_date=[] + ) + return self._cache + finally: + # Mark initialization as complete + self._is_initializing = False + + except asyncio.TimeoutError: + # If we can't acquire the lock in time, return the current cache or an empty one + logger.warning("Timeout acquiring initialization lock - returning current cache state") + return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[]) + except Exception as e: + logger.error(f"Unexpected error in get_cached_data: {e}") + return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[]) + + async def scan_all_recipes(self) -> List[Dict]: + """Scan all recipe JSON files and return metadata""" + recipes = [] + recipes_dir = self.recipes_dir + + if not recipes_dir or not os.path.exists(recipes_dir): + logger.warning(f"Recipes directory not found: {recipes_dir}") + return recipes + + # Get all recipe JSON files in the recipes directory + recipe_files = [] + for root, _, files in os.walk(recipes_dir): + recipe_count = sum(1 for f in files if f.lower().endswith('.recipe.json')) + if recipe_count > 0: + for file in files: 
+ if file.lower().endswith('.recipe.json'): + recipe_files.append(os.path.join(root, file)) + + # Process each recipe file + for recipe_path in recipe_files: + recipe_data = await self._load_recipe_file(recipe_path) + if recipe_data: + recipes.append(recipe_data) + + return recipes + + async def _load_recipe_file(self, recipe_path: str) -> Optional[Dict]: + """Load recipe data from a JSON file""" + try: + with open(recipe_path, 'r', encoding='utf-8') as f: + recipe_data = json.load(f) + + # Validate recipe data + if not recipe_data or not isinstance(recipe_data, dict): + logger.warning(f"Invalid recipe data in {recipe_path}") + return None + + # Ensure required fields exist + required_fields = ['id', 'file_path', 'title'] + for field in required_fields: + if field not in recipe_data: + logger.warning(f"Missing required field '{field}' in {recipe_path}") + return None + + # Ensure the image file exists + image_path = recipe_data.get('file_path') + if not os.path.exists(image_path): + logger.warning(f"Recipe image not found: {image_path}") + # Try to find the image in the same directory as the recipe + recipe_dir = os.path.dirname(recipe_path) + image_filename = os.path.basename(image_path) + alternative_path = os.path.join(recipe_dir, image_filename) + if os.path.exists(alternative_path): + recipe_data['file_path'] = alternative_path + else: + logger.warning(f"Could not find alternative image path for {image_path}") + + # Ensure loras array exists + if 'loras' not in recipe_data: + recipe_data['loras'] = [] + + # Ensure gen_params exists + if 'gen_params' not in recipe_data: + recipe_data['gen_params'] = {} + + # Update lora information with local paths and availability + await self._update_lora_information(recipe_data) + + return recipe_data + except Exception as e: + logger.error(f"Error loading recipe file {recipe_path}: {e}") + import traceback + traceback.print_exc(file=sys.stderr) + return None + + async def _update_lora_information(self, recipe_data: Dict) -> 
bool: + """Update LoRA information with hash and file_name + + Returns: + bool: True if metadata was updated + """ + if not recipe_data.get('loras'): + return False + + metadata_updated = False + + for lora in recipe_data['loras']: + # Skip if already has complete information + if 'hash' in lora and 'file_name' in lora and lora['file_name']: + continue + + # If has modelVersionId but no hash, look in lora cache first, then fetch from Civitai + if 'modelVersionId' in lora and not lora.get('hash'): + model_version_id = lora['modelVersionId'] + + # Try to find in lora cache first + hash_from_cache = await self._find_hash_in_lora_cache(model_version_id) + if hash_from_cache: + lora['hash'] = hash_from_cache + metadata_updated = True + else: + # If not in cache, fetch from Civitai + hash_from_civitai = await self._get_hash_from_civitai(model_version_id) + if hash_from_civitai: + lora['hash'] = hash_from_civitai + metadata_updated = True + else: + logger.warning(f"Could not get hash for modelVersionId {model_version_id}") + + # If has hash but no file_name, look up in lora library + if 'hash' in lora and (not lora.get('file_name') or not lora['file_name']): + hash_value = lora['hash'] + + if self._lora_scanner.has_lora_hash(hash_value): + lora_path = self._lora_scanner.get_lora_path_by_hash(hash_value) + if lora_path: + file_name = os.path.splitext(os.path.basename(lora_path))[0] + lora['file_name'] = file_name + metadata_updated = True + else: + # Lora not in library + lora['file_name'] = '' + metadata_updated = True + + return metadata_updated + + async def _find_hash_in_lora_cache(self, model_version_id: str) -> Optional[str]: + """Find hash in lora cache based on modelVersionId""" + try: + # Get all loras from cache + if not self._lora_scanner: + return None + + cache = await self._lora_scanner.get_cached_data() + if not cache or not cache.raw_data: + return None + + # Find lora with matching civitai.id + for lora in cache.raw_data: + civitai_data = 
lora.get('civitai', {}) + if civitai_data and str(civitai_data.get('id', '')) == str(model_version_id): + return lora.get('sha256') + + return None + except Exception as e: + logger.error(f"Error finding hash in lora cache: {e}") + return None + + async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]: + """Get hash from Civitai API""" + try: + if not self._civitai_client: + return None + + version_info = await self._civitai_client.get_model_version_info(model_version_id) + + if not version_info or not version_info.get('files'): + logger.warning(f"No files found in version info for ID: {model_version_id}") + return None + + # Get hash from the first file + for file_info in version_info.get('files', []): + if file_info.get('hashes', {}).get('SHA256'): + return file_info['hashes']['SHA256'] + + logger.warning(f"No SHA256 hash found in version info for ID: {model_version_id}") + return None + except Exception as e: + logger.error(f"Error getting hash from Civitai: {e}") + return None + + async def _get_model_version_name(self, model_version_id: str) -> Optional[str]: + """Get model version name from Civitai API""" + try: + if not self._civitai_client: + return None + + version_info = await self._civitai_client.get_model_version_info(model_version_id) + + if version_info and 'name' in version_info: + return version_info['name'] + + logger.warning(f"No version name found for modelVersionId {model_version_id}") + return None + except Exception as e: + logger.error(f"Error getting model version name from Civitai: {e}") + return None + + async def _determine_base_model(self, loras: List[Dict]) -> Optional[str]: + """Determine the most common base model among LoRAs""" + base_models = {} + + # Count occurrences of each base model + for lora in loras: + if 'hash' in lora: + lora_path = self._lora_scanner.get_lora_path_by_hash(lora['hash']) + if lora_path: + base_model = await self._get_base_model_for_lora(lora_path) + if base_model: + 
base_models[base_model] = base_models.get(base_model, 0) + 1 + + # Return the most common base model + if base_models: + return max(base_models.items(), key=lambda x: x[1])[0] + return None + + async def _get_base_model_for_lora(self, lora_path: str) -> Optional[str]: + """Get base model for a LoRA from cache""" + try: + if not self._lora_scanner: + return None + + cache = await self._lora_scanner.get_cached_data() + if not cache or not cache.raw_data: + return None + + # Find matching lora in cache + for lora in cache.raw_data: + if lora.get('file_path') == lora_path: + return lora.get('base_model') + + return None + except Exception as e: + logger.error(f"Error getting base model for lora: {e}") + return None + + async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'date', search: str = None, filters: dict = None, search_options: dict = None): + """Get paginated and filtered recipe data + + Args: + page: Current page number (1-based) + page_size: Number of items per page + sort_by: Sort method ('name' or 'date') + search: Search term + filters: Dictionary of filters to apply + search_options: Dictionary of search options to apply + """ + cache = await self.get_cached_data() + + # Get base dataset + filtered_data = cache.sorted_by_date if sort_by == 'date' else cache.sorted_by_name + + # Apply search filter + if search: + # Default search options if none provided + if not search_options: + search_options = { + 'title': True, + 'tags': True, + 'lora_name': True, + 'lora_model': True + } + + # Build the search predicate based on search options + def matches_search(item): + # Search in title if enabled + if search_options.get('title', True): + if fuzzy_match(str(item.get('title', '')), search): + return True + + # Search in tags if enabled + if search_options.get('tags', True) and 'tags' in item: + for tag in item['tags']: + if fuzzy_match(tag, search): + return True + + # Search in lora file names if enabled + if 
search_options.get('lora_name', True) and 'loras' in item: + for lora in item['loras']: + if fuzzy_match(str(lora.get('file_name', '')), search): + return True + + # Search in lora model names if enabled + if search_options.get('lora_model', True) and 'loras' in item: + for lora in item['loras']: + if fuzzy_match(str(lora.get('modelName', '')), search): + return True + + # No match found + return False + + # Filter the data using the search predicate + filtered_data = [item for item in filtered_data if matches_search(item)] + + # Apply additional filters + if filters: + # Filter by base model + if 'base_model' in filters and filters['base_model']: + filtered_data = [ + item for item in filtered_data + if item.get('base_model', '') in filters['base_model'] + ] + + # Filter by tags + if 'tags' in filters and filters['tags']: + filtered_data = [ + item for item in filtered_data + if any(tag in item.get('tags', []) for tag in filters['tags']) + ] + + # Calculate pagination + total_items = len(filtered_data) + start_idx = (page - 1) * page_size + end_idx = min(start_idx + page_size, total_items) + + # Get paginated items + paginated_items = filtered_data[start_idx:end_idx] + + # Add inLibrary information for each lora + for item in paginated_items: + if 'loras' in item: + for lora in item['loras']: + if 'hash' in lora and lora['hash']: + lora['inLibrary'] = self._lora_scanner.has_lora_hash(lora['hash'].lower()) + lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(lora['hash'].lower()) + lora['localPath'] = self._lora_scanner.get_lora_path_by_hash(lora['hash'].lower()) + + result = { + 'items': paginated_items, + 'total': total_items, + 'page': page, + 'page_size': page_size, + 'total_pages': (total_items + page_size - 1) // page_size + } + + return result \ No newline at end of file diff --git a/py/utils/exif_utils.py b/py/utils/exif_utils.py new file mode 100644 index 00000000..f1b304a1 --- /dev/null +++ b/py/utils/exif_utils.py @@ -0,0 +1,486 @@ +import 
piexif +import json +import logging +from typing import Dict, Optional, Any +from io import BytesIO +import os +from PIL import Image +import re + +logger = logging.getLogger(__name__) + +class ExifUtils: + """Utility functions for working with EXIF data in images""" + + @staticmethod + def extract_user_comment(image_path: str) -> Optional[str]: + """Extract UserComment field from image EXIF data""" + try: + # First try to open as image to check format + with Image.open(image_path) as img: + if img.format not in ['JPEG', 'TIFF', 'WEBP']: + # For non-JPEG/TIFF/WEBP images, try to get EXIF through PIL + exif = img._getexif() + if exif and piexif.ExifIFD.UserComment in exif: + user_comment = exif[piexif.ExifIFD.UserComment] + if isinstance(user_comment, bytes): + if user_comment.startswith(b'UNICODE\0'): + return user_comment[8:].decode('utf-16be') + return user_comment.decode('utf-8', errors='ignore') + return user_comment + return None + + # For JPEG/TIFF/WEBP, use piexif + exif_dict = piexif.load(image_path) + + if piexif.ExifIFD.UserComment in exif_dict.get('Exif', {}): + user_comment = exif_dict['Exif'][piexif.ExifIFD.UserComment] + if isinstance(user_comment, bytes): + if user_comment.startswith(b'UNICODE\0'): + user_comment = user_comment[8:].decode('utf-16be') + else: + user_comment = user_comment.decode('utf-8', errors='ignore') + return user_comment + return None + + except Exception as e: + return None + + @staticmethod + def update_user_comment(image_path: str, user_comment: str) -> str: + """Update UserComment field in image EXIF data""" + try: + # Load the image and its EXIF data + with Image.open(image_path) as img: + # Get original format + img_format = img.format + + # For WebP format, we need a different approach + if img_format == 'WEBP': + # WebP doesn't support standard EXIF through piexif + # We'll use PIL's exif parameter directly + exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + user_comment.encode('utf-16be')}} + exif_bytes = 
piexif.dump(exif_dict) + + # Save with the exif data + img.save(image_path, format='WEBP', exif=exif_bytes, quality=85) + return image_path + + # For other formats, use the standard approach + try: + exif_dict = piexif.load(img.info.get('exif', b'')) + except: + exif_dict = {'0th':{}, 'Exif':{}, 'GPS':{}, 'Interop':{}, '1st':{}} + + # If no Exif dictionary exists, create one + if 'Exif' not in exif_dict: + exif_dict['Exif'] = {} + + # Update the UserComment field - use UNICODE format + unicode_bytes = user_comment.encode('utf-16be') + user_comment_bytes = b'UNICODE\0' + unicode_bytes + + exif_dict['Exif'][piexif.ExifIFD.UserComment] = user_comment_bytes + + # Convert EXIF dict back to bytes + exif_bytes = piexif.dump(exif_dict) + + # Save the image with updated EXIF data + img.save(image_path, exif=exif_bytes) + + return image_path + except Exception as e: + logger.error(f"Error updating EXIF data in {image_path}: {e}") + return image_path + + @staticmethod + def append_recipe_metadata(image_path, recipe_data) -> str: + """Append recipe metadata to an image's EXIF data""" + try: + # First, extract existing user comment + user_comment = ExifUtils.extract_user_comment(image_path) + + # Check if there's already recipe metadata in the user comment + if user_comment: + # Remove any existing recipe metadata + user_comment = ExifUtils.remove_recipe_metadata(user_comment) + + # Prepare simplified loras data + simplified_loras = [] + for lora in recipe_data.get("loras", []): + simplified_lora = { + "file_name": lora.get("file_name", ""), + "hash": lora.get("hash", "").lower() if lora.get("hash") else "", + "strength": float(lora.get("strength", 1.0)), + "modelVersionId": lora.get("modelVersionId", ""), + "modelName": lora.get("modelName", ""), + "modelVersionName": lora.get("modelVersionName", ""), + } + simplified_loras.append(simplified_lora) + + # Create recipe metadata JSON + recipe_metadata = { + 'title': recipe_data.get('title', ''), + 'base_model': 
recipe_data.get('base_model', ''), + 'loras': simplified_loras, + 'gen_params': recipe_data.get('gen_params', {}), + 'tags': recipe_data.get('tags', []) + } + + # Convert to JSON string + recipe_metadata_json = json.dumps(recipe_metadata) + + # Create the recipe metadata marker + recipe_metadata_marker = f"Recipe metadata: {recipe_metadata_json}" + + # Append to existing user comment or create new one + new_user_comment = f"{user_comment} \n {recipe_metadata_marker}" if user_comment else recipe_metadata_marker + + # Write back to the image + return ExifUtils.update_user_comment(image_path, new_user_comment) + except Exception as e: + logger.error(f"Error appending recipe metadata: {e}", exc_info=True) + return image_path + + @staticmethod + def remove_recipe_metadata(user_comment): + """Remove recipe metadata from user comment""" + if not user_comment: + return "" + + # Find the recipe metadata marker + recipe_marker_index = user_comment.find("Recipe metadata: ") + if recipe_marker_index == -1: + return user_comment + + # If recipe metadata is not at the start, remove the preceding ", " + if recipe_marker_index >= 2 and user_comment[recipe_marker_index-2:recipe_marker_index] == ", ": + recipe_marker_index -= 2 + + # Remove the recipe metadata part + # First, find where the metadata ends (next line or end of string) + next_line_index = user_comment.find("\n", recipe_marker_index) + if next_line_index == -1: + # Metadata is at the end of the string + return user_comment[:recipe_marker_index].rstrip() + else: + # Metadata is in the middle of the string + return user_comment[:recipe_marker_index] + user_comment[next_line_index:] + + @staticmethod + def optimize_image(image_data, target_width=250, format='webp', quality=85, preserve_metadata=True): + """ + Optimize an image by resizing and converting to WebP format + + Args: + image_data: Binary image data or path to image file + target_width: Width to resize the image to (preserves aspect ratio) + format: Output format 
(default: webp) + quality: Output quality (0-100) + preserve_metadata: Whether to preserve EXIF metadata + + Returns: + Tuple of (optimized_image_data, extension) + """ + try: + # Extract metadata if needed + user_comment = None + if preserve_metadata: + if isinstance(image_data, str) and os.path.exists(image_data): + # It's a file path + user_comment = ExifUtils.extract_user_comment(image_data) + img = Image.open(image_data) + else: + # It's binary data + temp_img = BytesIO(image_data) + img = Image.open(temp_img) + # Save to a temporary file to extract metadata + import tempfile + with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp_file: + temp_path = temp_file.name + temp_file.write(image_data) + user_comment = ExifUtils.extract_user_comment(temp_path) + os.unlink(temp_path) + else: + # Just open the image without extracting metadata + if isinstance(image_data, str) and os.path.exists(image_data): + img = Image.open(image_data) + else: + img = Image.open(BytesIO(image_data)) + + # Calculate new height to maintain aspect ratio + width, height = img.size + new_height = int(height * (target_width / width)) + + # Resize the image + resized_img = img.resize((target_width, new_height), Image.LANCZOS) + + # Save to BytesIO in the specified format + output = BytesIO() + + # WebP format + if format.lower() == 'webp': + resized_img.save(output, format='WEBP', quality=quality) + extension = '.webp' + # JPEG format + elif format.lower() in ('jpg', 'jpeg'): + resized_img.save(output, format='JPEG', quality=quality) + extension = '.jpg' + # PNG format + elif format.lower() == 'png': + resized_img.save(output, format='PNG', optimize=True) + extension = '.png' + else: + # Default to WebP + resized_img.save(output, format='WEBP', quality=quality) + extension = '.webp' + + # Get the optimized image data + optimized_data = output.getvalue() + + # If we need to preserve metadata, write it to a temporary file + if preserve_metadata and user_comment: + # For WebP 
format, we'll directly save with metadata + if format.lower() == 'webp': + # Create a new BytesIO with metadata + output_with_metadata = BytesIO() + + # Create EXIF data with user comment + exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + user_comment.encode('utf-16be')}} + exif_bytes = piexif.dump(exif_dict) + + # Save with metadata + resized_img.save(output_with_metadata, format='WEBP', exif=exif_bytes, quality=quality) + optimized_data = output_with_metadata.getvalue() + else: + # For other formats, use the temporary file approach + import tempfile + with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as temp_file: + temp_path = temp_file.name + temp_file.write(optimized_data) + + # Add the metadata back + ExifUtils.update_user_comment(temp_path, user_comment) + + # Read the file with metadata + with open(temp_path, 'rb') as f: + optimized_data = f.read() + + # Clean up + os.unlink(temp_path) + + return optimized_data, extension + + except Exception as e: + logger.error(f"Error optimizing image: {e}", exc_info=True) + # Return original data if optimization fails + if isinstance(image_data, str) and os.path.exists(image_data): + with open(image_data, 'rb') as f: + return f.read(), os.path.splitext(image_data)[1] + return image_data, '.jpg' + + @staticmethod + def _parse_comfyui_workflow(workflow_data: Any) -> Dict[str, Any]: + """ + Parse ComfyUI workflow data and extract relevant generation parameters + + Args: + workflow_data: Raw workflow data (string or dict) + + Returns: + Formatted generation parameters dictionary + """ + try: + # If workflow_data is a string, try to parse it as JSON + if isinstance(workflow_data, str): + try: + workflow_data = json.loads(workflow_data) + except json.JSONDecodeError: + logger.error("Failed to parse workflow data as JSON") + return {} + + # Now workflow_data should be a dictionary + if not isinstance(workflow_data, dict): + logger.error(f"Workflow data is not a dictionary: {type(workflow_data)}") 
+ return {} + + # Initialize parameters dictionary with only the required fields + gen_params = { + "prompt": "", + "negative_prompt": "", + "steps": "", + "sampler": "", + "cfg_scale": "", + "seed": "", + "size": "", + "clip_skip": "" + } + + # First pass: find the KSampler node to get basic parameters and node references + # Store node references to follow for prompts + positive_ref = None + negative_ref = None + + for node_id, node_data in workflow_data.items(): + if not isinstance(node_data, dict): + continue + + # Extract node inputs if available + inputs = node_data.get("inputs", {}) + if not inputs: + continue + + # KSampler nodes contain most generation parameters and references to prompt nodes + if "KSampler" in node_data.get("class_type", ""): + # Extract basic sampling parameters + gen_params["steps"] = inputs.get("steps", "") + gen_params["cfg_scale"] = inputs.get("cfg", "") + gen_params["sampler"] = inputs.get("sampler_name", "") + gen_params["seed"] = inputs.get("seed", "") + if isinstance(gen_params["seed"], list) and len(gen_params["seed"]) > 1: + gen_params["seed"] = gen_params["seed"][1] # Use the actual value if it's a list + + # Get references to positive and negative prompt nodes + positive_ref = inputs.get("positive", "") + negative_ref = inputs.get("negative", "") + + # CLIPSetLastLayer contains clip_skip information + elif "CLIPSetLastLayer" in node_data.get("class_type", ""): + gen_params["clip_skip"] = inputs.get("stop_at_clip_layer", "") + if isinstance(gen_params["clip_skip"], int) and gen_params["clip_skip"] < 0: + # Convert negative layer index to positive clip skip value + gen_params["clip_skip"] = abs(gen_params["clip_skip"]) + + # Look for resolution information + elif "LatentImage" in node_data.get("class_type", "") or "Empty" in node_data.get("class_type", ""): + width = inputs.get("width", 0) + height = inputs.get("height", 0) + if width and height: + gen_params["size"] = f"{width}x{height}" + + # Some nodes have resolution as a 
string like "832x1216 (0.68)" + resolution = inputs.get("resolution", "") + if isinstance(resolution, str) and "x" in resolution: + gen_params["size"] = resolution.split(" ")[0] # Extract just the dimensions + + # Helper function to follow node references and extract text content + def get_text_from_node_ref(node_ref, workflow_data): + if not node_ref or not isinstance(node_ref, list) or len(node_ref) < 2: + return "" + + node_id, slot_idx = node_ref + + # If we can't find the node, return empty string + if node_id not in workflow_data: + return "" + + node = workflow_data[node_id] + inputs = node.get("inputs", {}) + + # Direct text input in CLIP Text Encode nodes + if "CLIPTextEncode" in node.get("class_type", ""): + text = inputs.get("text", "") + if isinstance(text, str): + return text + elif isinstance(text, list) and len(text) >= 2: + # If text is a reference to another node, follow it + return get_text_from_node_ref(text, workflow_data) + + # Other nodes might have text input with different field names + for field_name, field_value in inputs.items(): + if field_name == "text" and isinstance(field_value, str): + return field_value + elif isinstance(field_value, list) and len(field_value) >= 2 and field_name in ["text"]: + # If it's a reference to another node, follow it + return get_text_from_node_ref(field_value, workflow_data) + + return "" + + # Extract prompts by following references from KSampler node + if positive_ref: + gen_params["prompt"] = get_text_from_node_ref(positive_ref, workflow_data) + + if negative_ref: + gen_params["negative_prompt"] = get_text_from_node_ref(negative_ref, workflow_data) + + # Fallback: if we couldn't extract prompts via references, use the traditional method + if not gen_params["prompt"] or not gen_params["negative_prompt"]: + for node_id, node_data in workflow_data.items(): + if not isinstance(node_data, dict): + continue + + inputs = node_data.get("inputs", {}) + if not inputs: + continue + + if "CLIPTextEncode" in 
node_data.get("class_type", ""): + # Check for negative prompt nodes + title = node_data.get("_meta", {}).get("title", "").lower() + prompt_text = inputs.get("text", "") + + if isinstance(prompt_text, str): + if "negative" in title and not gen_params["negative_prompt"]: + gen_params["negative_prompt"] = prompt_text + elif prompt_text and not "negative" in title and not gen_params["prompt"]: + gen_params["prompt"] = prompt_text + + return gen_params + + except Exception as e: + logger.error(f"Error parsing ComfyUI workflow: {e}", exc_info=True) + return {} + + @staticmethod + def extract_comfyui_gen_params(image_path: str) -> Dict[str, Any]: + """ + Extract ComfyUI workflow data from PNG images and format for recipe data + Only extracts the specific generation parameters needed for recipes. + + Args: + image_path: Path to the ComfyUI-generated PNG image + + Returns: + Dictionary containing formatted generation parameters + """ + try: + # Check if the file exists and is accessible + if not os.path.exists(image_path): + logger.error(f"Image file not found: {image_path}") + return {} + + # Open the image to extract embedded workflow data + with Image.open(image_path) as img: + workflow_data = None + + # For PNG images, look for the ComfyUI workflow data in PNG chunks + if img.format == 'PNG': + # Check standard metadata fields that might contain workflow + if 'parameters' in img.info: + workflow_data = img.info['parameters'] + elif 'prompt' in img.info: + workflow_data = img.info['prompt'] + else: + # Look for other potential field names that might contain workflow data + for key in img.info: + if isinstance(key, str) and ('workflow' in key.lower() or 'comfy' in key.lower()): + workflow_data = img.info[key] + break + + # If no workflow data found in PNG chunks, try EXIF as fallback + if not workflow_data: + user_comment = ExifUtils.extract_user_comment(image_path) + if user_comment and '{' in user_comment and '}' in user_comment: + # Try to extract JSON part + 
import json
import logging
import os
import re
from typing import Dict, List, Any, Optional, Tuple
from abc import ABC, abstractmethod
from ..config import config

logger = logging.getLogger(__name__)

# Constants for generation parameters.
# Only these keys survive the gen_params filtering below; anything else found
# in image metadata is dropped before being returned to the frontend.
GEN_PARAM_KEYS = [
    'prompt',
    'negative_prompt',
    'steps',
    'sampler',
    'cfg_scale',
    'seed',
    'size',
    'clip_skip',
]


class RecipeMetadataParser(ABC):
    """Interface for parsing recipe metadata from image user comments"""

    # Marker (regex or literal substring) identifying this parser's format;
    # overridden by each concrete subclass.
    METADATA_MARKER = None

    @abstractmethod
    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the metadata format"""
        pass

    @abstractmethod
    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """
        Parse metadata from user comment and return structured recipe data

        Args:
            user_comment: The EXIF UserComment string from the image
            recipe_scanner: Optional recipe scanner instance for local LoRA lookup
            civitai_client: Optional Civitai client for fetching model information

        Returns:
            Dict containing parsed recipe data with standardized format
        """
        pass


class RecipeFormatParser(RecipeMetadataParser):
    """Parser for images with dedicated recipe metadata format"""

    # Regular expression pattern for extracting the JSON blob that follows
    # the "Recipe metadata:" marker in the EXIF UserComment.
    METADATA_MARKER = r'Recipe metadata: (\{.*\})'

    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the metadata format"""
        return re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL) is not None

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from images with dedicated recipe metadata format"""
        try:
            # Extract recipe metadata from user comment
            try:
                # Look for recipe metadata section
                recipe_match = re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL)
                if not recipe_match:
                    recipe_metadata = None
                else:
                    recipe_json = recipe_match.group(1)
                    recipe_metadata = json.loads(recipe_json)
            except Exception as e:
                logger.error(f"Error extracting recipe metadata: {e}")
                recipe_metadata = None
            if not recipe_metadata:
                return {"error": "No recipe metadata found", "loras": []}

            # Process the recipe metadata
            loras = []
            for lora in recipe_metadata.get('loras', []):
                # Convert recipe lora format to frontend format
                lora_entry = {
                    'id': lora.get('modelVersionId', ''),
                    'name': lora.get('modelName', ''),
                    'version': lora.get('modelVersionName', ''),
                    'type': 'lora',
                    'weight': lora.get('strength', 1.0),
                    'file_name': lora.get('file_name', ''),
                    'hash': lora.get('hash', '')
                }

                # Check if this LoRA exists locally by SHA256 hash
                if lora.get('hash') and recipe_scanner:
                    lora_scanner = recipe_scanner._lora_scanner
                    exists_locally = lora_scanner.has_lora_hash(lora['hash'])
                    if exists_locally:
                        # Enrich from the local cache: path, file name, size, preview.
                        lora_cache = await lora_scanner.get_cached_data()
                        lora_item = next((item for item in lora_cache.raw_data if item['sha256'].lower() == lora['hash'].lower()), None)
                        if lora_item:
                            lora_entry['existsLocally'] = True
                            lora_entry['localPath'] = lora_item['file_path']
                            lora_entry['file_name'] = lora_item['file_name']
                            lora_entry['size'] = lora_item['size']
                            lora_entry['thumbnailUrl'] = config.get_preview_static_url(lora_item['preview_url'])
                    else:
                        lora_entry['existsLocally'] = False
                        lora_entry['localPath'] = None

                        # Try to get additional info from Civitai if we have a model version ID
                        # NOTE(review): indentation reconstructed from collapsed source —
                        # the Civitai fallback is assumed to apply only to loras that are
                        # NOT available locally; confirm against the upstream file.
                        if lora.get('modelVersionId') and civitai_client:
                            try:
                                civitai_info = await civitai_client.get_model_version_info(lora['modelVersionId'])
                                if civitai_info and civitai_info.get("error") != "Model not found":
                                    # Get thumbnail URL from first image
                                    if 'images' in civitai_info and civitai_info['images']:
                                        lora_entry['thumbnailUrl'] = civitai_info['images'][0].get('url', '')

                                    # Get base model
                                    lora_entry['baseModel'] = civitai_info.get('baseModel', '')

                                    # Get download URL
                                    lora_entry['downloadUrl'] = civitai_info.get('downloadUrl', '')

                                    # Get size from files if available
                                    if 'files' in civitai_info:
                                        model_file = next((file for file in civitai_info.get('files', [])
                                                          if file.get('type') == 'Model'), None)
                                        if model_file:
                                            # Civitai reports sizes in KB; frontend expects bytes.
                                            lora_entry['size'] = model_file.get('sizeKB', 0) * 1024
                                else:
                                    # Model was removed from (or never existed on) Civitai.
                                    lora_entry['isDeleted'] = True
                                    lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png'
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for LoRA: {e}")
                                lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png'

                loras.append(lora_entry)

            logger.info(f"Found {len(loras)} loras in recipe metadata")

            # Filter gen_params to only include recognized keys
            filtered_gen_params = {}
            if 'gen_params' in recipe_metadata:
                for key, value in recipe_metadata['gen_params'].items():
                    if key in GEN_PARAM_KEYS:
                        filtered_gen_params[key] = value

            return {
                'base_model': recipe_metadata.get('base_model', ''),
                'loras': loras,
                'gen_params': filtered_gen_params,
                'tags': recipe_metadata.get('tags', []),
                'title': recipe_metadata.get('title', ''),
                'from_recipe_metadata': True
            }

        except Exception as e:
            logger.error(f"Error parsing recipe format metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}
"loras": []} + + +class StandardMetadataParser(RecipeMetadataParser): + """Parser for images with standard civitai metadata format (prompt, negative prompt, etc.)""" + + METADATA_MARKER = r'Civitai resources: ' + + def is_metadata_matching(self, user_comment: str) -> bool: + """Check if the user comment matches the metadata format""" + return re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL) is not None + + async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]: + """Parse metadata from images with standard metadata format""" + try: + # Parse the standard metadata + metadata = self._parse_recipe_metadata(user_comment) + + # Look for Civitai resources in the metadata + civitai_resources = metadata.get('loras', []) + checkpoint = metadata.get('checkpoint') + + if not civitai_resources and not checkpoint: + return { + "error": "No LoRA information found in this image", + "loras": [] + } + + # Process LoRAs and collect base models + base_model_counts = {} + loras = [] + + # Process LoRAs + for resource in civitai_resources: + # Get model version ID + model_version_id = resource.get('modelVersionId') + if not model_version_id: + continue + + # Initialize lora entry with default values + lora_entry = { + 'id': model_version_id, + 'name': resource.get('modelName', ''), + 'version': resource.get('modelVersionName', ''), + 'type': resource.get('type', 'lora'), + 'weight': resource.get('weight', 1.0), + 'existsLocally': False, + 'localPath': None, + 'file_name': '', + 'hash': '', + 'thumbnailUrl': '', + 'baseModel': '', + 'size': 0, + 'downloadUrl': '', + 'isDeleted': False + } + + # Get additional info from Civitai if client is available + if civitai_client: + civitai_info = await civitai_client.get_model_version_info(model_version_id) + + # Check if this LoRA exists locally by SHA256 hash + if civitai_info and civitai_info.get("error") != "Model not found": + # LoRA exists on Civitai, process its 
information + if 'files' in civitai_info: + # Find the model file (type="Model") in the files list + model_file = next((file for file in civitai_info.get('files', []) + if file.get('type') == 'Model'), None) + + if model_file and recipe_scanner: + sha256 = model_file.get('hashes', {}).get('SHA256', '') + if sha256: + lora_scanner = recipe_scanner._lora_scanner + exists_locally = lora_scanner.has_lora_hash(sha256) + if exists_locally: + local_path = lora_scanner.get_lora_path_by_hash(sha256) + lora_entry['existsLocally'] = True + lora_entry['localPath'] = local_path + lora_entry['file_name'] = os.path.splitext(os.path.basename(local_path))[0] + else: + # For missing LoRAs, get file_name from model_file.name + file_name = model_file.get('name', '') + lora_entry['file_name'] = os.path.splitext(file_name)[0] if file_name else '' + + lora_entry['hash'] = sha256 + lora_entry['size'] = model_file.get('sizeKB', 0) * 1024 + + # Get thumbnail URL from first image + if 'images' in civitai_info and civitai_info['images']: + lora_entry['thumbnailUrl'] = civitai_info['images'][0].get('url', '') + + # Get base model and update counts + current_base_model = civitai_info.get('baseModel', '') + lora_entry['baseModel'] = current_base_model + if current_base_model: + base_model_counts[current_base_model] = base_model_counts.get(current_base_model, 0) + 1 + + # Get download URL + lora_entry['downloadUrl'] = civitai_info.get('downloadUrl', '') + else: + # LoRA is deleted from Civitai or not found + lora_entry['isDeleted'] = True + lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png' + + loras.append(lora_entry) + + # Set base_model to the most common one from civitai_info + base_model = None + if base_model_counts: + base_model = max(base_model_counts.items(), key=lambda x: x[1])[0] + + # Extract generation parameters for recipe metadata + gen_params = {} + for key in GEN_PARAM_KEYS: + if key in metadata: + gen_params[key] = metadata.get(key, '') + + return { + 
'base_model': base_model, + 'loras': loras, + 'gen_params': gen_params, + 'raw_metadata': metadata + } + + except Exception as e: + logger.error(f"Error parsing standard metadata: {e}", exc_info=True) + return {"error": str(e), "loras": []} + + def _parse_recipe_metadata(self, user_comment: str) -> Dict[str, Any]: + """Parse recipe metadata from UserComment""" + try: + # Split by 'Negative prompt:' to get the prompt + parts = user_comment.split('Negative prompt:', 1) + prompt = parts[0].strip() + + # Initialize metadata with prompt + metadata = {"prompt": prompt, "loras": [], "checkpoint": None} + + # Extract additional fields if available + if len(parts) > 1: + negative_and_params = parts[1] + + # Extract negative prompt + if "Steps:" in negative_and_params: + neg_prompt = negative_and_params.split("Steps:", 1)[0].strip() + metadata["negative_prompt"] = neg_prompt + + # Extract key-value parameters (Steps, Sampler, CFG scale, etc.) + param_pattern = r'([A-Za-z ]+): ([^,]+)' + params = re.findall(param_pattern, negative_and_params) + for key, value in params: + clean_key = key.strip().lower().replace(' ', '_') + metadata[clean_key] = value.strip() + + # Extract Civitai resources + if 'Civitai resources:' in user_comment: + resources_part = user_comment.split('Civitai resources:', 1)[1] + if '],' in resources_part: + resources_json = resources_part.split('],', 1)[0] + ']' + try: + resources = json.loads(resources_json) + # Filter loras and checkpoints + for resource in resources: + if resource.get('type') == 'lora': + # 确保 weight 字段被正确保留 + lora_entry = resource.copy() + # 如果找不到 weight,默认为 1.0 + if 'weight' not in lora_entry: + lora_entry['weight'] = 1.0 + # Ensure modelVersionName is included + if 'modelVersionName' not in lora_entry: + lora_entry['modelVersionName'] = '' + metadata['loras'].append(lora_entry) + elif resource.get('type') == 'checkpoint': + metadata['checkpoint'] = resource + except json.JSONDecodeError: + pass + + return metadata + except Exception 
class A1111MetadataParser(RecipeMetadataParser):
    """Parser for images with A1111 metadata format (Lora hashes)"""

    METADATA_MARKER = r'Lora hashes:'
    # A1111 embeds loras in the prompt as `<lora:name:weight>` tags.
    # FIX: the previous pattern (r']+)>') was a truncated/garbled regex — it had
    # an unbalanced ')' (raising re.error at findall time) and lacked the two
    # capture groups that the unpacking loop below requires.
    LORA_PATTERN = r'<lora:([^:]+):([^>]+)>'
    # "name: hexhash" pairs inside the quoted "Lora hashes" section.
    LORA_HASH_PATTERN = r'([^:]+): ([a-f0-9]+)'

    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the A1111 metadata format"""
        return 'Lora hashes:' in user_comment

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from images with A1111 metadata format

        Args:
            user_comment: The EXIF UserComment string from the image
            recipe_scanner: Optional recipe scanner for local LoRA lookup by hash
            civitai_client: Optional Civitai client used to resolve hashes to models

        Returns:
            Dict with 'base_model', 'loras', 'gen_params' and 'raw_metadata',
            or {'error': ..., 'loras': []} on failure.
        """
        try:
            # Extract prompt and negative prompt
            parts = user_comment.split('Negative prompt:', 1)
            prompt = parts[0].strip()

            # Initialize metadata
            metadata = {"prompt": prompt, "loras": []}

            # Extract negative prompt and parameters
            if len(parts) > 1:
                negative_and_params = parts[1]

                # Extract negative prompt (everything up to the "Steps:" key)
                if "Steps:" in negative_and_params:
                    neg_prompt = negative_and_params.split("Steps:", 1)[0].strip()
                    metadata["negative_prompt"] = neg_prompt

                # Extract key-value parameters (Steps, Sampler, CFG scale, etc.)
                param_pattern = r'([A-Za-z ]+): ([^,]+)'
                params = re.findall(param_pattern, negative_and_params)
                for key, value in params:
                    clean_key = key.strip().lower().replace(' ', '_')
                    metadata[clean_key] = value.strip()

            # Extract LoRA information (name -> weight) from the prompt tags
            lora_weights = {}
            lora_matches = re.findall(self.LORA_PATTERN, prompt)
            for lora_name, weight in lora_matches:
                lora_weights[lora_name.strip()] = float(weight.strip())

            # Remove LoRA patterns from prompt so downstream sees clean text
            metadata["prompt"] = re.sub(self.LORA_PATTERN, '', prompt).strip()

            # Extract LoRA hashes from the quoted "Lora hashes" section
            lora_hashes = {}
            if 'Lora hashes:' in user_comment:
                lora_hash_section = user_comment.split('Lora hashes:', 1)[1].strip()
                if lora_hash_section.startswith('"'):
                    lora_hash_section = lora_hash_section[1:].split('"', 1)[0]
                hash_matches = re.findall(self.LORA_HASH_PATTERN, lora_hash_section)
                for lora_name, hash_value in hash_matches:
                    # Remove any leading comma and space from lora name
                    clean_name = lora_name.strip().lstrip(',').strip()
                    lora_hashes[clean_name] = hash_value.strip()

            # Process LoRAs and collect base models
            base_model_counts = {}
            loras = []

            # Process each LoRA with hash and weight
            for lora_name, hash_value in lora_hashes.items():
                weight = lora_weights.get(lora_name, 1.0)

                # Initialize lora entry with default values
                lora_entry = {
                    'name': lora_name,
                    'type': 'lora',
                    'weight': weight,
                    'existsLocally': False,
                    'localPath': None,
                    'file_name': lora_name,
                    'hash': hash_value,
                    'thumbnailUrl': '/loras_static/images/no-preview.png',
                    'baseModel': '',
                    'size': 0,
                    'downloadUrl': '',
                    'isDeleted': False
                }

                # Get info from Civitai by hash
                if civitai_client:
                    try:
                        civitai_info = await civitai_client.get_model_by_hash(hash_value)
                        if civitai_info and civitai_info.get("error") != "Model not found":
                            # Get model version ID
                            lora_entry['id'] = civitai_info.get('id', '')

                            # Get model name and version
                            lora_entry['name'] = civitai_info.get('model', {}).get('name', lora_name)
                            lora_entry['version'] = civitai_info.get('name', '')

                            # Get thumbnail URL
                            if 'images' in civitai_info and civitai_info['images']:
                                lora_entry['thumbnailUrl'] = civitai_info['images'][0].get('url', '')

                            # Get base model and update counts
                            current_base_model = civitai_info.get('baseModel', '')
                            lora_entry['baseModel'] = current_base_model
                            if current_base_model:
                                base_model_counts[current_base_model] = base_model_counts.get(current_base_model, 0) + 1

                            # Get download URL
                            lora_entry['downloadUrl'] = civitai_info.get('downloadUrl', '')

                            # Get file name and size from Civitai
                            if 'files' in civitai_info:
                                model_file = next((file for file in civitai_info.get('files', [])
                                                  if file.get('type') == 'Model'), None)
                                if model_file:
                                    file_name = model_file.get('name', '')
                                    lora_entry['file_name'] = os.path.splitext(file_name)[0] if file_name else lora_name
                                    # Civitai reports sizes in KB; convert to bytes.
                                    lora_entry['size'] = model_file.get('sizeKB', 0) * 1024
                                    # Update hash to sha256 (the A1111 short hash is not sha256)
                                    lora_entry['hash'] = model_file.get('hashes', {}).get('SHA256', hash_value).lower()

                            # Check if exists locally with sha256 hash
                            if recipe_scanner and lora_entry['hash']:
                                lora_scanner = recipe_scanner._lora_scanner
                                exists_locally = lora_scanner.has_lora_hash(lora_entry['hash'])
                                if exists_locally:
                                    lora_cache = await lora_scanner.get_cached_data()
                                    lora_item = next((item for item in lora_cache.raw_data if item['sha256'] == lora_entry['hash']), None)
                                    if lora_item:
                                        lora_entry['existsLocally'] = True
                                        lora_entry['localPath'] = lora_item['file_path']
                                        lora_entry['thumbnailUrl'] = config.get_preview_static_url(lora_item['preview_url'])

                    except Exception as e:
                        logger.error(f"Error fetching Civitai info for LoRA hash {hash_value}: {e}")

                loras.append(lora_entry)

            # Set base_model to the most common one from civitai_info
            base_model = None
            if base_model_counts:
                base_model = max(base_model_counts.items(), key=lambda x: x[1])[0]

            # Extract generation parameters for recipe metadata
            gen_params = {}
            for key in GEN_PARAM_KEYS:
                if key in metadata:
                    gen_params[key] = metadata.get(key, '')

            # Add model information if available
            if 'model' in metadata:
                gen_params['checkpoint'] = metadata['model']

            return {
                'base_model': base_model,
                'loras': loras,
                'gen_params': gen_params,
                'raw_metadata': metadata
            }

        except Exception as e:
            logger.error(f"Error parsing A1111 metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}
Extract generation parameters for recipe metadata + gen_params = {} + for key in GEN_PARAM_KEYS: + if key in metadata: + gen_params[key] = metadata.get(key, '') + + # Add model information if available + if 'model' in metadata: + gen_params['checkpoint'] = metadata['model'] + + return { + 'base_model': base_model, + 'loras': loras, + 'gen_params': gen_params, + 'raw_metadata': metadata + } + + except Exception as e: + logger.error(f"Error parsing A1111 metadata: {e}", exc_info=True) + return {"error": str(e), "loras": []} + + +class RecipeParserFactory: + """Factory for creating recipe metadata parsers""" + + @staticmethod + def create_parser(user_comment: str) -> RecipeMetadataParser: + """ + Create appropriate parser based on the user comment content + + Args: + user_comment: The EXIF UserComment string from the image + + Returns: + Appropriate RecipeMetadataParser implementation + """ + if RecipeFormatParser().is_metadata_matching(user_comment): + return RecipeFormatParser() + elif StandardMetadataParser().is_metadata_matching(user_comment): + return StandardMetadataParser() + elif A1111MetadataParser().is_metadata_matching(user_comment): + return A1111MetadataParser() + else: + return None \ No newline at end of file diff --git a/py/utils/utils.py b/py/utils/utils.py new file mode 100644 index 00000000..1a3cf326 --- /dev/null +++ b/py/utils/utils.py @@ -0,0 +1,78 @@ +from difflib import SequenceMatcher +import requests +import tempfile +import re +from bs4 import BeautifulSoup + +def download_twitter_image(url): + """Download image from a URL containing twitter:image meta tag + + Args: + url (str): The URL to download image from + + Returns: + str: Path to downloaded temporary image file + """ + try: + # Download page content + response = requests.get(url) + response.raise_for_status() + + # Parse HTML + soup = BeautifulSoup(response.text, 'html.parser') + + # Find twitter:image meta tag + meta_tag = soup.find('meta', attrs={'property': 'twitter:image'}) + if 
not meta_tag: + return None + + image_url = meta_tag['content'] + + # Download image + image_response = requests.get(image_url) + image_response.raise_for_status() + + # Save to temp file + with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file: + temp_file.write(image_response.content) + return temp_file.name + + except Exception as e: + print(f"Error downloading twitter image: {e}") + return None + +def fuzzy_match(text: str, pattern: str, threshold: float = 0.7) -> bool: + """ + Check if text matches pattern using fuzzy matching. + Returns True if similarity ratio is above threshold. + """ + if not pattern or not text: + return False + + # Convert both to lowercase for case-insensitive matching + text = text.lower() + pattern = pattern.lower() + + # Split pattern into words + search_words = pattern.split() + + # Check each word + for word in search_words: + # First check if word is a substring (faster) + if word in text: + continue + + # If not found as substring, try fuzzy matching + # Check if any part of the text matches this word + found_match = False + for text_part in text.split(): + ratio = SequenceMatcher(None, text_part, word).ratio() + if ratio >= threshold: + found_match = True + break + + if not found_match: + return False + + # All words found either as substrings or fuzzy matches + return True diff --git a/py/workflow_params/README.md b/py/workflow_params/README.md new file mode 100644 index 00000000..fdb5971d --- /dev/null +++ b/py/workflow_params/README.md @@ -0,0 +1,116 @@ +# ComfyUI Workflow Parser + +A module for parsing ComfyUI workflow JSON and extracting generation parameters. 
+ +## Features + +- Parse ComfyUI workflow JSON files to extract generation parameters +- Extract lora information from workflows +- Support for node traversal and parameter resolution +- Extensible architecture for supporting custom node types +- Dynamic loading of node processor extensions + +## Usage + +### Basic Usage + +```python +from workflow_params import parse_workflow + +# Parse from a file +with open('my_workflow.json', 'r') as f: + workflow_json = f.read() + +result = parse_workflow(workflow_json) +print(result) +``` + +### Using the WorkflowParser directly + +```python +from workflow_params import WorkflowParser + +parser = WorkflowParser() +result = parser.parse_workflow(workflow_json) +``` + +### Loading Extensions + +Extensions are loaded automatically by default, but you can also control this behavior: + +```python +from workflow_params import WorkflowParser + +# Don't load extensions +parser = WorkflowParser(load_extensions=False) + +# Load extensions from a custom directory +parser = WorkflowParser(extensions_dir='/path/to/extensions') +``` + +### Creating Custom Node Processors + +To support a custom node type, create a processor class: + +```python +from workflow_params import NodeProcessor, register_processor + +@register_processor +class CustomNodeProcessor(NodeProcessor): + """Processor for CustomNode nodes""" + + NODE_CLASS_TYPE = "CustomNode" + REQUIRED_FIELDS = {"param1", "param2"} + + def process(self, workflow_parser): + result = {} + + # Extract direct values + if "param1" in self.inputs: + result["value1"] = self.inputs["param1"] + + # Resolve referenced inputs + if "param2" in self.inputs: + result["value2"] = self.resolve_input("param2", workflow_parser) + + return result +``` + +## Command Line Interface + +A command-line interface is available for testing: + +```bash +python -m workflow_params.cli input_workflow.json -o output.json +``` + +## Extension System + +The module includes an extension system for dynamically loading node 
processors: + +```python +from workflow_params import get_extension_manager + +# Get the extension manager +manager = get_extension_manager() + +# Load all extensions +manager.load_all_extensions() + +# Load a specific extension +manager.load_extension('path/to/extension.py') +``` + +Extensions should be placed in the `workflow_params/extensions` directory by default, or a custom directory can be specified. + +## Supported Node Types + +- KSampler +- CLIPTextEncode +- EmptyLatentImage +- JoinStrings +- StringConstantMultiline +- CLIPSetLastLayer +- TriggerWord Toggle (LoraManager) +- Lora Loader (LoraManager) +- Lora Stacker (LoraManager) \ No newline at end of file diff --git a/py/workflow_params/__init__.py b/py/workflow_params/__init__.py new file mode 100644 index 00000000..f3460680 --- /dev/null +++ b/py/workflow_params/__init__.py @@ -0,0 +1,14 @@ +# This package contains modules for workflow parameter extraction and processing +from .workflow_parser import WorkflowParser, parse_workflow +from .extension_manager import ExtensionManager, get_extension_manager +from .node_processors import NodeProcessor, NODE_PROCESSORS, register_processor + +__all__ = [ + "WorkflowParser", + "parse_workflow", + "ExtensionManager", + "get_extension_manager", + "NodeProcessor", + "NODE_PROCESSORS", + "register_processor" +] \ No newline at end of file diff --git a/py/workflow_params/cli.py b/py/workflow_params/cli.py new file mode 100644 index 00000000..4c3e4414 --- /dev/null +++ b/py/workflow_params/cli.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 +"""Command-line interface for testing the workflow parser""" + +import argparse +import json +import sys +from pathlib import Path +import logging + +from .workflow_parser import WorkflowParser + +# Configure logging +logging.basicConfig(level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +def main(): + """Main entry point for the command-line interface""" + 
parser = argparse.ArgumentParser(description="Parse ComfyUI workflow JSON files") + parser.add_argument("input_file", type=str, help="Path to input workflow JSON file") + parser.add_argument("-o", "--output", type=str, help="Path to output JSON file (defaults to stdout)") + parser.add_argument("-v", "--verbose", action="store_true", help="Enable verbose output") + + args = parser.parse_args() + + # Set log level based on verbosity + if args.verbose: + logging.getLogger().setLevel(logging.DEBUG) + + # Read input file + input_path = Path(args.input_file) + if not input_path.exists(): + logger.error(f"Input file {input_path} does not exist") + return 1 + + try: + with open(input_path, 'r', encoding='utf-8') as f: + workflow_json = f.read() + except Exception as e: + logger.error(f"Failed to read input file: {e}") + return 1 + + # Parse workflow + try: + workflow_parser = WorkflowParser() + result = workflow_parser.parse_workflow(workflow_json) + except Exception as e: + logger.error(f"Failed to parse workflow: {e}") + return 1 + + # Output result + output_json = json.dumps(result, indent=4) + + if args.output: + try: + with open(args.output, 'w', encoding='utf-8') as f: + f.write(output_json) + logger.info(f"Output written to {args.output}") + except Exception as e: + logger.error(f"Failed to write output file: {e}") + return 1 + else: + print(output_json) + + return 0 + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file diff --git a/py/workflow_params/extension_manager.py b/py/workflow_params/extension_manager.py new file mode 100644 index 00000000..0b553bd2 --- /dev/null +++ b/py/workflow_params/extension_manager.py @@ -0,0 +1,163 @@ +"""Module for dynamically loading node processor extensions""" + +import os +import importlib +import importlib.util +import logging +import inspect +from typing import Dict, Any, List, Set, Type +from pathlib import Path + +from .node_processors import NodeProcessor, NODE_PROCESSORS + +logger = 
class ExtensionManager:
    """Manager for dynamically loading node processor extensions"""

    def __init__(self, extensions_dir: str = None):
        """
        Initialize the extension manager

        Args:
            extensions_dir: Optional path to a directory containing extensions
                If None, uses the default extensions directory
                (the "extensions" folder next to this module).
        """
        if extensions_dir is None:
            # Use the default extensions directory
            module_dir = os.path.dirname(os.path.abspath(__file__))
            self.extensions_dir = os.path.join(module_dir, "extensions")
        else:
            self.extensions_dir = extensions_dir

        # Maps extension file path -> the loaded module object.
        self.loaded_extensions: Dict[str, Any] = {}

    def discover_extensions(self) -> List[str]:
        """
        Discover available extensions in the extensions directory

        Returns:
            List of extension file paths that can be loaded
        """
        if not os.path.exists(self.extensions_dir):
            logger.warning(f"Extensions directory not found: {self.extensions_dir}")
            return []

        extension_files = []

        # Walk through the extensions directory (recursively)
        for root, _, files in os.walk(self.extensions_dir):
            for filename in files:
                # Only consider Python files; dunder files (__init__.py etc.)
                # are package plumbing, not extensions.
                if filename.endswith('.py') and not filename.startswith('__'):
                    filepath = os.path.join(root, filename)
                    extension_files.append(filepath)

        return extension_files

    def load_extension(self, extension_path: str) -> bool:
        """
        Load a single extension from a file path

        Args:
            extension_path: Path to the extension file

        Returns:
            True if loaded successfully, False otherwise
        """
        # Idempotent: a path already loaded is reported as success.
        if extension_path in self.loaded_extensions:
            logger.debug(f"Extension already loaded: {extension_path}")
            return True

        try:
            # Get module name from file path
            module_name = os.path.basename(extension_path).replace(".py", "")

            # Load the module from the file, outside the normal import system
            spec = importlib.util.spec_from_file_location(module_name, extension_path)
            if spec is None or spec.loader is None:
                logger.error(f"Failed to load extension spec: {extension_path}")
                return False

            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)

            # Find NodeProcessor subclasses in the module
            processor_classes = []
            for _, obj in inspect.getmembers(module):
                if (inspect.isclass(obj) and
                    issubclass(obj, NodeProcessor) and
                    obj is not NodeProcessor):
                    processor_classes.append(obj)

            if not processor_classes:
                logger.warning(f"No NodeProcessor subclasses found in {extension_path}")
                return False

            # Register each processor class in the global NODE_PROCESSORS registry
            for cls in processor_classes:
                cls.register()

            # Store the loaded module (keeps it alive and marks it as loaded)
            self.loaded_extensions[extension_path] = module
            logger.info(f"Loaded extension: {extension_path} with {len(processor_classes)} processors")

            return True

        except Exception as e:
            logger.error(f"Failed to load extension {extension_path}: {e}")
            return False

    def load_all_extensions(self) -> Dict[str, bool]:
        """
        Load all available extensions

        Returns:
            Dict mapping extension paths to success/failure status
        """
        extension_files = self.discover_extensions()
        results = {}

        for extension_path in extension_files:
            results[extension_path] = self.load_extension(extension_path)

        return results

    def get_loaded_processor_types(self) -> Set[str]:
        """
        Get the set of all loaded processor types

        Returns:
            Set of class_type names for all loaded processors
        """
        return set(NODE_PROCESSORS.keys())

    def get_loaded_extension_count(self) -> int:
        """
        Get the number of loaded extensions

        Returns:
            Number of loaded extensions
        """
        return len(self.loaded_extensions)


# Module-level singleton, created lazily by get_extension_manager().
_extension_manager = None

def get_extension_manager(extensions_dir: str = None) -> ExtensionManager:
    """
    Get the singleton ExtensionManager instance

    Args:
        extensions_dir: Optional path to extensions directory.
            NOTE: only honoured on the first call (when the singleton is
            created); later calls silently ignore it.

    Returns:
        ExtensionManager instance
    """
    global _extension_manager

    if _extension_manager is None:
        _extension_manager = ExtensionManager(extensions_dir)

    return _extension_manager
#!/usr/bin/env python3
"""Example of integrating the workflow parser with other modules"""

import os
import json
import sys
import logging
import re
from pathlib import Path

# Add the parent directory to the Python path if needed
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

from py.workflow_params import WorkflowParser

# Configure logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def extract_and_save_workflow_params(workflow_path, output_path=None):
    """
    Extract parameters from a workflow and save them to a file

    Args:
        workflow_path: Path to the workflow JSON file
        output_path: Optional path to save the extracted parameters
                     If None, prints the parameters to stdout

    Returns:
        The extracted parameters, or None on any failure
    """
    # Ensure the workflow file exists
    if not os.path.exists(workflow_path):
        logger.error(f"Workflow file not found: {workflow_path}")
        return None

    # Read the workflow file
    try:
        with open(workflow_path, 'r', encoding='utf-8') as f:
            workflow_json = f.read()
    except Exception as e:
        logger.error(f"Failed to read workflow file: {e}")
        return None

    # Parse the workflow
    try:
        parser = WorkflowParser()
        params = parser.parse_workflow(workflow_json)
    except Exception as e:
        logger.error(f"Failed to parse workflow: {e}")
        return None

    # Format the output
    output_json = json.dumps(params, indent=4)

    # Save or print the output
    if output_path:
        try:
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(output_json)
            logger.info(f"Parameters saved to {output_path}")
        except Exception as e:
            logger.error(f"Failed to write output file: {e}")
    else:
        print(output_json)

    return params

def get_workflow_loras(workflow_path):
    """
    Extract just the loras from a workflow

    Args:
        workflow_path: Path to the workflow JSON file

    Returns:
        List of lora names used in the workflow
    """
    params = extract_and_save_workflow_params(workflow_path)
    if not params or "loras" not in params:
        return []

    # Extract lora names from the lora strings
    lora_text = params["loras"]

    # Each lora appears as a `<lora:name:strength>` tag; capture the name only
    # so the return value matches the documented contract.
    # FIX: the previous pattern was truncated (r']+>') and could not match
    # lora tags at all.
    lora_pattern = r'<lora:([^:>]+)'
    matches = re.findall(lora_pattern, lora_text)

    return matches

def main():
    """Main example function"""
    # Check for command line arguments
    # FIX: restored the garbled usage string (the <workflow_file> operand was missing).
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <workflow_file> [output_file]")
        return 1

    workflow_path = sys.argv[1]
    output_path = sys.argv[2] if len(sys.argv) > 2 else None

    # Example 1: Extract and save all parameters
    params = extract_and_save_workflow_params(workflow_path, output_path)
    if not params:
        return 1

    # Example 2: Get just the loras
    loras = get_workflow_loras(workflow_path)
    print(f"Loras used in the workflow: {loras}")

    return 0

if __name__ == "__main__":
    sys.exit(main())
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Optional, Set, Callable, Type

# Registry mapping a workflow node's class_type to the processor that handles it
NODE_PROCESSORS: Dict[str, Type['NodeProcessor']] = {}

class NodeProcessor(ABC):
    """Base class for node processors that extract information from workflow nodes.

    Subclasses declare the workflow ``class_type`` they handle via
    ``NODE_CLASS_TYPE`` and are added to ``NODE_PROCESSORS`` by ``register()``
    (normally through the ``@register_processor`` decorator).
    """

    # The workflow class_type this processor handles; None on the base class
    # means "not registrable". (Fixed: was annotated `str` with a None default.)
    NODE_CLASS_TYPE: Optional[str] = None
    # Input field names this processor reads (informational, not enforced)
    REQUIRED_FIELDS: Set[str] = set()

    def __init__(self, node_id: str, node_data: Dict[str, Any], workflow: Dict[str, Any]):
        """
        Initialize a node processor

        Args:
            node_id: The ID of the node in the workflow
            node_data: The node data from the workflow
            workflow: The complete workflow data
        """
        self.node_id = node_id
        self.node_data = node_data
        self.workflow = workflow
        self.inputs = node_data.get('inputs', {})

    @classmethod
    def register(cls):
        """Register this processor in the global registry (no-op when NODE_CLASS_TYPE is unset)."""
        if cls.NODE_CLASS_TYPE:
            NODE_PROCESSORS[cls.NODE_CLASS_TYPE] = cls

    @abstractmethod
    def process(self, workflow_parser) -> Dict[str, Any]:
        """
        Process the node and extract relevant information

        Args:
            workflow_parser: The workflow parser instance for resolving node references

        Returns:
            Dict containing extracted information from the node
        """

    def resolve_input(self, input_key: str, workflow_parser) -> Any:
        """
        Resolve an input value which might be a reference to another node.

        A two-element list is treated as a ``[node_id, slot_index]`` reference
        and resolved by asking the parser to process the referenced node.
        NOTE(review): the slot index is currently ignored — each processor
        returns a single aggregate value per node.

        Args:
            input_key: The input key to resolve
            workflow_parser: The workflow parser instance

        Returns:
            The resolved value, or None when the input is absent
        """
        input_value = self.inputs.get(input_key)
        if input_value is None:
            return None

        # [node_id, slot_index] reference -> recurse into the referenced node
        if isinstance(input_value, list) and len(input_value) == 2:
            ref_node_id, _slot_index = input_value
            return workflow_parser.process_node(ref_node_id)

        # Otherwise it is a literal value
        return input_value


def register_processor(cls):
    """Class decorator that registers a NodeProcessor subclass in NODE_PROCESSORS."""
    cls.register()
    return cls
@register_processor
class CLIPTextEncodeProcessor(NodeProcessor):
    """Extracts the prompt text from a CLIPTextEncode node."""

    NODE_CLASS_TYPE = "CLIPTextEncode"
    REQUIRED_FIELDS = {"text", "clip"}

    def process(self, workflow_parser) -> Dict[str, Any]:
        """Return the node's text prompt, following node references if needed."""
        if "text" not in self.inputs:
            return None
        # "text" may be a literal string or a [node_id, slot] reference;
        # resolve_input handles both cases.
        return self.resolve_input("text", workflow_parser)
@register_processor
class KSamplerProcessor(NodeProcessor):
    """Extracts generation parameters from a KSampler node."""

    NODE_CLASS_TYPE = "KSampler"
    REQUIRED_FIELDS = {"seed", "steps", "cfg", "sampler_name", "scheduler", "denoise",
                       "positive", "negative", "latent_image"}

    def process(self, workflow_parser) -> Dict[str, Any]:
        """Collect seed/steps/cfg/sampler, prompts, and image size for this sampler."""
        params = {}

        # Plain numeric inputs are reported as strings.
        for in_key, out_key in (("seed", "seed"), ("steps", "steps"), ("cfg", "cfg_scale")):
            if in_key in self.inputs:
                params[out_key] = str(self.inputs[in_key])

        if "sampler_name" in self.inputs:
            params["sampler"] = self.inputs["sampler_name"]

        # Prompts come from upstream CLIPTextEncode chains via node references.
        for in_key, out_key in (("positive", "prompt"), ("negative", "negative_prompt")):
            if in_key in self.inputs:
                text = self.resolve_input(in_key, workflow_parser)
                if text:
                    params[out_key] = text

        # Image size comes from the latent source (e.g. EmptyLatentImage).
        if "latent_image" in self.inputs:
            latent = self.resolve_input("latent_image", workflow_parser)
            if latent and "width" in latent and "height" in latent:
                params["size"] = f"{latent['width']}x{latent['height']}"

        return params
@register_processor
class LoraLoaderProcessor(NodeProcessor):
    """Processor for Lora Loader (LoraManager) nodes.

    Produces a dict that may contain:
        lora_text:    the raw text input on the node
        active_loras: space-separated lora tags for enabled loras
                      (including any upstream stack, stack first)
        lora_stack:   the combined stack (mirrors active_loras)
    """

    NODE_CLASS_TYPE = "Lora Loader (LoraManager)"
    REQUIRED_FIELDS = {"loras", "text", "lora_stack"}

    def process(self, workflow_parser) -> Dict[str, Any]:
        """Process a Lora Loader node to extract lora text and stack."""
        result = {}

        # Raw text field, passed through untouched.
        if "text" in self.inputs:
            result["lora_text"] = self.inputs.get("text", "")

        # Build tags for loras that are enabled and not placeholder entries.
        if "loras" in self.inputs:
            loras = self.inputs["loras"]
            active_loras = []

            if isinstance(loras, list):
                for lora in loras:
                    if (isinstance(lora, dict) and
                            lora.get("active", False) and
                            not lora.get("_isDummy", False) and
                            "name" in lora and "strength" in lora):
                        # NOTE(review): tag literal reconstructed from the
                        # standard <lora:name:strength> convention — the
                        # original f-string was garbled (empty); confirm
                        # against the parser's expected output.
                        active_loras.append(f"<lora:{lora['name']}:{lora['strength']}>")

            if active_loras:
                result["active_loras"] = " ".join(active_loras)

        # Merge in a stack produced by an upstream node (upstream stack first).
        if "lora_stack" in self.inputs:
            stack_result = self.resolve_input("lora_stack", workflow_parser)
            if isinstance(stack_result, dict) and "lora_stack" in stack_result:
                if "active_loras" in result:
                    result["active_loras"] = f"{stack_result['lora_stack']} {result['active_loras']}"
                else:
                    result["active_loras"] = stack_result["lora_stack"]

        # Expose the combined stack under the key downstream consumers read.
        if "active_loras" in result:
            result["lora_stack"] = result["active_loras"]

        return result
@register_processor
class LoraStackerProcessor(NodeProcessor):
    """Processor for Lora Stacker (LoraManager) nodes.

    Produces a dict that may contain:
        lora_text:    the raw text input on the node
        active_loras: space-separated lora tags for enabled loras
        lora_stack:   this node's loras combined with any upstream stack
                      (this node's loras first — note: opposite order to
                      LoraLoaderProcessor, preserved as-is)
    """

    NODE_CLASS_TYPE = "Lora Stacker (LoraManager)"
    REQUIRED_FIELDS = {"loras", "text", "lora_stack"}

    def process(self, workflow_parser) -> Dict[str, Any]:
        """Process a Lora Stacker node to extract its lora stack."""
        result = {}

        # Raw text field, passed through untouched.
        if "text" in self.inputs:
            result["lora_text"] = self.inputs.get("text", "")

        # Build tags for loras that are enabled and not placeholder entries.
        if "loras" in self.inputs:
            loras = self.inputs["loras"]
            active_loras = []

            if isinstance(loras, list):
                for lora in loras:
                    if (isinstance(lora, dict) and
                            lora.get("active", False) and
                            not lora.get("_isDummy", False) and
                            "name" in lora and "strength" in lora):
                        # NOTE(review): tag literal reconstructed from the
                        # standard <lora:name:strength> convention — the
                        # original f-string was garbled (empty); confirm
                        # against the parser's expected output.
                        active_loras.append(f"<lora:{lora['name']}:{lora['strength']}>")

            if active_loras:
                result["active_loras"] = " ".join(active_loras)

        # Combine with a stack from a referenced upstream node.
        if "lora_stack" in self.inputs:
            stack_result = self.resolve_input("lora_stack", workflow_parser)
            if isinstance(stack_result, dict) and "lora_stack" in stack_result:
                if "active_loras" in result:
                    result["lora_stack"] = f"{result['active_loras']} {stack_result['lora_stack']}"
                else:
                    result["lora_stack"] = stack_result["lora_stack"]
            elif "active_loras" in result:
                # Referenced node produced no stack; use our own loras.
                result["lora_stack"] = result["active_loras"]
        elif "active_loras" in result:
            # No lora_stack input at all; use our own loras.
            result["lora_stack"] = result["active_loras"]

        return result
@register_processor
class TriggerWordToggleProcessor(NodeProcessor):
    """Collects the active trigger words from a TriggerWord Toggle (LoraManager) node."""

    NODE_CLASS_TYPE = "TriggerWord Toggle (LoraManager)"
    REQUIRED_FIELDS = {"toggle_trigger_words", "group_mode"}

    def process(self, workflow_parser) -> Dict[str, Any]:
        """Return the comma-joined active trigger words, or None when there are none."""
        entries = self.inputs.get("toggle_trigger_words")
        if not isinstance(entries, list):
            return None

        # Keep only real (non-dummy) entries that are toggled on and carry text.
        active_words = [
            entry["text"]
            for entry in entries
            if isinstance(entry, dict)
            and entry.get("active", False)
            and not entry.get("_isDummy", False)
            and "text" in entry
        ]

        return ", ".join(active_words) if active_words else None
#!/usr/bin/env python3
"""Simple test script for the workflow parser"""

import json
import os
import sys
from pathlib import Path

# Reference files live under <project root>/refs.
project_path = Path(__file__).parent.parent.parent
refs_path = project_path / "refs"
prompt_path = refs_path / "prompt.json"
output_path = refs_path / "output.json"

print(f"Loading workflow from {prompt_path}")
print(f"Expected output from {output_path}")

# Load the workflow and the expected parser output.
with open(prompt_path, 'r', encoding='utf-8') as fh:
    workflow_json = json.load(fh)
with open(output_path, 'r', encoding='utf-8') as fh:
    expected_output = json.load(fh)

print("\nExpected output:")
print(json.dumps(expected_output, indent=2))

# Walk the graph by hand starting at the KSampler node ("3") to sanity-check
# our understanding of the workflow structure.
sampler_node_id = "3"
sampler_node = workflow_json.get(sampler_node_id, {})
print("\nSampler node:")
print(json.dumps(sampler_node, indent=2))

sampler_inputs = sampler_node.get("inputs", {})
seed = sampler_inputs.get("seed")
steps = sampler_inputs.get("steps")
cfg = sampler_inputs.get("cfg")

print(f"\nExtracted parameters:")
print(f"seed: {seed}")
print(f"steps: {steps}")
print(f"cfg_scale: {cfg}")

# The positive prompt is reached via [node_id, slot] references.
positive_ref = sampler_inputs.get("positive", [])
if isinstance(positive_ref, list) and len(positive_ref) == 2:
    positive_node_id, _slot = positive_ref
    positive_node = workflow_json.get(positive_node_id, {})

    print(f"\nPositive node ({positive_node_id}):")
    print(json.dumps(positive_node, indent=2))

    # Follow one more hop to the node that actually holds the text.
    text_ref = positive_node.get("inputs", {}).get("text", [])
    if isinstance(text_ref, list) and len(text_ref) == 2:
        text_node_id, _slot = text_ref
        text_node = workflow_json.get(text_node_id, {})

        print(f"\nText node ({text_node_id}):")
        print(json.dumps(text_node, indent=2))

print("\nTest completed.")
#!/usr/bin/env python3
"""Test script for the workflow parser"""

import os
import json
import logging
from pathlib import Path

from .workflow_parser import WorkflowParser

# Configure logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def test_parse_example():
    """Test parsing the example prompt.json file and compare with expected output"""
    # Locate the reference files relative to the project root.
    project_root = Path(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
    prompt_path = project_root / "refs" / "prompt.json"
    output_path = project_root / "refs" / "output.json"

    # Guard clauses: bail out early when either reference file is missing.
    if not prompt_path.exists():
        logger.error(f"Example prompt file not found: {prompt_path}")
        return False
    if not output_path.exists():
        logger.error(f"Example output file not found: {output_path}")
        return False

    try:
        with open(prompt_path, 'r', encoding='utf-8') as f:
            prompt_json = f.read()
        with open(output_path, 'r', encoding='utf-8') as f:
            expected_output = json.load(f)
    except Exception as e:
        logger.error(f"Failed to read example files: {e}")
        return False

    # Run the parser on the raw JSON string.
    result = WorkflowParser().parse_workflow(prompt_json)

    logger.info("Parsed workflow:")
    logger.info(json.dumps(result, indent=4))
    logger.info("Expected output:")
    logger.info(json.dumps(expected_output, indent=4))

    # Structural validation of the parsed result.
    for top_key in ("loras", "gen_params"):
        if top_key not in result:
            logger.error(f"Missing '{top_key}' field in result")
            return False

    required_params = [
        "prompt", "negative_prompt", "steps", "sampler",
        "cfg_scale", "seed", "size", "clip_skip"
    ]
    for param in required_params:
        if param not in result["gen_params"]:
            logger.error(f"Missing '{param}' in gen_params")
            return False

    logger.info("Test completed successfully!")
    return True

if __name__ == "__main__":
    test_parse_example()
#!/usr/bin/env python3
"""Script to verify the workflow structure and save the output to a file"""

import json
import os
from pathlib import Path

# Reference files live under <project root>/refs.
project_path = Path(__file__).parent.parent.parent
refs_path = project_path / "refs"
prompt_path = refs_path / "prompt.json"
output_path = refs_path / "output.json"
test_output_path = refs_path / "test_output.txt"


def _write_section(out, title, obj):
    # Emit "<title>:" followed by pretty-printed JSON and a blank line.
    out.write(f"{title}:\n")
    out.write(json.dumps(obj, indent=2) + "\n\n")


# Load the workflow and the expected parser output.
with open(prompt_path, 'r', encoding='utf-8') as fh:
    workflow_json = json.load(fh)
with open(output_path, 'r', encoding='utf-8') as fh:
    expected_output = json.load(fh)

with open(test_output_path, 'w', encoding='utf-8') as f:
    f.write(f"Loading workflow from {prompt_path}\n")
    f.write(f"Expected output from {output_path}\n\n")

    _write_section(f, "Expected output", expected_output)

    # The KSampler node ("3") holds the core generation parameters.
    sampler_node = workflow_json.get("3", {})
    _write_section(f, "Sampler node", sampler_node)

    sampler_inputs = sampler_node.get("inputs", {})
    f.write(f"Extracted parameters:\n")
    f.write(f"seed: {sampler_inputs.get('seed')}\n")
    f.write(f"steps: {sampler_inputs.get('steps')}\n")
    f.write(f"cfg_scale: {sampler_inputs.get('cfg')}\n\n")

    # Positive prompt chain: follow [node_id, slot] references.
    positive_ref = sampler_inputs.get("positive", [])
    if isinstance(positive_ref, list) and len(positive_ref) == 2:
        positive_node_id, _slot = positive_ref
        positive_node = workflow_json.get(positive_node_id, {})
        _write_section(f, f"Positive node ({positive_node_id})", positive_node)

        text_ref = positive_node.get("inputs", {}).get("text", [])
        if isinstance(text_ref, list) and len(text_ref) == 2:
            text_node_id, _slot = text_ref
            text_node = workflow_json.get(text_node_id, {})
            _write_section(f, f"Text node ({text_node_id})", text_node)

            # JoinStrings nodes combine two upstream strings; dump both.
            if text_node.get("class_type") == "JoinStrings":
                for input_name, label in (("string1", "String1"), ("string2", "String2")):
                    ref = text_node.get("inputs", {}).get(input_name, [])
                    if isinstance(ref, list) and len(ref) == 2:
                        ref_node_id, _slot = ref
                        _write_section(f, f"{label} node ({ref_node_id})",
                                       workflow_json.get(ref_node_id, {}))

    # Negative prompt node.
    negative_ref = sampler_inputs.get("negative", [])
    if isinstance(negative_ref, list) and len(negative_ref) == 2:
        negative_node_id, _slot = negative_ref
        _write_section(f, f"Negative node ({negative_node_id})",
                       workflow_json.get(negative_node_id, {}))

    # Collect every LoraManager loader/stacker node.
    lora_nodes = [
        (node_id, node_data)
        for node_id, node_data in workflow_json.items()
        if node_data.get("class_type") in ["Lora Loader (LoraManager)",
                                           "Lora Stacker (LoraManager)"]
    ]
    f.write(f"LoRA nodes ({len(lora_nodes)}):\n")
    for node_id, node_data in lora_nodes:
        f.write(f"\nLoRA node {node_id}:\n")
        f.write(json.dumps(node_data, indent=2) + "\n")

    f.write("\nTest completed.\n")

print(f"Test output written to {test_output_path}")
+ extension_manager = get_extension_manager(extensions_dir) + results = extension_manager.load_all_extensions() + + # Log the results + successful = sum(1 for status in results.values() if status) + logger.debug(f"Loaded {successful} of {len(results)} extensions") + + def parse_workflow(self, workflow_json: Union[str, Dict]) -> Dict[str, Any]: + """ + Parse a ComfyUI workflow JSON string or dict and extract generation parameters + + Args: + workflow_json: JSON string or dict containing the workflow + + Returns: + Dict containing extracted generation parameters + """ + # Reset state for this parsing operation + self.processed_nodes = {} + self.processing_nodes = set() + + # Load JSON if it's a string + if isinstance(workflow_json, str): + try: + self.workflow = json.loads(workflow_json) + except json.JSONDecodeError as e: + logger.error(f"Failed to parse workflow JSON: {e}") + return {} + else: + self.workflow = workflow_json + + if not self.workflow: + return {} + + # Find KSampler nodes as entry points + ksampler_nodes = self._find_nodes_by_class("KSampler") + + # Find LoraLoader nodes for lora information + lora_nodes = self._find_nodes_by_class("Lora Loader (LoraManager)") + + # Check if we need to register additional node types by scanning the workflow + self._check_for_unregistered_node_types() + + result = { + "gen_params": {} + } + + # Process KSampler nodes to get generation parameters + for node_id in ksampler_nodes: + gen_params = self.process_node(node_id) + if gen_params: + result["gen_params"].update(gen_params) + + # Process Lora nodes to get lora stack + lora_stack = "" + for node_id in lora_nodes: + lora_info = self.process_node(node_id) + if lora_info and "lora_stack" in lora_info: + if lora_stack: + lora_stack = f"{lora_stack} {lora_info['lora_stack']}" + else: + lora_stack = lora_info["lora_stack"] + + if lora_stack: + result["loras"] = lora_stack + + # Process CLIPSetLastLayer node for clip_skip + clip_layer_nodes = 
self._find_nodes_by_class("CLIPSetLastLayer") + for node_id in clip_layer_nodes: + clip_info = self.process_node(node_id) + if clip_info and "clip_skip" in clip_info: + result["gen_params"]["clip_skip"] = clip_info["clip_skip"] + + return result + + def _check_for_unregistered_node_types(self): + """Check for node types in the workflow that aren't registered yet""" + unknown_node_types = set() + + # Collect all unique node types in the workflow + for node_id, node_data in self.workflow.items(): + class_type = node_data.get("class_type") + if class_type and class_type not in NODE_PROCESSORS: + unknown_node_types.add(class_type) + + if unknown_node_types: + logger.debug(f"Found {len(unknown_node_types)} unregistered node types: {unknown_node_types}") + + def process_node(self, node_id: str) -> Any: + """ + Process a single node and its dependencies recursively + + Args: + node_id: The ID of the node to process + + Returns: + Processed data from the node + """ + # Check if already processed + if node_id in self.processed_nodes: + return self.processed_nodes[node_id] + + # Check for circular references + if node_id in self.processing_nodes: + logger.warning(f"Circular reference detected for node {node_id}") + return None + + # Mark as being processed + self.processing_nodes.add(node_id) + + # Get node data + node_data = self.workflow.get(node_id) + if not node_data: + logger.warning(f"Node {node_id} not found in workflow") + self.processing_nodes.remove(node_id) + return None + + class_type = node_data.get("class_type") + if not class_type: + logger.warning(f"Node {node_id} has no class_type") + self.processing_nodes.remove(node_id) + return None + + # Get the appropriate node processor + processor_class = NODE_PROCESSORS.get(class_type) + if not processor_class: + logger.debug(f"No processor for node type {class_type}") + self.processing_nodes.remove(node_id) + return None + + # Process the node + processor = processor_class(node_id, node_data, self.workflow) + result 
= processor.process(self) + + # Cache the result + self.processed_nodes[node_id] = result + + # Mark as processed + self.processing_nodes.remove(node_id) + + return result + + def _find_nodes_by_class(self, class_type: str) -> List[str]: + """ + Find all nodes of a particular class type in the workflow + + Args: + class_type: The node class type to find + + Returns: + List of node IDs matching the class type + """ + nodes = [] + for node_id, node_data in self.workflow.items(): + if node_data.get("class_type") == class_type: + nodes.append(node_id) + return nodes + + +def parse_workflow(workflow_json: Union[str, Dict], + load_extensions: bool = True, + extensions_dir: str = None) -> Dict[str, Any]: + """ + Helper function to parse a workflow JSON without having to create a parser instance + + Args: + workflow_json: JSON string or dict containing the workflow + load_extensions: Whether to load extensions automatically + extensions_dir: Optional path to extensions directory + + Returns: + Dict containing extracted generation parameters + """ + parser = WorkflowParser(load_extensions, extensions_dir) + return parser.parse_workflow(workflow_json) \ No newline at end of file diff --git a/refs/civitai_api_model_by_versionId.json b/refs/civitai_api_model_by_versionId.json new file mode 100644 index 00000000..2e096654 --- /dev/null +++ b/refs/civitai_api_model_by_versionId.json @@ -0,0 +1,101 @@ +{ + "id": 1387174, + "modelId": 1231067, + "name": "v1.0", + "createdAt": "2025-02-08T11:15:47.197Z", + "updatedAt": "2025-02-08T11:29:04.526Z", + "status": "Published", + "publishedAt": "2025-02-08T11:29:04.487Z", + "trainedWords": [ + "ppstorybook" + ], + "trainingStatus": null, + "trainingDetails": null, + "baseModel": "Flux.1 D", + "baseModelType": null, + "earlyAccessEndsAt": null, + "earlyAccessConfig": null, + "description": null, + "uploadType": "Created", + "usageControl": "Download", + "air": "urn:air:flux1:lora:civitai:1231067@1387174", + "stats": { + "downloadCount": 
1436, + "ratingCount": 0, + "rating": 0, + "thumbsUpCount": 316 + }, + "model": { + "name": "Vivid Impressions Storybook Style", + "type": "LORA", + "nsfw": false, + "poi": false + }, + "files": [ + { + "id": 1289799, + "sizeKB": 18829.1484375, + "name": "pp-storybook_rank2_bf16.safetensors", + "type": "Model", + "pickleScanResult": "Success", + "pickleScanMessage": "No Pickle imports", + "virusScanResult": "Success", + "virusScanMessage": null, + "scannedAt": "2025-02-08T11:21:04.247Z", + "metadata": { + "format": "SafeTensor", + "size": null, + "fp": null + }, + "hashes": { + "AutoV1": "F414C813", + "AutoV2": "9753338AB6", + "SHA256": "9753338AB693CA82BF89ED77A5D1912879E40051463EC6E330FB9866CE798668", + "CRC32": "A65AE7B3", + "BLAKE3": "A5F8AB95AC2486345E4ACCAE541FF19D97ED53EFB0A7CC9226636975A0437591", + "AutoV3": "34A22376739D" + }, + "primary": true, + "downloadUrl": "https://civitai.com/api/download/models/1387174" + } + ], + "images": [ + { + "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/42b875cf-c62b-41fa-a349-383b7f074351/width=832/56547310.jpeg", + "nsfwLevel": 1, + "width": 832, + "height": 1216, + "hash": "U5IiO6s-4Vn+0~EO^5xa00VsL#IU_O?E7yWC", + "type": "image", + "metadata": { + "hash": "U5IiO6s-4Vn+0~EO^5xa00VsL#IU_O?E7yWC", + "size": 1361590, + "width": 832, + "height": 1216 + }, + "meta": { + "Size": "832x1216", + "seed": 1116375220995209, + "Model": "flux_dev_fp8", + "steps": 23, + "hashes": { + "model": "" + }, + "prompt": "ppstorybook,A dreamy bunny hopping across a rainbow bridge, with fluffy clouds surrounding it and tiny birds flying alongside, rendered in a magical, soft-focus style with pastel hues and glowing accents.", + "Version": "ComfyUI", + "sampler": "DPM++ 2M", + "cfgScale": 3.5, + "clipSkip": 1, + "resources": [], + "Model hash": "" + }, + "availability": "Public", + "hasMeta": true, + "hasPositivePrompt": true, + "onSite": false, + "remixOfId": null + } + // more images here + ], + "downloadUrl": 
"https://civitai.com/api/download/models/1387174" +} \ No newline at end of file diff --git a/refs/jpeg_civitai_exif_userComment_example b/refs/jpeg_civitai_exif_userComment_example new file mode 100644 index 00000000..2bebe279 --- /dev/null +++ b/refs/jpeg_civitai_exif_userComment_example @@ -0,0 +1,29 @@ +a dynamic and dramatic digital artwork featuring a stylized anthropomorphic white tiger with striking yellow eyes. The tiger is depicted in a powerful stance, wielding a katana with one hand raised above its head. Its fur is detailed with black stripes, and its mane flows wildly, blending with the stormy background. The scene is set amidst swirling dark clouds and flashes of lightning, enhancing the sense of movement and energy. The composition is vertical, with the tiger positioned centrally, creating a sense of depth and intensity. The color palette is dominated by shades of blue, gray, and white, with bright highlights from the lightning. The overall style is reminiscent of fantasy or manga art, with a focus on dynamic action and dramatic lighting. 
+Negative prompt: +Steps: 30, Sampler: Undefined, CFG scale: 3.5, Seed: 90300501, Size: 832x1216, Clip skip: 2, Created Date: 2025-03-05T13:51:18.1770234Z, Civitai resources: [{"type":"checkpoint","modelVersionId":691639,"modelName":"FLUX","modelVersionName":"Dev"},{"type":"lora","weight":0.4,"modelVersionId":1202162,"modelName":"Velvet\u0027s Mythic Fantasy Styles | Flux \u002B Pony \u002B illustrious","modelVersionName":"Flux Gothic Lines"},{"type":"lora","weight":0.8,"modelVersionId":1470588,"modelName":"Velvet\u0027s Mythic Fantasy Styles | Flux \u002B Pony \u002B illustrious","modelVersionName":"Flux Retro"},{"type":"lora","weight":0.75,"modelVersionId":746484,"modelName":"Elden Ring - Yoshitaka Amano","modelVersionName":"V1"},{"type":"lora","weight":0.2,"modelVersionId":914935,"modelName":"Ink-style","modelVersionName":"ink-dynamic"},{"type":"lora","weight":0.2,"modelVersionId":1189379,"modelName":"Painterly Fantasy by ChronoKnight - [FLUX \u0026 IL]","modelVersionName":"FLUX"},{"type":"lora","weight":0.2,"modelVersionId":757030,"modelName":"Mezzotint Artstyle for Flux - by Ethanar","modelVersionName":"V1"}], Civitai metadata: {} + +, +masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, +dynamic angle, dutch angle, from below, epic half body portrait, gritty, wabi sabi, looking at viewer, woman is a geisha, parted lips, +holographic skin, holofoil glitter, faint, glowing, ethereal, neon hair, glowing hair, otherworldly glow, she is dangerous, + + + +Negative prompt: score_6, score_5, score_4, bad quality, worst quality, worst detail, sketch, censorship, furry, window, headphones, +Steps: 30, Sampler: Euler a, Schedule type: Simple, CFG scale: 7, Seed: 1405717592, Size: 832x1216, Model hash: 1ad6ca7f70, Model: waiNSFWIllustrious_v100, Denoising strength: 0.35, Hires CFG Scale: 5, Hires upscale: 1.3, Hires steps: 20, Hires upscaler: 4x-AnimeSharp, Lora hashes: "ck-shadow-circuit-IL: 88e247aa8c3d, 
ck-nc-cyberpunk-IL-000011: 935e6755554c, ck-neon-retrowave-IL: edafb9df7da1, ck-yoneyama-mai-IL-000014: 1b9305692a2e", Version: f2.0.1v1.10.1-1.10.1, Diffusion in Low Bits: Automatic (fp16 LoRA) + +masterpiece, best quality,high quality, newest, highres,8K,HDR,absurdres, 1girl, solo, steampunk aesthetic, mechanical monocle, long trench coat, leather gloves, brass accessories, intricate clockwork rifle, aiming at viewer, wind-blown scarf, high boots, fingerless gloves, pocket watch, corset, brown and gold color scheme, industrial cityscape, smoke and gears, atmospheric lighting, depth of field, dynamic pose, dramatic composition, detailed background, foreshortening, detailed background, dynamic pose, dynamic composition,dutch angle, detailed backgroud,foreshortening,blurry edges MythAn1m3 +Negative prompt: worst quality, normal quality, anatomical nonsense, bad anatomy,interlocked fingers, extra fingers,watermark,simple background, loli, +Steps: 35, Sampler: DPM++ 2M SDE, Schedule type: Karras, CFG scale: 4, Seed: 3537159932, Size: 1072x1376, Model hash: c364bbdae9, Model: waiNSFWIllustrious_v110, Clip skip: 2, ADetailer model: face_yolov8n.pt, ADetailer confidence: 0.3, ADetailer dilate erode: 4, ADetailer mask blur: 4, ADetailer denoising strength: 0.4, ADetailer inpaint only masked: True, ADetailer inpaint padding: 32, ADetailer version: 24.8.0, Lora hashes: "iLLMythAn1m3Style: d3480076057b", Version: f2.0.1v1.10.1-previous-519-g44eb4ea8, Module 1: sdxl.vae + +Masterpiece, best quality, high quality, newest, highres, 8K, HDR, absurdres, 1girl, solo, futuristic warrior, sleek exosuit with glowing energy cores, long braided hair flowing behind, gripping a high-tech bow with an energy arrow drawn, standing on a floating platform overlooking a massive space station, planets and nebulae in the distance, soft glow from distant stars, cinematic depth, foreshortening, dynamic pose, dramatic sci-fi lighting. 
+Negative prompt: worst quality, normal quality, anatomical nonsense, bad anatomy,interlocked fingers, extra fingers,watermark,simple background, loli, +Steps: 20, Sampler: euler_ancestral_karras, CFG scale: 8.0, Seed: 691121152183439, Model: il\waiNSFWIllustrious_v110.safetensors, Model hash: c3688ee04c, Lora_0 Model name: iLLMythAn1m3Style.safetensors, Lora_0 Model hash: ba7a040786, Lora_0 Strength model: 1.0, Lora_0 Strength clip: 1.0, Hashes: {"model": "c3688ee04c", "lora:iLLMythAn1m3Style": "ba7a040786"} + +Masterpiece, best quality, high quality, newest, highres, 8K, HDR, absurdres, 1boy, solo, gothic horror, pale vampire lord in regal, intricately detailed robes, crimson eyes glowing under the dim candlelight of a grand but decayed castle hall, holding a silver goblet filled with an unknown substance, a massive stained-glass window shattered behind him, cold mist rolling in, dramatic lighting, dark yet elegant aesthetic, foreshortening, cinematic perspective. +Negative prompt: worst quality, normal quality, anatomical nonsense, bad anatomy,interlocked fingers, extra fingers,watermark,simple background, loli, +Steps: 20, Sampler: euler_ancestral_karras, CFG scale: 8.0, Seed: 290117945770094, Model: il\waiNSFWIllustrious_v110.safetensors, Model hash: c3688ee04c, Lora_0 Model name: iLLMythAn1m3Style.safetensors, Lora_0 Model hash: ba7a040786, Lora_0 Strength model: 0.6, Lora_0 Strength clip: 0.7000000000000001, Hashes: {"model": "c3688ee04c", "lora:iLLMythAn1m3Style": "ba7a040786"} + +bo-exposure, An impressionistic oil painting in the style of J.M.W. Turner, depicting a ghostly ship sailing through a sea of swirling golden mist. The waves crash and dissolve into abstract, fiery strokes of orange and deep indigo, blurring the line between ocean and sky. The ship appears almost ethereal, as if drifting between worlds, lost in the ever-changing tides of memory and myth. 
The dynamic brushstrokes capture the relentless power of nature and the fleeting essence of time. +Negative prompt: +Steps: 25, Sampler: DPM++ 2M, CFG scale: 3.5, Seed: 1024252061321625, Size: 832x1216, Clip skip: 1, Model hash: , Model: flux_dev, Hashes: {"model": ""}, Version: ComfyUI \ No newline at end of file diff --git a/refs/output.json b/refs/output.json new file mode 100644 index 00000000..9bfc59f5 --- /dev/null +++ b/refs/output.json @@ -0,0 +1,13 @@ +{ + "loras": " ", + "gen_params": { + "prompt": "in the style of ck-rw, aorun, scales, makeup, bare shoulders, pointy ears, dress, claws, in the style of cksc, artist:moriimee, in the style of cknc, masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing", + "negative_prompt": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw", + "steps": "20", + "sampler": "euler_ancestral", + "cfg_scale": "8", + "seed": "241", + "size": "832x1216", + "clip_skip": "2" + } +} \ No newline at end of file diff --git a/refs/prompt.json b/refs/prompt.json new file mode 100644 index 00000000..c0982fff --- /dev/null +++ b/refs/prompt.json @@ -0,0 +1,313 @@ +{ + "3": { + "inputs": { + "seed": 241, + "steps": 20, + "cfg": 8, + "sampler_name": "euler_ancestral", + "scheduler": "karras", + "denoise": 1, + "model": [ + "56", + 0 + ], + "positive": [ + "6", + 0 + ], + "negative": [ + "7", + 0 + ], + "latent_image": [ + "5", + 0 + ] + }, + "class_type": "KSampler", + "_meta": { + "title": "KSampler" + } + }, + "4": { + "inputs": { + "ckpt_name": "il\\waiNSFWIllustrious_v110.safetensors" + }, + "class_type": "CheckpointLoaderSimple", + "_meta": { + "title": "Load Checkpoint" + } + }, + "5": { + "inputs": { + "width": 832, + "height": 1216, + "batch_size": 1 + }, + "class_type": 
"EmptyLatentImage", + "_meta": { + "title": "Empty Latent Image" + } + }, + "6": { + "inputs": { + "text": [ + "22", + 0 + ], + "clip": [ + "56", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "7": { + "inputs": { + "text": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw", + "clip": [ + "56", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "8": { + "inputs": { + "samples": [ + "3", + 0 + ], + "vae": [ + "4", + 2 + ] + }, + "class_type": "VAEDecode", + "_meta": { + "title": "VAE Decode" + } + }, + "14": { + "inputs": { + "images": [ + "8", + 0 + ] + }, + "class_type": "PreviewImage", + "_meta": { + "title": "Preview Image" + } + }, + "19": { + "inputs": { + "stop_at_clip_layer": -2, + "clip": [ + "4", + 1 + ] + }, + "class_type": "CLIPSetLastLayer", + "_meta": { + "title": "CLIP Set Last Layer" + } + }, + "21": { + "inputs": { + "string": "masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing", + "strip_newlines": false + }, + "class_type": "StringConstantMultiline", + "_meta": { + "title": "positive" + } + }, + "22": { + "inputs": { + "string1": [ + "55", + 0 + ], + "string2": [ + "21", + 0 + ], + "delimiter": ", " + }, + "class_type": "JoinStrings", + "_meta": { + "title": "Join Strings" + } + }, + "55": { + "inputs": { + "group_mode": true, + "toggle_trigger_words": [ + { + "text": "in the style of ck-rw", + "active": true + }, + { + "text": "aorun, scales, makeup, bare shoulders, pointy ears", + "active": true + }, + { + "text": "dress", + "active": true + }, + { + "text": "claws", + "active": true + }, + { + "text": "in the style of cksc", + "active": true + }, + { + "text": 
"artist:moriimee", + "active": true + }, + { + "text": "in the style of cknc", + "active": true + }, + { + "text": "__dummy_item__", + "active": false, + "_isDummy": true + }, + { + "text": "__dummy_item__", + "active": false, + "_isDummy": true + } + ], + "orinalMessage": "in the style of ck-rw,, aorun, scales, makeup, bare shoulders, pointy ears,, dress,, claws,, in the style of cksc,, artist:moriimee,, in the style of cknc", + "trigger_words": [ + "56", + 2 + ] + }, + "class_type": "TriggerWord Toggle (LoraManager)", + "_meta": { + "title": "TriggerWord Toggle (LoraManager)" + } + }, + "56": { + "inputs": { + "text": " ", + "loras": [ + { + "name": "ck-shadow-circuit-IL-000012", + "strength": 0.78, + "active": true + }, + { + "name": "MoriiMee_Gothic_Niji_Style_Illustrious_r1", + "strength": 0.45, + "active": true + }, + { + "name": "ck-nc-cyberpunk-IL-000011", + "strength": 0.4, + "active": true + }, + { + "name": "__dummy_item1__", + "strength": 0, + "active": false, + "_isDummy": true + }, + { + "name": "__dummy_item2__", + "strength": 0, + "active": false, + "_isDummy": true + } + ], + "model": [ + "4", + 0 + ], + "clip": [ + "4", + 1 + ], + "lora_stack": [ + "57", + 0 + ] + }, + "class_type": "Lora Loader (LoraManager)", + "_meta": { + "title": "Lora Loader (LoraManager)" + } + }, + "57": { + "inputs": { + "text": "", + "loras": [ + { + "name": "aorunIllstrious", + "strength": "0.90", + "active": true + }, + { + "name": "__dummy_item1__", + "strength": 0, + "active": false, + "_isDummy": true + }, + { + "name": "__dummy_item2__", + "strength": 0, + "active": false, + "_isDummy": true + } + ], + "lora_stack": [ + "59", + 0 + ] + }, + "class_type": "Lora Stacker (LoraManager)", + "_meta": { + "title": "Lora Stacker (LoraManager)" + } + }, + "59": { + "inputs": { + "text": "", + "loras": [ + { + "name": "ck-neon-retrowave-IL-000012", + "strength": 0.8, + "active": true + }, + { + "name": "__dummy_item1__", + "strength": 0, + "active": false, + "_isDummy": true 
+ }, + { + "name": "__dummy_item2__", + "strength": 0, + "active": false, + "_isDummy": true + } + ] + }, + "class_type": "Lora Stacker (LoraManager)", + "_meta": { + "title": "Lora Stacker (LoraManager)" + } + } +} \ No newline at end of file diff --git a/refs/recipe.json b/refs/recipe.json new file mode 100644 index 00000000..bcf1d6b8 --- /dev/null +++ b/refs/recipe.json @@ -0,0 +1,82 @@ +{ + "id": "0448c06d-de1b-46ab-975c-c5aa60d90dbc", + "file_path": "D:/Workspace/ComfyUI/models/loras/recipes/0448c06d-de1b-46ab-975c-c5aa60d90dbc.jpg", + "title": "a mysterious, steampunk-inspired character standing in a dramatic pose", + "modified": 1741837612.3931093, + "created_date": 1741492786.5581934, + "base_model": "Flux.1 D", + "loras": [ + { + "file_name": "ChronoDivinitiesFlux_r1", + "hash": "ddbc5abd00db46ad464f5e3ca85f8f7121bc14b594d6785f441d9b002fffe66a", + "strength": 0.8, + "modelVersionId": 1438879, + "modelName": "Chrono Divinities - By HailoKnight", + "modelVersionName": "Flux" + }, + { + "file_name": "flux.1_lora_flyway_ink-dynamic", + "hash": "4b4f3b469a0d5d3a04a46886abfa33daa37a905db070ccfbd10b345c6fb00eff", + "strength": 0.2, + "modelVersionId": 914935, + "modelName": "Ink-style", + "modelVersionName": "ink-dynamic" + }, + { + "file_name": "ck-painterly-fantasy-000017", + "hash": "48c67064e2936aec342580a2a729d91d75eb818e45ecf993b9650cc66c94c420", + "strength": 0.2, + "modelVersionId": 1189379, + "modelName": "Painterly Fantasy by ChronoKnight - [FLUX & IL]", + "modelVersionName": "FLUX" + }, + { + "file_name": "RetroAnimeFluxV1", + "hash": "8f43c31b6c3238ac44195c970d511d759c5893bddd00f59f42b8fe51e8e76fa0", + "strength": 0.8, + "modelVersionId": 806265, + "modelName": "Retro Anime Flux - Style", + "modelVersionName": "v1.0" + }, + { + "file_name": "Mezzotint_Artstyle_for_Flux_-_by_Ethanar", + "hash": "e6961502769123bf23a66c5c5298d76264fd6b9610f018319a0ccb091bfc308e", + "strength": 0.2, + "modelVersionId": 757030, + "modelName": "Mezzotint Artstyle for Flux - 
by Ethanar", + "modelVersionName": "V1" + }, + { + "file_name": "FluxMythG0thicL1nes", + "hash": "ecb03595de62bd6183a0dd2b38bea35669fd4d509f4bbae5aa0572cfb7ef4279", + "strength": 0.4, + "modelVersionId": 1202162, + "modelName": "Velvet's Mythic Fantasy Styles | Flux + Pony + illustrious", + "modelVersionName": "Flux Gothic Lines" + }, + { + "file_name": "Elden_Ring_-_Yoshitaka_Amano", + "hash": "c660c4c55320be7206cb6a917c59d8da3953cc07169fe10bda833a54ec0024f9", + "strength": 0.75, + "modelVersionId": 746484, + "modelName": "Elden Ring - Yoshitaka Amano", + "modelVersionName": "V1" + } + ], + "gen_params": { + "prompt": "a mysterious, steampunk-inspired character standing in a dramatic pose. The character is dressed in a long, intricately detailed dark coat with ornate patterns, a wide-brimmed hat, and leather boots. The face is partially obscured by the hat's shadow, adding to the enigmatic aura. The background showcases a large, antique clock with Roman numerals, surrounded by dynamic lightning and ethereal white birds, enhancing the fantastical atmosphere. The color palette is dominated by dark tones with striking contrasts of white and blue lightning, creating a sense of tension and energy. The overall composition is vertical, with the character centrally positioned, exuding a sense of power and mystery. 
hkchrono", + "negative_prompt": "", + "checkpoint": { + "type": "checkpoint", + "modelVersionId": 691639, + "modelName": "FLUX", + "modelVersionName": "Dev" + }, + "steps": "30", + "sampler": "Undefined", + "cfg_scale": "3.5", + "seed": "1472903449", + "size": "832x1216", + "clip_skip": "2" + } +} \ No newline at end of file diff --git a/refs/test_output.txt b/refs/test_output.txt new file mode 100644 index 00000000..c732f53c --- /dev/null +++ b/refs/test_output.txt @@ -0,0 +1,294 @@ +Loading workflow from D:\Workspace\ComfyUI\custom_nodes\ComfyUI-Lora-Manager\refs\prompt.json +Expected output from D:\Workspace\ComfyUI\custom_nodes\ComfyUI-Lora-Manager\refs\output.json + +Expected output: +{ + "loras": " ", + "gen_params": { + "prompt": "in the style of ck-rw, aorun, scales, makeup, bare shoulders, pointy ears, dress, claws, in the style of cksc, artist:moriimee, in the style of cknc, masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing", + "negative_prompt": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw", + "steps": "20", + "sampler": "euler_ancestral", + "cfg_scale": "8", + "seed": "241", + "size": "832x1216", + "clip_skip": "2" + } +} + +Sampler node: +{ + "inputs": { + "seed": 241, + "steps": 20, + "cfg": 8, + "sampler_name": "euler_ancestral", + "scheduler": "karras", + "denoise": 1, + "model": [ + "56", + 0 + ], + "positive": [ + "6", + 0 + ], + "negative": [ + "7", + 0 + ], + "latent_image": [ + "5", + 0 + ] + }, + "class_type": "KSampler", + "_meta": { + "title": "KSampler" + } +} + +Extracted parameters: +seed: 241 +steps: 20 +cfg_scale: 8 + +Positive node (6): +{ + "inputs": { + "text": [ + "22", + 0 + ], + "clip": [ + "56", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP 
Text Encode (Prompt)" + } +} + +Text node (22): +{ + "inputs": { + "string1": [ + "55", + 0 + ], + "string2": [ + "21", + 0 + ], + "delimiter": ", " + }, + "class_type": "JoinStrings", + "_meta": { + "title": "Join Strings" + } +} + +String1 node (55): +{ + "inputs": { + "group_mode": true, + "toggle_trigger_words": [ + { + "text": "in the style of ck-rw", + "active": true + }, + { + "text": "aorun, scales, makeup, bare shoulders, pointy ears", + "active": true + }, + { + "text": "dress", + "active": true + }, + { + "text": "claws", + "active": true + }, + { + "text": "in the style of cksc", + "active": true + }, + { + "text": "artist:moriimee", + "active": true + }, + { + "text": "in the style of cknc", + "active": true + }, + { + "text": "__dummy_item__", + "active": false, + "_isDummy": true + }, + { + "text": "__dummy_item__", + "active": false, + "_isDummy": true + } + ], + "orinalMessage": "in the style of ck-rw,, aorun, scales, makeup, bare shoulders, pointy ears,, dress,, claws,, in the style of cksc,, artist:moriimee,, in the style of cknc", + "trigger_words": [ + "56", + 2 + ] + }, + "class_type": "TriggerWord Toggle (LoraManager)", + "_meta": { + "title": "TriggerWord Toggle (LoraManager)" + } +} + +String2 node (21): +{ + "inputs": { + "string": "masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing", + "strip_newlines": false + }, + "class_type": "StringConstantMultiline", + "_meta": { + "title": "positive" + } +} + +Negative node (7): +{ + "inputs": { + "text": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw", + "clip": [ + "56", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } +} + +LoRA nodes (3): + +LoRA node 56: +{ + "inputs": { + "text": " ", + 
"loras": [ + { + "name": "ck-shadow-circuit-IL-000012", + "strength": 0.78, + "active": true + }, + { + "name": "MoriiMee_Gothic_Niji_Style_Illustrious_r1", + "strength": 0.45, + "active": true + }, + { + "name": "ck-nc-cyberpunk-IL-000011", + "strength": 0.4, + "active": true + }, + { + "name": "__dummy_item1__", + "strength": 0, + "active": false, + "_isDummy": true + }, + { + "name": "__dummy_item2__", + "strength": 0, + "active": false, + "_isDummy": true + } + ], + "model": [ + "4", + 0 + ], + "clip": [ + "4", + 1 + ], + "lora_stack": [ + "57", + 0 + ] + }, + "class_type": "Lora Loader (LoraManager)", + "_meta": { + "title": "Lora Loader (LoraManager)" + } +} + +LoRA node 57: +{ + "inputs": { + "text": "", + "loras": [ + { + "name": "aorunIllstrious", + "strength": "0.90", + "active": true + }, + { + "name": "__dummy_item1__", + "strength": 0, + "active": false, + "_isDummy": true + }, + { + "name": "__dummy_item2__", + "strength": 0, + "active": false, + "_isDummy": true + } + ], + "lora_stack": [ + "59", + 0 + ] + }, + "class_type": "Lora Stacker (LoraManager)", + "_meta": { + "title": "Lora Stacker (LoraManager)" + } +} + +LoRA node 59: +{ + "inputs": { + "text": "", + "loras": [ + { + "name": "ck-neon-retrowave-IL-000012", + "strength": 0.8, + "active": true + }, + { + "name": "__dummy_item1__", + "strength": 0, + "active": false, + "_isDummy": true + }, + { + "name": "__dummy_item2__", + "strength": 0, + "active": false, + "_isDummy": true + } + ] + }, + "class_type": "Lora Stacker (LoraManager)", + "_meta": { + "title": "Lora Stacker (LoraManager)" + } +} + +Test completed. 
diff --git a/static/css/base.css b/static/css/base.css index 2d305567..971d4d85 100644 --- a/static/css/base.css +++ b/static/css/base.css @@ -1,6 +1,8 @@ -/* 强制显示滚动条,防止页面跳动 */ -html { - overflow-y: scroll; +html, body { + margin: 0; + padding: 0; + height: 100%; + overflow: hidden; /* Disable default scrolling */ } /* 针对Firefox */ @@ -16,6 +18,7 @@ html { ::-webkit-scrollbar-track { background: transparent; + margin-top: 0; } ::-webkit-scrollbar-thumb { @@ -35,6 +38,7 @@ html { --lora-border: oklch(90% 0.02 256 / 0.15); --lora-text: oklch(95% 0.02 256); --lora-error: oklch(75% 0.32 29); + --lora-warning: oklch(75% 0.25 80); /* Add warning color for deleted LoRAs */ /* Spacing Scale */ --space-1: calc(8px * 1); @@ -43,6 +47,7 @@ html { /* Z-index Scale */ --z-base: 10; + --z-header: 100; --z-modal: 1000; --z-overlay: 2000; @@ -64,11 +69,14 @@ html { --lora-surface: oklch(25% 0.02 256 / 0.98); --lora-border: oklch(90% 0.02 256 / 0.15); --lora-text: oklch(98% 0.02 256); + --lora-warning: oklch(75% 0.25 80); /* Add warning color for dark theme too */ } body { - margin: 0; font-family: 'Segoe UI', sans-serif; background: var(--bg-color); color: var(--text-color); + display: flex; + flex-direction: column; + padding-top: 0; /* Remove the padding-top */ } diff --git a/static/css/components/card.css b/static/css/components/card.css index 84801ef3..e1d77d50 100644 --- a/static/css/components/card.css +++ b/static/css/components/card.css @@ -20,6 +20,9 @@ aspect-ratio: 896/1152; max-width: 260px; /* Adjusted from 320px to fit 5 cards */ margin: 0 auto; + cursor: pointer; /* Added from recipe-card */ + display: flex; /* Added from recipe-card */ + flex-direction: column; /* Added from recipe-card */ } .lora-card:hover { @@ -274,4 +277,55 @@ border-radius: var(--border-radius-xs); backdrop-filter: blur(2px); font-size: 0.85em; +} + +/* Recipe specific elements - migrated from recipe-card.css */ +.recipe-indicator { + position: absolute; + top: 6px; + left: 8px; + width: 24px; 
+ height: 24px; + background: var(--lora-primary); + border-radius: 50%; + display: flex; + align-items: center; + justify-content: center; + color: white; + font-weight: bold; + z-index: 2; +} + +.base-model-wrapper { + display: flex; + align-items: center; + gap: 8px; + margin-left: 32px; /* For accommodating the recipe indicator */ +} + +.lora-count { + display: flex; + align-items: center; + gap: 4px; + background: rgba(255, 255, 255, 0.2); + padding: 2px 8px; + border-radius: var(--border-radius-xs); + font-size: 0.85em; + position: relative; +} + +.lora-count.ready { + background: rgba(46, 204, 113, 0.3); +} + +.lora-count.missing { + background: rgba(231, 76, 60, 0.3); +} + +.placeholder-message { + grid-column: 1 / -1; + text-align: center; + padding: 2rem; + background: var(--lora-surface-alt); + border-radius: var(--border-radius-base); } \ No newline at end of file diff --git a/static/css/components/download-modal.css b/static/css/components/download-modal.css index ceab416e..6ba34053 100644 --- a/static/css/components/download-modal.css +++ b/static/css/components/download-modal.css @@ -23,12 +23,6 @@ color: var(--text-color); } -.error-message { - color: var(--lora-error); - font-size: 0.9em; - margin-top: 4px; -} - /* Version List Styles */ .version-list { max-height: 400px; @@ -130,50 +124,6 @@ gap: 4px; } -/* Local Version Badge */ -.local-badge { - display: inline-flex; - align-items: center; - background: var(--lora-accent); - color: var(--lora-text); - padding: 4px 8px; - border-radius: var(--border-radius-xs); - font-size: 0.8em; - font-weight: 500; - white-space: nowrap; - flex-shrink: 0; - position: relative; -} - -.local-badge i { - margin-right: 4px; - font-size: 0.9em; -} - -.local-path { - display: none; - position: absolute; - top: 100%; - right: 0; - background: var(--card-bg); - border: 1px solid var(--border-color); - border-radius: var(--border-radius-xs); - padding: var(--space-1); - margin-top: 4px; - font-size: 0.9em; - color: 
var(--text-color); - white-space: normal; - word-break: break-all; - box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); - z-index: 1; - min-width: 200px; - max-width: 300px; -} - -.local-badge:hover .local-path { - display: block; -} - /* Folder Browser Styles */ .folder-browser { border: 1px solid var(--border-color); @@ -251,47 +201,4 @@ .version-item.exists-locally { background: oklch(var(--lora-accent) / 0.05); border-left: 4px solid var(--lora-accent); -} - -.local-badge { - display: inline-flex; - align-items: center; - background: var(--lora-accent); - color: var(--lora-text); - padding: 4px 8px; - border-radius: var(--border-radius-xs); - font-size: 0.8em; - font-weight: 500; - white-space: nowrap; - flex-shrink: 0; - position: relative; -} - -.local-badge i { - margin-right: 4px; - font-size: 0.9em; -} - -.local-path { - display: none; - position: absolute; - top: 100%; - right: 0; - background: var(--card-bg); - border: 1px solid var(--border-color); - border-radius: var(--border-radius-xs); - padding: var(--space-1); - margin-top: 4px; - font-size: 0.9em; - color: var(--text-color); - white-space: normal; - word-break: break-all; - box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); - z-index: 1; - min-width: 200px; - max-width: 300px; -} - -.local-badge:hover .local-path { - display: block; -} \ No newline at end of file +} \ No newline at end of file diff --git a/static/css/components/header.css b/static/css/components/header.css new file mode 100644 index 00000000..9b5e8b92 --- /dev/null +++ b/static/css/components/header.css @@ -0,0 +1,167 @@ +.app-header { + background: var(--card-bg); + border-bottom: 1px solid var(--border-color); + position: fixed; + top: 0; + z-index: var(--z-header); + height: 48px; /* Reduced height */ + width: 100%; + box-shadow: 0 1px 3px rgba(0,0,0,0.05); +} + +.header-container { + max-width: 1400px; + margin: 0 auto; + padding: 0 15px; + display: flex; + align-items: center; + justify-content: space-between; + height: 100%; +} + +/* Logo 
and title styling */ +.header-branding { + display: flex; + align-items: center; + flex-shrink: 0; +} + +.logo-link { + display: flex; + align-items: center; + text-decoration: none; + color: var(--text-color); + gap: 8px; +} + +.app-logo { + width: 24px; + height: 24px; +} + +.app-title { + font-size: 1rem; + font-weight: 600; + margin: 0; +} + +/* Navigation styling */ +.main-nav { + display: flex; + gap: 0.5rem; + flex-shrink: 0; + margin-right: 1rem; +} + +.nav-item { + padding: 0.25rem 0.75rem; + border-radius: var(--border-radius-xs); + color: var(--text-color); + text-decoration: none; + display: flex; + align-items: center; + gap: 0.5rem; + transition: all 0.2s ease; + font-size: 0.9rem; +} + +.nav-item:hover { + background-color: var(--lora-surface-hover, oklch(95% 0.02 256)); +} + +.nav-item.active { + background-color: var(--lora-accent); + color: white; +} + +/* Header search */ +.header-search { + flex: 1; + max-width: 400px; + margin: 0 1rem; +} + +/* Header controls (formerly corner controls) */ +.header-controls { + display: flex; + align-items: center; + gap: 8px; + flex-shrink: 0; +} + +.header-controls > div { + width: 32px; + height: 32px; + border-radius: 50%; + background: var(--card-bg); + border: 1px solid var(--border-color); + color: var(--text-color); + display: flex; + align-items: center; + justify-content: center; + cursor: pointer; + transition: all 0.2s ease; + position: relative; +} + +.header-controls > div:hover { + background: var(--lora-accent); + color: white; + transform: translateY(-2px); +} + +.theme-toggle .light-icon { + opacity: 0; +} + +.theme-toggle .dark-icon { + opacity: 1; +} + +[data-theme="light"] .theme-toggle .light-icon { + opacity: 1; +} + +[data-theme="light"] .theme-toggle .dark-icon { + opacity: 0; +} + +/* Mobile adjustments */ +@media (max-width: 768px) { + .app-title { + display: none; /* Hide text title on mobile */ + } + + .header-controls { + gap: 4px; + } + + .header-controls > div { + width: 28px; + 
height: 28px; + } + + .header-search { + max-width: none; + margin: 0 0.5rem; + } + + .main-nav { + margin-right: 0.5rem; + } +} + +/* For very small screens */ +@media (max-width: 600px) { + .header-container { + padding: 0 8px; + } + + .main-nav { + display: none; /* Hide navigation on very small screens */ + } + + .header-search { + flex: 1; + } +} \ No newline at end of file diff --git a/static/css/components/import-modal.css b/static/css/components/import-modal.css new file mode 100644 index 00000000..76711dc4 --- /dev/null +++ b/static/css/components/import-modal.css @@ -0,0 +1,684 @@ +/* Import Modal Styles */ +.import-step { + margin: var(--space-2) 0; + transition: none !important; /* Disable any transitions that might affect display */ +} + +/* Import Mode Toggle */ +.import-mode-toggle { + display: flex; + margin-bottom: var(--space-3); + border-radius: var(--border-radius-sm); + overflow: hidden; + border: 1px solid var(--border-color); +} + +.toggle-btn { + flex: 1; + padding: 10px 16px; + background: var(--bg-color); + color: var(--text-color); + border: none; + cursor: pointer; + font-weight: 500; + display: flex; + align-items: center; + justify-content: center; + gap: 8px; + transition: background-color 0.2s, color 0.2s; +} + +.toggle-btn:first-child { + border-right: 1px solid var(--border-color); +} + +.toggle-btn.active { + background: var(--lora-accent); + color: var(--lora-text); +} + +.toggle-btn:hover:not(.active) { + background: var(--lora-surface); +} + +.import-section { + margin-bottom: var(--space-3); +} + +/* File Input Styles */ +.file-input-wrapper { + position: relative; + margin-bottom: var(--space-1); +} + +.file-input-wrapper input[type="file"] { + position: absolute; + width: 100%; + height: 100%; + opacity: 0; + cursor: pointer; + z-index: 2; +} + +.file-input-button { + display: flex; + align-items: center; + justify-content: center; + gap: 8px; + padding: 10px 16px; + background: var(--lora-accent); + color: var(--lora-text); 
+ border-radius: var(--border-radius-xs); + font-weight: 500; + cursor: pointer; + transition: background-color 0.2s; +} + +.file-input-button:hover { + background: oklch(from var(--lora-accent) l c h / 0.9); +} + +.file-input-wrapper:hover .file-input-button { + background: oklch(from var(--lora-accent) l c h / 0.9); +} + +/* Recipe Details Layout */ +.recipe-details-layout { + display: grid; + grid-template-columns: 200px 1fr; + gap: var(--space-3); + margin-bottom: var(--space-3); +} + +.recipe-image-container { + width: 100%; + height: 200px; + border-radius: var(--border-radius-sm); + overflow: hidden; + background: var(--lora-surface); + border: 1px solid var(--border-color); +} + +.recipe-image { + width: 100%; + height: 100%; + display: flex; + align-items: center; + justify-content: center; +} + +.recipe-image img { + max-width: 100%; + max-height: 100%; + object-fit: contain; +} + +.recipe-form-container { + display: flex; + flex-direction: column; + gap: var(--space-2); +} + +/* Tags Input Styles */ +.tag-input-container { + display: flex; + gap: 8px; + margin-bottom: var(--space-1); +} + +.tag-input-container input { + flex: 1; + padding: 8px; + border: 1px solid var(--border-color); + border-radius: var(--border-radius-xs); + background: var(--bg-color); + color: var(--text-color); +} + +.tags-container { + display: flex; + flex-wrap: wrap; + gap: 8px; + margin-top: var(--space-1); + min-height: 32px; +} + +.recipe-tag { + display: inline-flex; + align-items: center; + gap: 6px; + padding: 4px 10px; + background: var(--lora-surface); + border: 1px solid var(--border-color); + border-radius: var(--border-radius-xs); + font-size: 0.9em; +} + +.recipe-tag i { + cursor: pointer; + opacity: 0.7; + transition: opacity 0.2s; +} + +.recipe-tag i:hover { + opacity: 1; + color: var(--lora-error); +} + +.empty-tags { + color: var(--text-color); + opacity: 0.6; + font-size: 0.9em; + font-style: italic; +} + +/* LoRAs List Styles */ +.loras-list { + max-height: 
300px; + overflow-y: auto; + margin: var(--space-2) 0; + display: flex; + flex-direction: column; + gap: 12px; + padding: 1px; +} + +.lora-item { + display: flex; + gap: var(--space-2); + padding: var(--space-2); + border: 1px solid var(--border-color); + border-radius: var(--border-radius-sm); + background: var(--bg-color); + margin: 1px; +} + +.lora-item.exists-locally { + background: oklch(var(--lora-accent) / 0.05); + border-left: 4px solid var(--lora-accent); +} + +.lora-item.missing-locally { + border-left: 4px solid var(--lora-error); +} + +.lora-item.is-deleted { + background: oklch(var(--lora-warning) / 0.05); + border-left: 4px solid var(--lora-warning); +} + +.lora-thumbnail { + width: 80px; + height: 80px; + flex-shrink: 0; + border-radius: var(--border-radius-xs); + overflow: hidden; + background: var(--bg-color); +} + +.lora-thumbnail img { + width: 100%; + height: 100%; + object-fit: cover; +} + +.lora-content { + display: flex; + flex-direction: column; + gap: 8px; + flex: 1; + min-width: 0; +} + +.lora-header { + display: flex; + align-items: flex-start; + justify-content: space-between; + gap: var(--space-2); +} + +.lora-content h3 { + margin: 0; + font-size: 1.1em; + color: var(--text-color); + flex: 1; +} + +.lora-info { + display: flex; + flex-wrap: wrap; + gap: 8px; + align-items: center; + font-size: 0.9em; +} + +.lora-info .base-model { + background: oklch(var(--lora-accent) / 0.1); + color: var(--lora-accent); + padding: 2px 8px; + border-radius: var(--border-radius-xs); +} + +.lora-version { + font-size: 0.9em; + color: var(--text-color); + opacity: 0.7; +} + +.weight-badge { + background: var(--lora-surface); + padding: 2px 8px; + border-radius: var(--border-radius-xs); + font-size: 0.85em; +} + +/* Missing LoRAs List */ +.missing-loras-list { + max-height: 200px; + overflow-y: auto; + margin: var(--space-2) 0; + display: flex; + flex-direction: column; + gap: 8px; + padding: var(--space-1); + border: 1px solid var(--border-color); + 
border-radius: var(--border-radius-sm); + background: var(--lora-surface); +} + +.missing-lora-item { + display: flex; + gap: var(--space-2); + padding: var(--space-1); + border-bottom: 1px solid var(--border-color); +} + +.missing-lora-item:last-child { + border-bottom: none; +} + +.missing-badge { + display: inline-flex; + align-items: center; + background: var(--lora-error); + color: white; + padding: 4px 8px; + border-radius: var(--border-radius-xs); + font-size: 0.8em; + font-weight: 500; + white-space: nowrap; + flex-shrink: 0; +} + +.missing-badge i { + margin-right: 4px; + font-size: 0.9em; +} + +.lora-count-info { + font-size: 0.85em; + opacity: 0.8; + font-weight: normal; + margin-left: 8px; +} + +/* Location Selection Styles */ +.location-selection { + margin: var(--space-2) 0; + padding: var(--space-2); + background: var(--lora-surface); + border-radius: var(--border-radius-sm); +} + +/* Reuse folder browser and path preview styles from download-modal.css */ +.folder-browser { + border: 1px solid var(--border-color); + border-radius: var(--border-radius-xs); + padding: var(--space-1); + max-height: 200px; + overflow-y: auto; +} + +.folder-item { + padding: 8px; + cursor: pointer; + border-radius: var(--border-radius-xs); + transition: background-color 0.2s; +} + +.folder-item:hover { + background: var(--lora-surface); +} + +.folder-item.selected { + background: oklch(var(--lora-accent) / 0.1); + border: 1px solid var(--lora-accent); +} + +.path-preview { + margin-bottom: var(--space-3); + padding: var(--space-2); + background: var(--bg-color); + border-radius: var(--border-radius-sm); + border: 1px dashed var(--border-color); +} + +.path-preview label { + display: block; + margin-bottom: 8px; + color: var(--text-color); + font-size: 0.9em; + opacity: 0.8; +} + +.path-display { + padding: var(--space-1); + color: var(--text-color); + font-family: monospace; + font-size: 0.9em; + line-height: 1.4; + white-space: pre-wrap; + word-break: break-all; + 
opacity: 0.85; + background: var(--lora-surface); + border-radius: var(--border-radius-xs); +} + +/* Input Group Styles */ +.input-group { + margin-bottom: var(--space-2); +} + +.input-with-button { + display: flex; + gap: 8px; +} + +.input-with-button input { + flex: 1; + min-width: 0; +} + +.input-with-button button { + flex-shrink: 0; + white-space: nowrap; + padding: 8px 16px; + background: var(--lora-accent); + color: var(--lora-text); + border: none; + border-radius: var(--border-radius-xs); + cursor: pointer; + transition: background-color 0.2s; +} + +.input-with-button button:hover { + background: oklch(from var(--lora-accent) l c h / 0.9); +} + +.input-group label { + display: block; + margin-bottom: 8px; + color: var(--text-color); +} + +.input-group input, +.input-group select { + width: 100%; + padding: 8px; + border: 1px solid var(--border-color); + border-radius: var(--border-radius-xs); + background: var(--bg-color); + color: var(--text-color); +} + +/* Dark theme adjustments */ +[data-theme="dark"] .lora-item { + background: var(--lora-surface); +} + +[data-theme="dark"] .recipe-tag { + background: var(--card-bg); +} + +/* Responsive adjustments */ +@media (max-width: 768px) { + .recipe-details-layout { + grid-template-columns: 1fr; + } + + .recipe-image-container { + height: 150px; + } +} + +/* Size badge for LoRA items */ +.size-badge { + background: var(--lora-surface); + padding: 2px 8px; + border-radius: var(--border-radius-xs); + font-size: 0.85em; + color: var(--text-color); + opacity: 0.8; +} + +/* Improved Missing LoRAs summary section */ +.missing-loras-summary { + margin-bottom: var(--space-3); + padding: var(--space-2); + background: var(--bg-color); + border-radius: var(--border-radius-sm); + border: 1px solid var(--border-color); +} + +.summary-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 0; +} + +.summary-header h3 { + margin: 0; + font-size: 1.1em; + color: var(--text-color); + 
display: flex; + align-items: center; + gap: var(--space-1); +} + +.lora-count-badge { + font-size: 0.9em; + font-weight: normal; + opacity: 0.7; +} + +.total-size-badge { + font-size: 0.85em; + font-weight: normal; + background: var(--lora-surface); + padding: 2px 8px; + border-radius: var(--border-radius-xs); + margin-left: var(--space-1); +} + +.toggle-list-btn { + background: none; + border: none; + cursor: pointer; + color: var(--text-color); + padding: 4px 8px; + border-radius: var(--border-radius-xs); +} + +.toggle-list-btn:hover { + background: var(--lora-surface); +} + +.missing-loras-list { + max-height: 200px; + overflow-y: auto; + transition: max-height 0.3s ease, margin-top 0.3s ease, padding-top 0.3s ease; + margin-top: 0; + padding-top: 0; +} + +.missing-loras-list.collapsed { + max-height: 0; + overflow: hidden; + padding-top: 0; +} + +.missing-loras-list:not(.collapsed) { + margin-top: var(--space-1); + padding-top: var(--space-1); + border-top: 1px solid var(--border-color); +} + +.missing-lora-item { + display: flex; + justify-content: space-between; + align-items: center; + padding: 8px; + border-bottom: 1px solid var(--border-color); +} + +.missing-lora-item:last-child { + border-bottom: none; +} + +.missing-lora-info { + display: flex; + flex-direction: column; + gap: 4px; +} + +.missing-lora-name { + font-weight: 500; +} + +.lora-base-model { + font-size: 0.85em; + color: var(--lora-accent); + background: oklch(var(--lora-accent) / 0.1); + padding: 2px 6px; + border-radius: var(--border-radius-xs); + display: inline-block; +} + +.missing-lora-size { + font-size: 0.9em; + color: var(--text-color); + opacity: 0.8; +} + +/* Recipe name input select-all behavior */ +#recipeName:focus { + outline: 2px solid var(--lora-accent); +} + +/* Prevent layout shift with scrollbar */ +.modal-content { + overflow-y: scroll; /* Always show scrollbar */ + scrollbar-gutter: stable; /* Reserve space for scrollbar */ +} + +/* For browsers that don't support 
scrollbar-gutter */ +@supports not (scrollbar-gutter: stable) { + .modal-content { + padding-right: calc(var(--space-2) + var(--scrollbar-width)); /* Add extra padding for scrollbar */ + } +} + +/* Deleted LoRA styles - Fix layout issues */ +.lora-item.is-deleted { + background: oklch(var(--lora-warning) / 0.05); + border-left: 4px solid var(--lora-warning); +} + +.deleted-badge { + display: inline-flex; + align-items: center; + background: var(--lora-warning); + color: white; + padding: 4px 8px; + border-radius: var(--border-radius-xs); + font-size: 0.8em; + font-weight: 500; + white-space: nowrap; + flex-shrink: 0; +} + +.deleted-badge i { + margin-right: 4px; + font-size: 0.9em; +} + +.exclude-lora-checkbox { + display: none; +} + +/* Deleted LoRAs warning - redesigned to not interfere with modal buttons */ +.deleted-loras-warning { + display: flex; + align-items: flex-start; + gap: 12px; + padding: 12px 16px; + background: oklch(var(--lora-warning) / 0.1); + border: 1px solid var(--lora-warning); + border-radius: var(--border-radius-sm); + color: var(--text-color); + margin-bottom: var(--space-2); +} + +.warning-icon { + color: var(--lora-warning); + font-size: 1.2em; + padding-top: 2px; +} + +.warning-content { + flex: 1; +} + +.warning-title { + font-weight: 600; + margin-bottom: 4px; +} + +.warning-text { + font-size: 0.9em; + line-height: 1.4; +} + +/* Remove the old warning-message styles that were causing layout issues */ +.warning-message { + display: none; /* Hide the old style */ +} + +/* Update deleted badge to be more prominent */ +.deleted-badge { + display: inline-flex; + align-items: center; + background: var(--lora-warning); + color: white; + padding: 4px 8px; + border-radius: var(--border-radius-xs); + font-size: 0.8em; + font-weight: 500; + white-space: nowrap; + flex-shrink: 0; +} + +.deleted-badge i { + margin-right: 4px; + font-size: 0.9em; +} + +/* Error message styling */ +.error-message { + color: var(--lora-error); + font-size: 0.9em; + 
margin-top: 8px; + min-height: 20px; /* Ensure there's always space for the error message */ + font-weight: 500; +} diff --git a/static/css/components/loading.css b/static/css/components/loading.css index 34c28765..e118c4a9 100644 --- a/static/css/components/loading.css +++ b/static/css/components/loading.css @@ -56,6 +56,53 @@ transition: width 200ms ease-out; } +/* Enhanced progress display */ +.progress-details-container { + margin-top: var(--space-3); + width: 100%; + text-align: left; +} + +.overall-progress-label { + font-size: 0.9rem; + margin-bottom: var(--space-1); + color: var(--text-color); +} + +.current-item-progress { + margin-top: var(--space-2); +} + +.current-item-label { + font-size: 0.9rem; + margin-bottom: var(--space-1); + color: var(--text-color); + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.current-item-bar-container { + height: 8px; + background-color: var(--lora-border); + border-radius: 4px; + overflow: hidden; + margin-bottom: var(--space-1); +} + +.current-item-bar { + height: 100%; + background-color: var(--lora-accent); + transition: width 200ms ease-out; + width: 0%; +} + +.current-item-percent { + font-size: 0.8rem; + color: var(--text-color-secondary, var(--text-color)); + opacity: 0.7; +} + @keyframes spin { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } @@ -63,7 +110,8 @@ @media (prefers-reduced-motion: reduce) { .lora-card, - .progress-bar { + .progress-bar, + .current-item-bar { transition: none; } } \ No newline at end of file diff --git a/static/css/components/lora-modal.css b/static/css/components/lora-modal.css index 16250e94..0e3245c0 100644 --- a/static/css/components/lora-modal.css +++ b/static/css/components/lora-modal.css @@ -593,56 +593,59 @@ /* Model name field styles - complete replacement */ .model-name-field { - display: flex; - align-items: center; - gap: var(--space-2); - width: calc(100% - 40px); /* Reduce width to avoid overlap with close button */ - 
position: relative; /* Add position relative for absolute positioning of save button */ + display: none; } -.model-name-field h2 { +/* New Model Name Header Styles */ +.model-name-header { + display: flex; + align-items: center; + width: calc(100% - 40px); /* Avoid overlap with close button */ + position: relative; +} + +.model-name-content { margin: 0; padding: var(--space-1); border-radius: var(--border-radius-xs); - transition: background-color 0.2s; - flex: 1; - font-size: 1.5em !important; /* Increased and forced size */ - font-weight: 600; /* Make it bolder */ - min-height: 1.5em; - box-sizing: border-box; - border: 1px solid transparent; + font-size: 1.5em !important; + font-weight: 600; line-height: 1.2; - color: var(--text-color); /* Ensure correct color */ -} - -.model-name-field h2:hover { - background: oklch(var(--lora-accent) / 0.1); - cursor: text; -} - -.model-name-field h2:focus { + color: var(--text-color); + border: 1px solid transparent; outline: none; - background: var(--bg-color); + flex: 1; +} + +.model-name-content:focus { border: 1px solid var(--lora-accent); + background: var(--bg-color); } -.model-name-field .save-btn { - position: absolute; - right: 10px; /* Position closer to the end of the field */ - top: 50%; - transform: translateY(-50%); +.edit-model-name-btn { + background: transparent; + border: none; + color: var(--text-color); opacity: 0; - transition: opacity 0.2s; + cursor: pointer; + padding: 2px 5px; + border-radius: var(--border-radius-xs); + transition: all 0.2s ease; + margin-left: var(--space-1); } -.model-name-field:hover .save-btn, -.model-name-field h2:focus ~ .save-btn { - opacity: 1; +.edit-model-name-btn.visible, +.model-name-header:hover .edit-model-name-btn { + opacity: 0.5; } -/* Ensure close button is accessible */ -.modal-content .close { - z-index: 10; /* Ensure close button is above other elements */ +.edit-model-name-btn:hover { + opacity: 0.8 !important; + background: rgba(0, 0, 0, 0.05); +} + 
+[data-theme="dark"] .edit-model-name-btn:hover { + background: rgba(255, 255, 255, 0.05); } /* Tab System Styling */ @@ -796,12 +799,6 @@ display: none !important; } -.error-message { - color: var(--lora-error); - text-align: center; - padding: var(--space-2); -} - .no-examples { text-align: center; padding: var(--space-3); @@ -913,7 +910,6 @@ /* Updated Model Tags styles - improved visibility in light theme */ .model-tags-container { position: relative; - margin-top: 4px; } .model-tags-compact { diff --git a/static/css/components/modal.css b/static/css/components/modal.css index a8684865..141713ef 100644 --- a/static/css/components/modal.css +++ b/static/css/components/modal.css @@ -2,13 +2,13 @@ .modal { display: none; position: fixed; - top: 0; + top: 48px; /* Start below the header */ left: 0; width: 100%; - height: 100%; + height: calc(100% - 48px); /* Adjust height to exclude header */ background: rgba(0, 0, 0, 0.2); /* 调整为更淡的半透明黑色 */ z-index: var(--z-modal); - overflow: hidden; /* 改为 hidden,防止双滚动条 */ + overflow: auto; /* Change from hidden to auto to allow scrolling */ } /* 当模态窗口打开时,禁止body滚动 */ @@ -23,8 +23,8 @@ body.modal-open { position: relative; max-width: 800px; height: auto; - max-height: 90vh; - margin: 2rem auto; + max-height: calc(90vh - 48px); /* Adjust to account for header height */ + margin: 1rem auto; /* Keep reduced top margin */ background: var(--lora-surface); border-radius: var(--border-radius-base); padding: var(--space-3); @@ -443,4 +443,43 @@ input:checked + .toggle-slider:before { .nsfw-blur:hover { filter: blur(8px); +} + +/* Add styles for delete preview image */ +.delete-preview { + max-width: 150px; + margin: 0 auto var(--space-2); + overflow: hidden; +} + +.delete-preview img { + width: 100%; + height: auto; + max-height: 150px; + object-fit: contain; + border-radius: var(--border-radius-sm); +} + +.delete-info { + text-align: center; +} + +.delete-info h3 { + margin-bottom: var(--space-1); + word-break: break-word; +} + 
+.delete-info p { + margin: var(--space-1) 0; + font-size: 0.9em; + opacity: 0.8; +} + +.delete-note { + font-size: 0.85em; + color: var(--text-color); + opacity: 0.7; + font-style: italic; + margin-top: var(--space-1); + text-align: center; } \ No newline at end of file diff --git a/static/css/components/recipe-card.css b/static/css/components/recipe-card.css new file mode 100644 index 00000000..f61f6bb1 --- /dev/null +++ b/static/css/components/recipe-card.css @@ -0,0 +1,184 @@ +.recipe-tag-container { + display: flex; + flex-wrap: wrap; + gap: 0.5rem; + margin-bottom: 1rem; +} + +.recipe-tag { + background: var(--lora-surface-hover); + color: var(--lora-text-secondary); + padding: 0.25rem 0.5rem; + border-radius: var(--border-radius-sm); + font-size: 0.8rem; + cursor: pointer; + transition: all 0.2s ease; +} + +.recipe-tag:hover, .recipe-tag.active { + background: var(--lora-primary); + color: var(--lora-text-on-primary); +} + +.recipe-card { + position: relative; + background: var(--lora-surface); + border-radius: var(--border-radius-base); + overflow: hidden; + box-shadow: var(--shadow-sm); + transition: all 0.2s ease; + aspect-ratio: 896/1152; + cursor: pointer; + display: flex; + flex-direction: column; +} + +.recipe-card:hover { + transform: translateY(-3px); + box-shadow: var(--shadow-md); +} + +.recipe-card:focus-visible { + outline: 2px solid var(--lora-accent); + outline-offset: 2px; +} + +.recipe-indicator { + position: absolute; + top: 6px; + left: 8px; + width: 24px; + height: 24px; + background: var(--lora-primary); + border-radius: 50%; + display: flex; + align-items: center; + justify-content: center; + color: white; + font-weight: bold; + z-index: 2; +} + +.recipe-grid { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(250px, 1fr)); + gap: 1.5rem; + margin-top: 1.5rem; +} + +.placeholder-message { + grid-column: 1 / -1; + text-align: center; + padding: 2rem; + background: var(--lora-surface-alt); + border-radius: 
var(--border-radius-base); +} + +.card-preview { + position: relative; + width: 100%; + height: 100%; + border-radius: var(--border-radius-base); + overflow: hidden; +} + +.card-preview img { + width: 100%; + height: 100%; + object-fit: cover; + object-position: center top; +} + +.card-header { + position: absolute; + top: 0; + left: 0; + right: 0; + background: linear-gradient(oklch(0% 0 0 / 0.75), transparent 85%); + backdrop-filter: blur(8px); + color: white; + padding: var(--space-1); + display: flex; + justify-content: space-between; + align-items: center; + z-index: 1; + min-height: 20px; +} + +.base-model-wrapper { + display: flex; + align-items: center; + gap: 8px; + margin-left: 32px; +} + +.card-actions { + display: flex; + gap: 8px; +} + +.card-actions i { + cursor: pointer; + opacity: 0.8; + transition: opacity 0.2s ease; +} + +.card-actions i:hover { + opacity: 1; +} + +.card-footer { + position: absolute; + bottom: 0; + left: 0; + right: 0; + background: linear-gradient(transparent 15%, oklch(0% 0 0 / 0.75)); + backdrop-filter: blur(8px); + color: white; + padding: var(--space-1); + display: flex; + justify-content: space-between; + align-items: flex-start; + min-height: 32px; + gap: var(--space-1); +} + +.lora-count { + display: flex; + align-items: center; + gap: 4px; + background: rgba(255, 255, 255, 0.2); + padding: 2px 8px; + border-radius: var(--border-radius-xs); + font-size: 0.85em; + position: relative; +} + +.lora-count.ready { + background: rgba(46, 204, 113, 0.3); +} + +.lora-count.missing { + background: rgba(231, 76, 60, 0.3); +} + +/* 响应式设计 */ +@media (max-width: 1400px) { + .recipe-grid { + grid-template-columns: repeat(auto-fill, minmax(240px, 1fr)); + } + + .recipe-card { + max-width: 240px; + } +} + +@media (max-width: 768px) { + .recipe-grid { + grid-template-columns: minmax(260px, 1fr); + } + + .recipe-card { + max-width: 100%; + } +} \ No newline at end of file diff --git a/static/css/components/recipe-modal.css 
b/static/css/components/recipe-modal.css new file mode 100644 index 00000000..912a595c --- /dev/null +++ b/static/css/components/recipe-modal.css @@ -0,0 +1,416 @@ +.recipe-modal-header { + display: flex; + flex-direction: column; + justify-content: flex-start; + align-items: flex-start; + border-bottom: 1px solid var(--lora-border); +} + +/* Recipe Tags styles */ +.recipe-tags-container { + position: relative; + margin-top: 6px; + margin-bottom: 10px; +} + +.recipe-tags-compact { + display: flex; + flex-wrap: nowrap; + gap: 6px; + align-items: center; +} + +.recipe-tag-compact { + background: rgba(0, 0, 0, 0.03); + border: 1px solid rgba(0, 0, 0, 0.1); + border-radius: var(--border-radius-xs); + padding: 2px 8px; + font-size: 0.75em; + color: var(--text-color); + white-space: nowrap; +} + +[data-theme="dark"] .recipe-tag-compact { + background: rgba(255, 255, 255, 0.03); + border: 1px solid var(--lora-border); +} + +.recipe-tag-more { + background: var(--lora-accent); + color: var(--lora-text); + border-radius: var(--border-radius-xs); + padding: 2px 8px; + font-size: 0.75em; + cursor: pointer; + white-space: nowrap; + font-weight: 500; +} + +.recipe-tags-tooltip { + position: absolute; + top: calc(100% + 8px); + left: 0; + background: var(--card-bg); + border: 1px solid var(--border-color); + border-radius: var(--border-radius-sm); + box-shadow: 0 3px 8px rgba(0, 0, 0, 0.15); + padding: 10px 14px; + max-width: 400px; + z-index: 10; + opacity: 0; + visibility: hidden; + transform: translateY(-4px); + transition: all 0.2s ease; + pointer-events: none; +} + +.recipe-tags-tooltip.visible { + opacity: 1; + visibility: visible; + transform: translateY(0); + pointer-events: auto; +} + +.tooltip-content { + display: flex; + flex-wrap: wrap; + gap: 6px; + max-height: 200px; + overflow-y: auto; +} + +.tooltip-tag { + background: rgba(0, 0, 0, 0.03); + border: 1px solid rgba(0, 0, 0, 0.1); + border-radius: var(--border-radius-xs); + padding: 3px 8px; + font-size: 0.75em; + 
color: var(--text-color); +} + +[data-theme="dark"] .tooltip-tag { + background: rgba(255, 255, 255, 0.03); + border: 1px solid var(--lora-border); +} + +/* Top Section: Preview and Gen Params */ +.recipe-top-section { + display: grid; + grid-template-columns: 280px 1fr; + gap: var(--space-2); + flex-shrink: 0; + margin-bottom: var(--space-2); +} + +/* Recipe Preview */ +.recipe-preview-container { + width: 100%; + height: 360px; + border-radius: var(--border-radius-sm); + overflow: hidden; + background: var(--lora-surface); + border: 1px solid var(--border-color); + display: flex; + align-items: center; + justify-content: center; +} + +.recipe-preview-container img { + max-width: 100%; + max-height: 100%; + object-fit: contain; +} + +/* Generation Parameters */ +.recipe-gen-params { + height: 360px; + display: flex; + flex-direction: column; +} + +.recipe-gen-params h3 { + margin-top: 0; + margin-bottom: var(--space-2); + font-size: 1.2em; + color: var(--text-color); + padding-bottom: var(--space-1); + border-bottom: 1px solid var(--border-color); + flex-shrink: 0; +} + +.gen-params-container { + display: flex; + flex-direction: column; + gap: var(--space-2); + overflow-y: auto; + flex: 1; +} + +.param-group { + display: flex; + flex-direction: column; + gap: 8px; +} + +.param-header { + display: flex; + justify-content: space-between; + align-items: center; +} + +.param-header label { + font-weight: 500; + color: var(--text-color); +} + +.copy-btn { + background: none; + border: none; + color: var(--text-color); + opacity: 0.6; + cursor: pointer; + padding: 4px 8px; + border-radius: var(--border-radius-xs); + transition: all 0.2s; +} + +.copy-btn:hover { + opacity: 1; + background: var(--lora-surface); +} + +.param-content { + background: var(--lora-surface); + border: 1px solid var(--border-color); + border-radius: var(--border-radius-xs); + padding: var(--space-2); + color: var(--text-color); + font-size: 0.9em; + line-height: 1.5; + max-height: 150px; + 
overflow-y: auto; + white-space: pre-wrap; + word-break: break-word; +} + +/* Other Parameters */ +.other-params { + display: flex; + flex-wrap: wrap; + gap: 8px; + margin-top: var(--space-1); +} + +.param-tag { + background: var(--lora-surface); + border: 1px solid var(--border-color); + border-radius: var(--border-radius-xs); + padding: 4px 8px; + font-size: 0.85em; + color: var(--text-color); + display: flex; + align-items: center; + gap: 6px; +} + +.param-tag .param-name { + font-weight: 500; + opacity: 0.8; +} + +/* Bottom Section: Resources */ +.recipe-bottom-section { + max-height: 320px; + display: flex; + flex-direction: column; + border-top: 1px solid var(--border-color); + padding-top: var(--space-2); +} + +.recipe-section-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: var(--space-2); + padding-bottom: var(--space-1); + border-bottom: 1px solid var(--border-color); + flex-shrink: 0; +} + +.recipe-section-header h3 { + margin: 0; + font-size: 1.2em; + color: var(--text-color); + display: flex; + align-items: center; + gap: 8px; +} + +.recipe-status { + display: inline-flex; + align-items: center; + font-size: 0.85em; + padding: 4px 8px; + border-radius: var(--border-radius-xs); + margin-left: var(--space-1); +} + +.recipe-status.ready { + background: oklch(var(--lora-accent) / 0.1); + color: var(--lora-accent); +} + +.recipe-status.missing { + background: oklch(var(--lora-error) / 0.1); + color: var(--lora-error); +} + +.recipe-status i { + margin-right: 4px; +} + +.recipe-section-actions { + display: flex; + align-items: center; + gap: var(--space-1); +} + +#recipeLorasCount { + font-size: 0.9em; + color: var(--text-color); + opacity: 0.8; + display: flex; + align-items: center; + gap: 6px; +} + +#recipeLorasCount i { + font-size: 1em; +} + +/* LoRAs List */ +.recipe-loras-list { + display: flex; + flex-direction: column; + gap: 10px; + overflow-y: auto; + flex: 1; +} + +.recipe-lora-item { + display: 
flex; + gap: var(--space-2); + padding: 10px var(--space-2); + border: 1px solid var(--border-color); + border-radius: var(--border-radius-sm); + background: var(--bg-color); +} + +.recipe-lora-item.exists-locally { + background: oklch(var(--lora-accent) / 0.05); + border-left: 4px solid var(--lora-accent); +} + +.recipe-lora-item.missing-locally { + border-left: 4px solid var(--lora-error); +} + +.recipe-lora-thumbnail { + width: 46px; + height: 46px; + flex-shrink: 0; + border-radius: var(--border-radius-xs); + overflow: hidden; + background: var(--bg-color); +} + +.recipe-lora-thumbnail img { + width: 100%; + height: 100%; + object-fit: cover; +} + +.recipe-lora-content { + display: flex; + flex-direction: column; + gap: 3px; + flex: 1; + min-width: 0; +} + +.recipe-lora-header { + display: flex; + align-items: flex-start; + justify-content: space-between; + gap: var(--space-2); +} + +.recipe-lora-content h4 { + margin: 0; + font-size: 1em; + color: var(--text-color); + flex: 1; +} + +.recipe-lora-info { + display: flex; + flex-wrap: wrap; + gap: 8px; + align-items: center; + font-size: 0.85em; +} + +.recipe-lora-info .base-model { + background: oklch(var(--lora-accent) / 0.1); + color: var(--lora-accent); + padding: 2px 8px; + border-radius: var(--border-radius-xs); +} + +.recipe-lora-version { + font-size: 0.85em; + color: var(--text-color); + opacity: 0.7; +} + +.recipe-lora-weight { + background: var(--lora-surface); + padding: 2px 8px; + border-radius: var(--border-radius-xs); + font-size: 0.85em; + color: var(--lora-accent); +} + +.missing-badge { + display: inline-flex; + align-items: center; + background: var(--lora-error); + color: white; + padding: 3px 6px; + border-radius: var(--border-radius-xs); + font-size: 0.75em; + font-weight: 500; + white-space: nowrap; + flex-shrink: 0; +} + +.missing-badge i { + margin-right: 4px; + font-size: 0.9em; +} + +/* Responsive adjustments */ +@media (max-width: 768px) { + .recipe-top-section { + 
grid-template-columns: 1fr; + } + + .recipe-preview-container { + height: 200px; + } + + .recipe-gen-params { + height: auto; + max-height: 300px; + } +} diff --git a/static/css/components/search-filter.css b/static/css/components/search-filter.css index f18abfeb..7734b85c 100644 --- a/static/css/components/search-filter.css +++ b/static/css/components/search-filter.css @@ -1,9 +1,7 @@ /* Search Container Styles */ .search-container { position: relative; - width: 250px; - margin-left: auto; - flex-shrink: 0; /* 防止搜索框被压缩 */ + width: 100%; display: flex; align-items: center; gap: 4px; @@ -12,14 +10,14 @@ /* 调整搜索框样式以匹配其他控件 */ .search-container input { width: 100%; - padding: 6px 75px 6px 12px; /* Increased right padding to accommodate both buttons */ - border: 1px solid oklch(65% 0.02 256); /* 更深的边框颜色,提高对比度 */ + padding: 6px 35px 6px 12px; /* Reduced right padding */ + border: 1px solid oklch(65% 0.02 256); border-radius: var(--border-radius-sm); background: var(--lora-surface); color: var(--text-color); font-size: 0.9em; height: 32px; - box-sizing: border-box; /* 确保padding不会增加总宽度 */ + box-sizing: border-box; } .search-container input:focus { @@ -34,7 +32,7 @@ transform: translateY(-50%); color: oklch(var(--text-color) / 0.5); pointer-events: none; - line-height: 1; /* 防止图标影响容器高度 */ + line-height: 1; } /* 修改清空按钮样式 */ @@ -47,8 +45,8 @@ cursor: pointer; border: none; background: none; - padding: 4px 8px; /* 增加点击区域 */ - display: none; /* 默认隐藏 */ + padding: 4px 8px; + display: none; line-height: 1; transition: color 0.2s ease; } @@ -144,19 +142,19 @@ /* Filter Panel Styles */ .filter-panel { - position: absolute; - top: 140px; /* Adjust to be closer to the filter button */ + position: fixed; right: 20px; - width: 300px; + top: 50px; /* Position below header */ + width: 320px; background-color: var(--card-bg); border: 1px solid var(--border-color); border-radius: var(--border-radius-base); box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1); - z-index: var(--z-overlay); /* Increase 
z-index to be above cards */ + z-index: var(--z-overlay); padding: 16px; transition: transform 0.3s ease, opacity 0.3s ease; transform-origin: top right; - max-height: calc(100vh - 160px); + max-height: calc(100vh - 70px); /* Adjusted for header height */ overflow-y: auto; } @@ -312,7 +310,7 @@ width: calc(100% - 40px); left: 20px; right: 20px; - top: 140px; + top: 160px; /* Adjusted for mobile layout */ } } @@ -351,10 +349,10 @@ /* Search Options Panel */ .search-options-panel { - position: absolute; - top: 140px; - right: 65px; /* Position it closer to the search options button */ - width: 280px; /* Slightly wider to accommodate tags better */ + position: fixed; + right: 20px; + top: 50px; /* Position below header */ + width: 280px; background-color: var(--card-bg); border: 1px solid var(--border-color); border-radius: var(--border-radius-base); @@ -363,6 +361,7 @@ padding: 16px; transition: transform 0.3s ease, opacity 0.3s ease; transform-origin: top right; + display: block; /* Ensure it's block by default */ } .search-options-panel.hidden { @@ -507,4 +506,15 @@ input:checked + .slider:before { .slider.round:before { border-radius: 50%; +} + +/* Mobile adjustments */ +@media (max-width: 768px) { + .search-options-panel, + .filter-panel { + width: calc(100% - 40px); + left: 20px; + right: 20px; + top: 160px; /* Adjusted for mobile layout */ + } } \ No newline at end of file diff --git a/static/css/components/shared.css b/static/css/components/shared.css new file mode 100644 index 00000000..1c7965a8 --- /dev/null +++ b/static/css/components/shared.css @@ -0,0 +1,49 @@ +/* Local Version Badge */ +.local-badge { + display: inline-flex; + align-items: center; + background: var(--lora-accent); + color: var(--lora-text); + padding: 4px 8px; + border-radius: var(--border-radius-xs); + font-size: 0.8em; + font-weight: 500; + white-space: nowrap; + flex-shrink: 0; + position: relative; +} + +.local-badge i { + margin-right: 4px; + font-size: 0.9em; +} + +.local-path { + 
display: none; + position: absolute; + top: 100%; + right: 0; + background: var(--card-bg); + border: 1px solid var(--border-color); + border-radius: var(--border-radius-xs); + padding: var(--space-1); + margin-top: 4px; + font-size: 0.9em; + color: var(--text-color); + white-space: normal; + word-break: break-all; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); + z-index: 1; + min-width: 200px; + max-width: 300px; +} + +.local-badge:hover .local-path { + display: block; +} + +.error-message { + color: var(--lora-error); + font-size: 0.9em; + margin-top: 4px; +} \ No newline at end of file diff --git a/static/css/components/support-modal.css b/static/css/components/support-modal.css index 55dc64af..16e37c55 100644 --- a/static/css/components/support-modal.css +++ b/static/css/components/support-modal.css @@ -141,7 +141,7 @@ .support-toggle:hover { background: var(--lora-accent); - color: white; + color: var(--lora-error) !important; transform: translateY(-2px); } diff --git a/static/css/layout.css b/static/css/layout.css index e4f8a74c..4a2a5fbd 100644 --- a/static/css/layout.css +++ b/static/css/layout.css @@ -1,7 +1,18 @@ +.page-content { + height: calc(100vh - 48px); /* Full height minus header */ + margin-top: 48px; /* Push down below header */ + overflow-y: auto; /* Enable scrolling here */ + width: 100%; + position: relative; + overflow-y: scroll; +} + .container { max-width: 1400px; margin: 20px auto; padding: 0 15px; + position: relative; + z-index: var(--z-base); } .controls { @@ -14,69 +25,17 @@ .actions { display: flex; align-items: center; + justify-content: space-between; gap: var(--space-2); flex-wrap: nowrap; width: 100%; } -/* Search and filter styles moved to components/search-filter.css */ - -/* Update corner-controls for collapsible behavior */ -.corner-controls { - position: fixed; - top: 20px; - right: 20px; - z-index: var(--z-overlay); - display: flex; - flex-direction: column; - align-items: center; - transition: all 0.3s ease; -} - 
-.corner-controls-toggle { - width: 36px; - height: 36px; - border-radius: 50%; - background: var(--card-bg); - border: 1px solid var(--border-color); - color: var(--text-color); +.action-buttons { display: flex; align-items: center; - justify-content: center; - cursor: pointer; - transition: all 0.2s ease; - z-index: 2; - margin-bottom: 10px; -} - -.corner-controls-toggle:hover { - background: var(--lora-accent); - color: white; - transform: translateY(-2px); -} - -.corner-controls-items { - display: flex; - flex-direction: column; - gap: 10px; - opacity: 0; - transform: translateY(-10px) scale(0.9); - transition: all 0.3s ease; - pointer-events: none; -} - -/* Expanded state */ -.corner-controls.expanded .corner-controls-items { - opacity: 1; - transform: translateY(0) scale(1); - pointer-events: all; -} - -/* Expanded state - only expand on hover if not already expanded by click */ -.corner-controls:hover:not(.expanded) .corner-controls-items { - opacity: 1; - transform: translateY(0) scale(1); - pointer-events: all; + gap: var(--space-2); + flex-wrap: nowrap; } /* Ensure hidden class works properly */ @@ -84,46 +43,6 @@ display: none !important; } -/* Update toggle button styles */ -.update-toggle { - width: 36px; - height: 36px; - border-radius: 50%; - background: var(--card-bg); - border: 1px solid var(--border-color); - color: var(--text-color); /* Changed from var(--lora-accent) to match other toggles */ - display: flex; - align-items: center; - justify-content: center; - cursor: pointer; - transition: all 0.2s ease; - position: relative; -} - -.update-toggle:hover { - background: var(--lora-accent); - color: white; - transform: translateY(-2px); -} - -/* Update badge styles */ -.update-badge { - position: absolute; - top: -3px; - right: -3px; - background-color: var(--lora-error); - width: 8px; - height: 8px; - border-radius: 50%; - box-shadow: 0 0 0 2px var(--card-bg); -} - -/* Badge on corner toggle */ -.corner-badge { - top: 0; - right: 0; -} - 
.folder-tags-container { position: relative; width: 100%; @@ -131,11 +50,14 @@ } .folder-tags { + display: flex; + gap: 4px; + padding: 2px 0; + flex-wrap: wrap; transition: max-height 0.3s ease, opacity 0.2s ease; max-height: 150px; /* Limit height to prevent overflow */ opacity: 1; overflow-y: auto; /* Enable vertical scrolling */ - padding-right: 40px; /* Make space for the toggle button */ margin-bottom: 5px; /* Add margin below the tags */ } @@ -144,13 +66,15 @@ opacity: 0; margin: 0; padding-bottom: 0; + overflow: hidden; +} + +.toggle-folders-container { + margin-left: auto; } /* Toggle Folders Button */ .toggle-folders-btn { - position: absolute; - bottom: 0; /* 固定在容器底部 */ - right: 0; /* 固定在容器右侧 */ width: 36px; height: 36px; border-radius: 50%; @@ -162,7 +86,6 @@ justify-content: center; cursor: pointer; transition: all 0.3s ease; - z-index: 2; } .toggle-folders-btn:hover { @@ -175,25 +98,18 @@ transition: transform 0.3s ease; } -/* 折叠状态样式 */ -.folder-tags.collapsed + .toggle-folders-btn { - position: static; - margin-right: auto; /* 确保按钮在左侧 */ - transform: translateY(0); +/* Icon-only button style */ +.icon-only { + min-width: unset !important; + width: 36px !important; + padding: 0 !important; } -.folder-tags.collapsed + .toggle-folders-btn i { +/* Rotate icon when folders are collapsed */ +.folder-tags.collapsed ~ .actions .toggle-folders-btn i { transform: rotate(180deg); } -/* 文件夹标签样式 */ -.folder-tags { - display: flex; - gap: 4px; - padding: 2px 0; - flex-wrap: wrap; -} - /* Add custom scrollbar for better visibility */ .folder-tags::-webkit-scrollbar { width: 6px; @@ -263,124 +179,32 @@ transform: translateY(-2px); } -.theme-toggle { - width: 36px; - height: 36px; - border-radius: 50%; - background: var(--card-bg); - border: 1px solid var(--border-color); - color: var(--text-color); - display: flex; - align-items: center; - justify-content: center; - cursor: pointer; - transition: all 0.2s ease; -} - -.theme-toggle:hover { - background: 
var(--lora-accent); - color: white; - transform: translateY(-2px); -} - -.support-toggle { - width: 36px; - height: 36px; - border-radius: 50%; - background: var(--card-bg); - border: 1px solid var(--border-color); - color: var(--lora-error); - display: flex; - align-items: center; - justify-content: center; - cursor: pointer; - transition: all 0.2s ease; -} - -.support-toggle:hover { - background: var(--lora-error); - color: white; - transform: translateY(-2px); -} - -.support-toggle i { - font-size: 1.1em; - position: relative; - top: 1px; - left: -0.5px; -} - -.theme-toggle img { - width: 20px; - height: 20px; -} - -.theme-toggle .theme-icon { - width: 20px; - height: 20px; - position: absolute; - transition: opacity 0.2s ease; -} - -.theme-toggle .light-icon { - opacity: 0; -} - -.theme-toggle .dark-icon { - opacity: 1; -} - -[data-theme="light"] .theme-toggle .light-icon { - opacity: 1; -} - -[data-theme="light"] .theme-toggle .dark-icon { - opacity: 0; -} - @media (max-width: 768px) { .actions { flex-wrap: wrap; gap: var(--space-1); } - - .controls { - flex-direction: column; - gap: 15px; + + .action-buttons { + flex-wrap: wrap; + gap: var(--space-1); + width: 100%; + } + + .toggle-folders-container { + margin-left: 0; + width: 100%; + display: flex; + justify-content: flex-end; } .folder-tags-container { order: -1; } - .toggle-folders-btn { - position: absolute; - bottom: 0; - right: 0; - transform: none; /* 移除transform,防止hover时的位移 */ - } - .toggle-folders-btn:hover { transform: none; /* 移动端下禁用hover效果 */ } - - .folder-tags.collapsed + .toggle-folders-btn { - position: relative; - transform: none; - } - - .corner-controls { - top: 10px; - right: 10px; - } - - .corner-controls-items { - display: none; - } - - .corner-controls.expanded .corner-controls-items { - display: flex; - } .back-to-top { bottom: 60px; /* Give some extra space from bottom on mobile */ diff --git a/static/css/style.css b/static/css/style.css index fd27e5ba..b48ee6c2 100644 --- 
a/static/css/style.css +++ b/static/css/style.css @@ -5,6 +5,7 @@ @import 'layout.css'; /* Import Components */ +@import 'components/header.css'; @import 'components/card.css'; @import 'components/modal.css'; @import 'components/download-modal.css'; @@ -16,6 +17,7 @@ @import 'components/support-modal.css'; @import 'components/search-filter.css'; @import 'components/bulk.css'; +@import 'components/shared.css'; .initialization-notice { display: flex; diff --git a/static/images/android-chrome-192x192.png b/static/images/android-chrome-192x192.png new file mode 100644 index 00000000..ec93ff36 Binary files /dev/null and b/static/images/android-chrome-192x192.png differ diff --git a/static/images/android-chrome-512x512.png b/static/images/android-chrome-512x512.png new file mode 100644 index 00000000..f508e6d4 Binary files /dev/null and b/static/images/android-chrome-512x512.png differ diff --git a/static/images/site.webmanifest b/static/images/site.webmanifest index 45dc8a20..c46983d8 100644 --- a/static/images/site.webmanifest +++ b/static/images/site.webmanifest @@ -1 +1 @@ -{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"} \ No newline at end of file +{"name":"","short_name":"","icons":[{"src":"/loras_static/images/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/loras_static/images/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"} \ No newline at end of file diff --git a/static/js/api/loraApi.js b/static/js/api/loraApi.js index 86278c21..69aba234 100644 --- a/static/js/api/loraApi.js +++ b/static/js/api/loraApi.js @@ -1,50 +1,62 @@ -import { state } from '../state/index.js'; +import { state, getCurrentPageState } from '../state/index.js'; 
import { showToast } from '../utils/uiHelpers.js'; import { createLoraCard } from '../components/LoraCard.js'; import { initializeInfiniteScroll } from '../utils/infiniteScroll.js'; import { showDeleteModal } from '../utils/modalUtils.js'; import { toggleFolder } from '../utils/uiHelpers.js'; -export async function loadMoreLoras(boolUpdateFolders = false) { - if (state.isLoading || !state.hasMore) return; +export async function loadMoreLoras(resetPage = false, updateFolders = false) { + const pageState = getCurrentPageState(); - state.isLoading = true; + if (pageState.isLoading || (!pageState.hasMore && !resetPage)) return; + + pageState.isLoading = true; try { + // Reset to first page if requested + if (resetPage) { + pageState.currentPage = 1; + // Clear grid if resetting + const grid = document.getElementById('loraGrid'); + if (grid) grid.innerHTML = ''; + initializeInfiniteScroll(); + } + const params = new URLSearchParams({ - page: state.currentPage, + page: pageState.currentPage, page_size: 20, - sort_by: state.sortBy + sort_by: pageState.sortBy }); - // 使用 state 中的 searchManager 获取递归搜索状态 - const isRecursiveSearch = state.searchManager?.isRecursiveSearch ?? 
false; - - if (state.activeFolder !== null) { - params.append('folder', state.activeFolder); - params.append('recursive', isRecursiveSearch.toString()); + if (pageState.activeFolder !== null) { + params.append('folder', pageState.activeFolder); } // Add search parameters if there's a search term - const searchInput = document.getElementById('searchInput'); - if (searchInput && searchInput.value.trim()) { - params.append('search', searchInput.value.trim()); + if (pageState.filters?.search) { + params.append('search', pageState.filters.search); params.append('fuzzy', 'true'); + + // Add search option parameters if available + if (pageState.searchOptions) { + params.append('search_filename', pageState.searchOptions.filename.toString()); + params.append('search_modelname', pageState.searchOptions.modelname.toString()); + params.append('search_tags', (pageState.searchOptions.tags || false).toString()); + params.append('recursive', (pageState.searchOptions?.recursive ?? false).toString()); + } } // Add filter parameters if active - if (state.filters) { - if (state.filters.tags && state.filters.tags.length > 0) { + if (pageState.filters) { + if (pageState.filters.tags && pageState.filters.tags.length > 0) { // Convert the array of tags to a comma-separated string - params.append('tags', state.filters.tags.join(',')); + params.append('tags', pageState.filters.tags.join(',')); } - if (state.filters.baseModel && state.filters.baseModel.length > 0) { + if (pageState.filters.baseModel && pageState.filters.baseModel.length > 0) { // Convert the array of base models to a comma-separated string - params.append('base_models', state.filters.baseModel.join(',')); + params.append('base_models', pageState.filters.baseModel.join(',')); } } - console.log('Loading loras with params:', params.toString()); - const response = await fetch(`/api/loras?${params}`); if (!response.ok) { throw new Error(`Failed to fetch loras: ${response.statusText}`); @@ -53,13 +65,13 @@ export async function 
loadMoreLoras(boolUpdateFolders = false) { const data = await response.json(); console.log('Received data:', data); - if (data.items.length === 0 && state.currentPage === 1) { + if (data.items.length === 0 && pageState.currentPage === 1) { const grid = document.getElementById('loraGrid'); grid.innerHTML = '
No loras found in this folder
'; - state.hasMore = false; + pageState.hasMore = false; } else if (data.items.length > 0) { - state.hasMore = state.currentPage < data.total_pages; - state.currentPage++; + pageState.hasMore = pageState.currentPage < data.total_pages; + pageState.currentPage++; appendLoraCards(data.items); const sentinel = document.getElementById('scroll-sentinel'); @@ -67,10 +79,10 @@ export async function loadMoreLoras(boolUpdateFolders = false) { state.observer.observe(sentinel); } } else { - state.hasMore = false; + pageState.hasMore = false; } - if (boolUpdateFolders && data.folders) { + if (updateFolders && data.folders) { updateFolderTags(data.folders); } @@ -78,7 +90,7 @@ export async function loadMoreLoras(boolUpdateFolders = false) { console.error('Error loading loras:', error); showToast('Failed to load loras: ' + error.message, 'error'); } finally { - state.isLoading = false; + pageState.isLoading = false; } } @@ -87,7 +99,8 @@ function updateFolderTags(folders) { if (!folderTagsContainer) return; // Keep track of currently selected folder - const currentFolder = state.activeFolder; + const pageState = getCurrentPageState(); + const currentFolder = pageState.activeFolder; // Create HTML for folder tags const tagsHTML = folders.map(folder => { @@ -260,31 +273,19 @@ export function appendLoraCards(loras) { loras.forEach(lora => { const card = createLoraCard(lora); - if (sentinel) { - grid.insertBefore(card, sentinel); - } else { - grid.appendChild(card); - } + grid.appendChild(card); }); } -export async function resetAndReload(boolUpdateFolders = false) { - console.log('Resetting with state:', { ...state }); - - state.currentPage = 1; - state.hasMore = true; - state.isLoading = false; - - const grid = document.getElementById('loraGrid'); - grid.innerHTML = ''; - - const sentinel = document.createElement('div'); - sentinel.id = 'scroll-sentinel'; - grid.appendChild(sentinel); +export async function resetAndReload(updateFolders = false) { + const pageState = 
getCurrentPageState(); + console.log('Resetting with state:', { ...pageState }); + // Initialize infinite scroll - will reset the observer initializeInfiniteScroll(); - await loadMoreLoras(boolUpdateFolders); + // Load more loras with reset flag + await loadMoreLoras(true, updateFolders); } export async function refreshLoras() { diff --git a/static/js/checkpoints.js b/static/js/checkpoints.js new file mode 100644 index 00000000..ea149a2f --- /dev/null +++ b/static/js/checkpoints.js @@ -0,0 +1,36 @@ +import { appCore } from './core.js'; +import { state, initPageState } from './state/index.js'; + +// Initialize the Checkpoints page +class CheckpointsPageManager { + constructor() { + // Initialize any necessary state + this.initialized = false; + } + + async initialize() { + if (this.initialized) return; + + // Initialize page state + initPageState('checkpoints'); + + // Initialize core application + await appCore.initialize(); + + // Initialize page-specific components + this._initializeWorkInProgress(); + + this.initialized = true; + } + + _initializeWorkInProgress() { + // Add any work-in-progress specific initialization here + console.log('Checkpoints Manager is under development'); + } +} + +// Initialize everything when DOM is ready +document.addEventListener('DOMContentLoaded', async () => { + const checkpointsPage = new CheckpointsPageManager(); + await checkpointsPage.initialize(); +}); diff --git a/static/js/components/Header.js b/static/js/components/Header.js new file mode 100644 index 00000000..edba15e5 --- /dev/null +++ b/static/js/components/Header.js @@ -0,0 +1,82 @@ +import { updateService } from '../managers/UpdateService.js'; +import { toggleTheme } from '../utils/uiHelpers.js'; +import { SearchManager } from '../managers/SearchManager.js'; +import { FilterManager } from '../managers/FilterManager.js'; +import { initPageState } from '../state/index.js'; + +/** + * Header.js - Manages the application header behavior across different pages + * Handles 
initialization of appropriate search and filter managers based on current page + */ +export class HeaderManager { + constructor() { + this.currentPage = this.detectCurrentPage(); + initPageState(this.currentPage); + this.searchManager = null; + this.filterManager = null; + + // Initialize appropriate managers based on current page + this.initializeManagers(); + + // Set up common header functionality + this.initializeCommonElements(); + } + + detectCurrentPage() { + const path = window.location.pathname; + if (path.includes('/loras/recipes')) return 'recipes'; + if (path.includes('/checkpoints')) return 'checkpoints'; + if (path.includes('/loras')) return 'loras'; + return 'unknown'; + } + + initializeManagers() { + // Initialize SearchManager for all page types + this.searchManager = new SearchManager({ page: this.currentPage }); + window.searchManager = this.searchManager; + + // Initialize FilterManager for all page types that have filters + if (document.getElementById('filterButton')) { + this.filterManager = new FilterManager({ page: this.currentPage }); + window.filterManager = this.filterManager; + } + } + + initializeCommonElements() { + // Handle theme toggle + const themeToggle = document.querySelector('.theme-toggle'); + if (themeToggle) { + themeToggle.addEventListener('click', () => { + if (typeof toggleTheme === 'function') { + toggleTheme(); + } + }); + } + + // Handle settings toggle + const settingsToggle = document.querySelector('.settings-toggle'); + if (settingsToggle) { + settingsToggle.addEventListener('click', () => { + if (window.settingsManager) { + window.settingsManager.toggleSettings(); + } + }); + } + + // Handle update toggle + const updateToggle = document.getElementById('updateToggleBtn'); + if (updateToggle) { + updateToggle.addEventListener('click', () => { + updateService.toggleUpdateModal(); + }); + } + + // Handle support toggle + const supportToggle = document.getElementById('supportToggleBtn'); + if (supportToggle) { + 
supportToggle.addEventListener('click', () => { + // Handle support panel logic + }); + } + } +} diff --git a/static/js/components/LoraModal.js b/static/js/components/LoraModal.js index 5feaee44..b8254bff 100644 --- a/static/js/components/LoraModal.js +++ b/static/js/components/LoraModal.js @@ -1,5 +1,6 @@ import { showToast } from '../utils/uiHelpers.js'; import { state } from '../state/index.js'; +import { modalManager } from '../managers/ModalManager.js'; import { NSFW_LEVELS } from '../utils/constants.js'; export function showLoraModal(lora) { @@ -10,10 +11,10 @@ export function showLoraModal(lora) {
-
- +
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
-
- -
-
- -
-
-
-
- -
-
- - - - - -
-
-
- - - - - - diff --git a/templates/components/header.html b/templates/components/header.html new file mode 100644 index 00000000..f9ddc624 --- /dev/null +++ b/templates/components/header.html @@ -0,0 +1,159 @@ +
+
+ + + + + + +
+ +
+
+ + +
+
+ +
+
+ + +
+
+ +
+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/templates/components/import_modal.html b/templates/components/import_modal.html new file mode 100644 index 00000000..4d0c57cc --- /dev/null +++ b/templates/components/import_modal.html @@ -0,0 +1,142 @@ + diff --git a/templates/components/lora_modals.html b/templates/components/lora_modals.html new file mode 100644 index 00000000..a6ab03d1 --- /dev/null +++ b/templates/components/lora_modals.html @@ -0,0 +1,114 @@ + + + + + + + + \ No newline at end of file diff --git a/templates/components/modals.html b/templates/components/modals.html index 2f14fdce..f6102eff 100644 --- a/templates/components/modals.html +++ b/templates/components/modals.html @@ -1,6 +1,3 @@ - - - - - - - - -