mirror of
https://github.com/willmiao/ComfyUI-Lora-Manager.git
synced 2026-03-25 15:15:44 -03:00
Merge branch 'main' into fix-crash-on-symlinks
This commit is contained in:
@@ -1,5 +1,10 @@
|
|||||||
import asyncio
|
import asyncio
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
from pathlib import Path
|
||||||
from server import PromptServer # type: ignore
|
from server import PromptServer # type: ignore
|
||||||
|
|
||||||
from .config import config
|
from .config import config
|
||||||
from .routes.lora_routes import LoraRoutes
|
from .routes.lora_routes import LoraRoutes
|
||||||
from .routes.api_routes import ApiRoutes
|
from .routes.api_routes import ApiRoutes
|
||||||
@@ -10,10 +15,7 @@ from .routes.misc_routes import MiscRoutes
|
|||||||
from .routes.example_images_routes import ExampleImagesRoutes
|
from .routes.example_images_routes import ExampleImagesRoutes
|
||||||
from .services.service_registry import ServiceRegistry
|
from .services.service_registry import ServiceRegistry
|
||||||
from .services.settings_manager import settings
|
from .services.settings_manager import settings
|
||||||
from pathlib import Path
|
from .utils.example_images_migration import ExampleImagesMigration
|
||||||
import logging
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -135,13 +137,13 @@ class LoraManager:
|
|||||||
logging.getLogger('aiohttp.access').setLevel(logging.WARNING)
|
logging.getLogger('aiohttp.access').setLevel(logging.WARNING)
|
||||||
|
|
||||||
# Initialize CivitaiClient first to ensure it's ready for other services
|
# Initialize CivitaiClient first to ensure it's ready for other services
|
||||||
civitai_client = await ServiceRegistry.get_civitai_client()
|
await ServiceRegistry.get_civitai_client()
|
||||||
|
|
||||||
# Register DownloadManager with ServiceRegistry
|
# Register DownloadManager with ServiceRegistry
|
||||||
download_manager = await ServiceRegistry.get_download_manager()
|
await ServiceRegistry.get_download_manager()
|
||||||
|
|
||||||
# Initialize WebSocket manager
|
# Initialize WebSocket manager
|
||||||
ws_manager = await ServiceRegistry.get_websocket_manager()
|
await ServiceRegistry.get_websocket_manager()
|
||||||
|
|
||||||
# Initialize scanners in background
|
# Initialize scanners in background
|
||||||
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||||
@@ -160,6 +162,8 @@ class LoraManager:
|
|||||||
asyncio.create_task(lora_scanner.initialize_in_background(), name='lora_cache_init')
|
asyncio.create_task(lora_scanner.initialize_in_background(), name='lora_cache_init')
|
||||||
asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init')
|
asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init')
|
||||||
asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')
|
asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')
|
||||||
|
|
||||||
|
await ExampleImagesMigration.check_and_run_migrations()
|
||||||
|
|
||||||
logger.info("LoRA Manager: All services initialized and background tasks scheduled")
|
logger.info("LoRA Manager: All services initialized and background tasks scheduled")
|
||||||
|
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import asyncio
|
|||||||
from .update_routes import UpdateRoutes
|
from .update_routes import UpdateRoutes
|
||||||
from ..utils.constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH, VALID_LORA_TYPES
|
from ..utils.constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH, VALID_LORA_TYPES
|
||||||
from ..utils.exif_utils import ExifUtils
|
from ..utils.exif_utils import ExifUtils
|
||||||
|
from ..utils.metadata_manager import MetadataManager
|
||||||
from ..services.service_registry import ServiceRegistry
|
from ..services.service_registry import ServiceRegistry
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -289,22 +290,6 @@ class ApiRoutes:
|
|||||||
|
|
||||||
return preview_path
|
return preview_path
|
||||||
|
|
||||||
async def _update_preview_metadata(self, model_path: str, preview_path: str):
|
|
||||||
"""Update preview path in metadata"""
|
|
||||||
metadata_path = os.path.splitext(model_path)[0] + '.metadata.json'
|
|
||||||
if os.path.exists(metadata_path):
|
|
||||||
try:
|
|
||||||
with open(metadata_path, 'r', encoding='utf-8') as f:
|
|
||||||
metadata = json.load(f)
|
|
||||||
|
|
||||||
# Update preview_url directly in the metadata dict
|
|
||||||
metadata['preview_url'] = preview_path
|
|
||||||
|
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
|
||||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"Error updating metadata: {e}")
|
|
||||||
|
|
||||||
async def fetch_all_civitai(self, request: web.Request) -> web.Response:
|
async def fetch_all_civitai(self, request: web.Request) -> web.Response:
|
||||||
"""Fetch CivitAI metadata for all loras in the background"""
|
"""Fetch CivitAI metadata for all loras in the background"""
|
||||||
try:
|
try:
|
||||||
@@ -640,8 +625,7 @@ class ApiRoutes:
|
|||||||
metadata[key] = value
|
metadata[key] = value
|
||||||
|
|
||||||
# Save updated metadata
|
# Save updated metadata
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(file_path, metadata)
|
||||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
# Update cache
|
# Update cache
|
||||||
await self.scanner.update_single_model_cache(file_path, file_path, metadata)
|
await self.scanner.update_single_model_cache(file_path, file_path, metadata)
|
||||||
@@ -854,9 +838,7 @@ class ApiRoutes:
|
|||||||
metadata['tags'] = tags
|
metadata['tags'] = tags
|
||||||
metadata['creator'] = creator
|
metadata['creator'] = creator
|
||||||
|
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(file_path, metadata)
|
||||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
logger.info(f"Saved model metadata to file for {file_path}")
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Error saving model metadata: {e}")
|
logger.error(f"Error saving model metadata: {e}")
|
||||||
|
|
||||||
@@ -972,6 +954,7 @@ class ApiRoutes:
|
|||||||
patterns = [
|
patterns = [
|
||||||
f"{old_file_name}.safetensors", # Required
|
f"{old_file_name}.safetensors", # Required
|
||||||
f"{old_file_name}.metadata.json",
|
f"{old_file_name}.metadata.json",
|
||||||
|
f"{old_file_name}.metadata.json.bak",
|
||||||
]
|
]
|
||||||
|
|
||||||
# Add all preview file extensions
|
# Add all preview file extensions
|
||||||
@@ -1027,8 +1010,7 @@ class ApiRoutes:
|
|||||||
metadata['preview_url'] = new_preview
|
metadata['preview_url'] = new_preview
|
||||||
|
|
||||||
# Save updated metadata
|
# Save updated metadata
|
||||||
with open(new_metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(new_file_path, metadata)
|
||||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
# Update the scanner cache
|
# Update the scanner cache
|
||||||
if metadata:
|
if metadata:
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import asyncio
|
|||||||
|
|
||||||
from ..utils.routes_common import ModelRouteUtils
|
from ..utils.routes_common import ModelRouteUtils
|
||||||
from ..utils.constants import NSFW_LEVELS
|
from ..utils.constants import NSFW_LEVELS
|
||||||
|
from ..utils.metadata_manager import MetadataManager
|
||||||
from ..services.websocket_manager import ws_manager
|
from ..services.websocket_manager import ws_manager
|
||||||
from ..services.service_registry import ServiceRegistry
|
from ..services.service_registry import ServiceRegistry
|
||||||
from ..config import config
|
from ..config import config
|
||||||
@@ -650,8 +651,7 @@ class CheckpointsRoutes:
|
|||||||
metadata.update(metadata_updates)
|
metadata.update(metadata_updates)
|
||||||
|
|
||||||
# Save updated metadata
|
# Save updated metadata
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(file_path, metadata)
|
||||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
# Update cache
|
# Update cache
|
||||||
await self.scanner.update_single_model_cache(file_path, file_path, metadata)
|
await self.scanner.update_single_model_cache(file_path, file_path, metadata)
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -70,8 +70,7 @@ class LoraRoutes:
|
|||||||
# It's initializing if the cache object doesn't exist yet,
|
# It's initializing if the cache object doesn't exist yet,
|
||||||
# OR if the scanner explicitly says it's initializing (background task running).
|
# OR if the scanner explicitly says it's initializing (background task running).
|
||||||
is_initializing = (
|
is_initializing = (
|
||||||
self.scanner._cache is None or
|
self.scanner._cache is None or self.scanner.is_initializing()
|
||||||
(hasattr(self.scanner, '_is_initializing') and self.scanner._is_initializing)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if is_initializing:
|
if is_initializing:
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ from typing import Dict
|
|||||||
from ..utils.models import LoraMetadata, CheckpointMetadata
|
from ..utils.models import LoraMetadata, CheckpointMetadata
|
||||||
from ..utils.constants import CARD_PREVIEW_WIDTH
|
from ..utils.constants import CARD_PREVIEW_WIDTH
|
||||||
from ..utils.exif_utils import ExifUtils
|
from ..utils.exif_utils import ExifUtils
|
||||||
|
from ..utils.metadata_manager import MetadataManager
|
||||||
from .service_registry import ServiceRegistry
|
from .service_registry import ServiceRegistry
|
||||||
|
|
||||||
# Download to temporary file first
|
# Download to temporary file first
|
||||||
@@ -198,8 +199,6 @@ class DownloadManager:
|
|||||||
if await civitai_client.download_preview_image(images[0]['url'], preview_path):
|
if await civitai_client.download_preview_image(images[0]['url'], preview_path):
|
||||||
metadata.preview_url = preview_path.replace(os.sep, '/')
|
metadata.preview_url = preview_path.replace(os.sep, '/')
|
||||||
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
|
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
|
||||||
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
|
|
||||||
else:
|
else:
|
||||||
# For images, use WebP format for better performance
|
# For images, use WebP format for better performance
|
||||||
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
|
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
|
||||||
@@ -226,8 +225,6 @@ class DownloadManager:
|
|||||||
# Update metadata
|
# Update metadata
|
||||||
metadata.preview_url = preview_path.replace(os.sep, '/')
|
metadata.preview_url = preview_path.replace(os.sep, '/')
|
||||||
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
|
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
|
||||||
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
# Remove temporary file
|
# Remove temporary file
|
||||||
try:
|
try:
|
||||||
@@ -258,8 +255,7 @@ class DownloadManager:
|
|||||||
metadata.update_file_info(save_path)
|
metadata.update_file_info(save_path)
|
||||||
|
|
||||||
# 5. Final metadata update
|
# 5. Final metadata update
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(save_path, metadata)
|
||||||
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
# 6. Update cache based on model type
|
# 6. Update cache based on model type
|
||||||
if model_type == "checkpoint":
|
if model_type == "checkpoint":
|
||||||
|
|||||||
@@ -32,12 +32,13 @@ class ModelCache:
|
|||||||
all_folders = set(l['folder'] for l in self.raw_data)
|
all_folders = set(l['folder'] for l in self.raw_data)
|
||||||
self.folders = sorted(list(all_folders), key=lambda x: x.lower())
|
self.folders = sorted(list(all_folders), key=lambda x: x.lower())
|
||||||
|
|
||||||
async def update_preview_url(self, file_path: str, preview_url: str) -> bool:
|
async def update_preview_url(self, file_path: str, preview_url: str, preview_nsfw_level: int) -> bool:
|
||||||
"""Update preview_url for a specific model in all cached data
|
"""Update preview_url for a specific model in all cached data
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
file_path: The file path of the model to update
|
file_path: The file path of the model to update
|
||||||
preview_url: The new preview URL
|
preview_url: The new preview URL
|
||||||
|
preview_nsfw_level: The NSFW level of the preview
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
bool: True if the update was successful, False if the model wasn't found
|
bool: True if the update was successful, False if the model wasn't found
|
||||||
@@ -47,19 +48,9 @@ class ModelCache:
|
|||||||
for item in self.raw_data:
|
for item in self.raw_data:
|
||||||
if item['file_path'] == file_path:
|
if item['file_path'] == file_path:
|
||||||
item['preview_url'] = preview_url
|
item['preview_url'] = preview_url
|
||||||
|
item['preview_nsfw_level'] = preview_nsfw_level
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
return False # Model not found
|
return False # Model not found
|
||||||
|
|
||||||
# Update in sorted lists (references to the same dict objects)
|
|
||||||
for item in self.sorted_by_name:
|
|
||||||
if item['file_path'] == file_path:
|
|
||||||
item['preview_url'] = preview_url
|
|
||||||
break
|
|
||||||
|
|
||||||
for item in self.sorted_by_date:
|
|
||||||
if item['file_path'] == file_path:
|
|
||||||
item['preview_url'] = preview_url
|
|
||||||
break
|
|
||||||
|
|
||||||
return True
|
return True
|
||||||
@@ -9,7 +9,8 @@ import msgpack # Add MessagePack import for efficient serialization
|
|||||||
|
|
||||||
from ..utils.models import BaseModelMetadata
|
from ..utils.models import BaseModelMetadata
|
||||||
from ..config import config
|
from ..config import config
|
||||||
from ..utils.file_utils import load_metadata, get_file_info, find_preview_file, save_metadata
|
from ..utils.file_utils import find_preview_file
|
||||||
|
from ..utils.metadata_manager import MetadataManager
|
||||||
from .model_cache import ModelCache
|
from .model_cache import ModelCache
|
||||||
from .model_hash_index import ModelHashIndex
|
from .model_hash_index import ModelHashIndex
|
||||||
from ..utils.constants import PREVIEW_EXTENSIONS
|
from ..utils.constants import PREVIEW_EXTENSIONS
|
||||||
@@ -748,13 +749,17 @@ class ModelScanner:
|
|||||||
"""Scan all model directories and return metadata"""
|
"""Scan all model directories and return metadata"""
|
||||||
raise NotImplementedError("Subclasses must implement scan_all_models")
|
raise NotImplementedError("Subclasses must implement scan_all_models")
|
||||||
|
|
||||||
|
def is_initializing(self) -> bool:
|
||||||
|
"""Check if the scanner is currently initializing"""
|
||||||
|
return self._is_initializing
|
||||||
|
|
||||||
def get_model_roots(self) -> List[str]:
|
def get_model_roots(self) -> List[str]:
|
||||||
"""Get model root directories"""
|
"""Get model root directories"""
|
||||||
raise NotImplementedError("Subclasses must implement get_model_roots")
|
raise NotImplementedError("Subclasses must implement get_model_roots")
|
||||||
|
|
||||||
async def _get_file_info(self, file_path: str) -> Optional[BaseModelMetadata]:
|
async def _create_default_metadata(self, file_path: str) -> Optional[BaseModelMetadata]:
|
||||||
"""Get model file info and metadata (extensible for different model types)"""
|
"""Get model file info and metadata (extensible for different model types)"""
|
||||||
return await get_file_info(file_path, self.model_class)
|
return await MetadataManager.create_default_metadata(file_path, self.model_class)
|
||||||
|
|
||||||
def _calculate_folder(self, file_path: str) -> str:
|
def _calculate_folder(self, file_path: str) -> str:
|
||||||
"""Calculate the folder path for a model file"""
|
"""Calculate the folder path for a model file"""
|
||||||
@@ -767,7 +772,7 @@ class ModelScanner:
|
|||||||
# Common methods shared between scanners
|
# Common methods shared between scanners
|
||||||
async def _process_model_file(self, file_path: str, root_path: str) -> Dict:
|
async def _process_model_file(self, file_path: str, root_path: str) -> Dict:
|
||||||
"""Process a single model file and return its metadata"""
|
"""Process a single model file and return its metadata"""
|
||||||
metadata = await load_metadata(file_path, self.model_class)
|
metadata = await MetadataManager.load_metadata(file_path, self.model_class)
|
||||||
|
|
||||||
if metadata is None:
|
if metadata is None:
|
||||||
civitai_info_path = f"{os.path.splitext(file_path)[0]}.civitai.info"
|
civitai_info_path = f"{os.path.splitext(file_path)[0]}.civitai.info"
|
||||||
@@ -783,7 +788,7 @@ class ModelScanner:
|
|||||||
|
|
||||||
metadata = self.model_class.from_civitai_info(version_info, file_info, file_path)
|
metadata = self.model_class.from_civitai_info(version_info, file_info, file_path)
|
||||||
metadata.preview_url = find_preview_file(file_name, os.path.dirname(file_path))
|
metadata.preview_url = find_preview_file(file_name, os.path.dirname(file_path))
|
||||||
await save_metadata(file_path, metadata)
|
await MetadataManager.save_metadata(file_path, metadata)
|
||||||
logger.debug(f"Created metadata from .civitai.info for {file_path}")
|
logger.debug(f"Created metadata from .civitai.info for {file_path}")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Error creating metadata from .civitai.info for {file_path}: {e}")
|
logger.error(f"Error creating metadata from .civitai.info for {file_path}: {e}")
|
||||||
@@ -810,13 +815,13 @@ class ModelScanner:
|
|||||||
metadata.modelDescription = version_info['model']['description']
|
metadata.modelDescription = version_info['model']['description']
|
||||||
|
|
||||||
# Save the updated metadata
|
# Save the updated metadata
|
||||||
await save_metadata(file_path, metadata)
|
await MetadataManager.save_metadata(file_path, metadata)
|
||||||
logger.debug(f"Updated metadata with civitai info for {file_path}")
|
logger.debug(f"Updated metadata with civitai info for {file_path}")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Error restoring civitai data from .civitai.info for {file_path}: {e}")
|
logger.error(f"Error restoring civitai data from .civitai.info for {file_path}: {e}")
|
||||||
|
|
||||||
if metadata is None:
|
if metadata is None:
|
||||||
metadata = await self._get_file_info(file_path)
|
metadata = await self._create_default_metadata(file_path)
|
||||||
|
|
||||||
model_data = metadata.to_dict()
|
model_data = metadata.to_dict()
|
||||||
|
|
||||||
@@ -866,9 +871,7 @@ class ModelScanner:
|
|||||||
logger.warning(f"Model {model_id} appears to be deleted from Civitai (404 response)")
|
logger.warning(f"Model {model_id} appears to be deleted from Civitai (404 response)")
|
||||||
model_data['civitai_deleted'] = True
|
model_data['civitai_deleted'] = True
|
||||||
|
|
||||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
await MetadataManager.save_metadata(file_path, model_data)
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
|
||||||
json.dump(model_data, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
elif model_metadata:
|
elif model_metadata:
|
||||||
logger.debug(f"Updating metadata for {file_path} with model ID {model_id}")
|
logger.debug(f"Updating metadata for {file_path} with model ID {model_id}")
|
||||||
@@ -881,9 +884,7 @@ class ModelScanner:
|
|||||||
|
|
||||||
model_data['civitai']['creator'] = model_metadata['creator']
|
model_data['civitai']['creator'] = model_metadata['creator']
|
||||||
|
|
||||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
await MetadataManager.save_metadata(file_path, model_data)
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
|
||||||
json.dump(model_data, f, indent=2, ensure_ascii=False)
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Failed to update metadata from Civitai for {file_path}: {e}")
|
logger.error(f"Failed to update metadata from Civitai for {file_path}: {e}")
|
||||||
|
|
||||||
@@ -1049,8 +1050,7 @@ class ModelScanner:
|
|||||||
new_preview_path = os.path.join(preview_dir, f"{preview_name}{preview_ext}")
|
new_preview_path = os.path.join(preview_dir, f"{preview_name}{preview_ext}")
|
||||||
metadata['preview_url'] = new_preview_path.replace(os.sep, '/')
|
metadata['preview_url'] = new_preview_path.replace(os.sep, '/')
|
||||||
|
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(metadata_path, metadata)
|
||||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
return metadata
|
return metadata
|
||||||
|
|
||||||
@@ -1184,12 +1184,13 @@ class ModelScanner:
|
|||||||
"""Get list of excluded model file paths"""
|
"""Get list of excluded model file paths"""
|
||||||
return self._excluded_models.copy()
|
return self._excluded_models.copy()
|
||||||
|
|
||||||
async def update_preview_in_cache(self, file_path: str, preview_url: str) -> bool:
|
async def update_preview_in_cache(self, file_path: str, preview_url: str, preview_nsfw_level: int) -> bool:
|
||||||
"""Update preview URL in cache for a specific lora
|
"""Update preview URL in cache for a specific lora
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
file_path: The file path of the lora to update
|
file_path: The file path of the lora to update
|
||||||
preview_url: The new preview URL
|
preview_url: The new preview URL
|
||||||
|
preview_nsfw_level: The NSFW level of the preview
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
bool: True if the update was successful, False if cache doesn't exist or lora wasn't found
|
bool: True if the update was successful, False if cache doesn't exist or lora wasn't found
|
||||||
@@ -1197,7 +1198,7 @@ class ModelScanner:
|
|||||||
if self._cache is None:
|
if self._cache is None:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
updated = await self._cache.update_preview_url(file_path, preview_url)
|
updated = await self._cache.update_preview_url(file_path, preview_url, preview_nsfw_level)
|
||||||
if updated:
|
if updated:
|
||||||
# Save updated cache to disk
|
# Save updated cache to disk
|
||||||
await self._save_cache_to_disk()
|
await self._save_cache_to_disk()
|
||||||
|
|||||||
399
py/utils/example_images_download_manager.py
Normal file
399
py/utils/example_images_download_manager.py
Normal file
@@ -0,0 +1,399 @@
|
|||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
import aiohttp
|
||||||
|
from aiohttp import web
|
||||||
|
from ..services.service_registry import ServiceRegistry
|
||||||
|
from .example_images_processor import ExampleImagesProcessor
|
||||||
|
from .example_images_metadata import MetadataUpdater
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Download status tracking
|
||||||
|
download_task = None
|
||||||
|
is_downloading = False
|
||||||
|
download_progress = {
|
||||||
|
'total': 0,
|
||||||
|
'completed': 0,
|
||||||
|
'current_model': '',
|
||||||
|
'status': 'idle', # idle, running, paused, completed, error
|
||||||
|
'errors': [],
|
||||||
|
'last_error': None,
|
||||||
|
'start_time': None,
|
||||||
|
'end_time': None,
|
||||||
|
'processed_models': set(), # Track models that have been processed
|
||||||
|
'refreshed_models': set() # Track models that had metadata refreshed
|
||||||
|
}
|
||||||
|
|
||||||
|
class DownloadManager:
|
||||||
|
"""Manages downloading example images for models"""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def start_download(request):
|
||||||
|
"""
|
||||||
|
Start downloading example images for models
|
||||||
|
|
||||||
|
Expects a JSON body with:
|
||||||
|
{
|
||||||
|
"output_dir": "path/to/output", # Base directory to save example images
|
||||||
|
"optimize": true, # Whether to optimize images (default: true)
|
||||||
|
"model_types": ["lora", "checkpoint"], # Model types to process (default: both)
|
||||||
|
"delay": 1.0 # Delay between downloads to avoid rate limiting (default: 1.0)
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
global download_task, is_downloading, download_progress
|
||||||
|
|
||||||
|
if is_downloading:
|
||||||
|
# Create a copy for JSON serialization
|
||||||
|
response_progress = download_progress.copy()
|
||||||
|
response_progress['processed_models'] = list(download_progress['processed_models'])
|
||||||
|
response_progress['refreshed_models'] = list(download_progress['refreshed_models'])
|
||||||
|
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': 'Download already in progress',
|
||||||
|
'status': response_progress
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Parse the request body
|
||||||
|
data = await request.json()
|
||||||
|
output_dir = data.get('output_dir')
|
||||||
|
optimize = data.get('optimize', True)
|
||||||
|
model_types = data.get('model_types', ['lora', 'checkpoint'])
|
||||||
|
delay = float(data.get('delay', 0.2)) # Default to 0.2 seconds
|
||||||
|
|
||||||
|
if not output_dir:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': 'Missing output_dir parameter'
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
# Create the output directory
|
||||||
|
os.makedirs(output_dir, exist_ok=True)
|
||||||
|
|
||||||
|
# Initialize progress tracking
|
||||||
|
download_progress['total'] = 0
|
||||||
|
download_progress['completed'] = 0
|
||||||
|
download_progress['current_model'] = ''
|
||||||
|
download_progress['status'] = 'running'
|
||||||
|
download_progress['errors'] = []
|
||||||
|
download_progress['last_error'] = None
|
||||||
|
download_progress['start_time'] = time.time()
|
||||||
|
download_progress['end_time'] = None
|
||||||
|
|
||||||
|
# Get the processed models list from a file if it exists
|
||||||
|
progress_file = os.path.join(output_dir, '.download_progress.json')
|
||||||
|
if os.path.exists(progress_file):
|
||||||
|
try:
|
||||||
|
with open(progress_file, 'r', encoding='utf-8') as f:
|
||||||
|
saved_progress = json.load(f)
|
||||||
|
download_progress['processed_models'] = set(saved_progress.get('processed_models', []))
|
||||||
|
logger.info(f"Loaded previous progress, {len(download_progress['processed_models'])} models already processed")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to load progress file: {e}")
|
||||||
|
download_progress['processed_models'] = set()
|
||||||
|
else:
|
||||||
|
download_progress['processed_models'] = set()
|
||||||
|
|
||||||
|
# Start the download task
|
||||||
|
is_downloading = True
|
||||||
|
download_task = asyncio.create_task(
|
||||||
|
DownloadManager._download_all_example_images(
|
||||||
|
output_dir,
|
||||||
|
optimize,
|
||||||
|
model_types,
|
||||||
|
delay
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create a copy for JSON serialization
|
||||||
|
response_progress = download_progress.copy()
|
||||||
|
response_progress['processed_models'] = list(download_progress['processed_models'])
|
||||||
|
response_progress['refreshed_models'] = list(download_progress['refreshed_models'])
|
||||||
|
|
||||||
|
return web.json_response({
|
||||||
|
'success': True,
|
||||||
|
'message': 'Download started',
|
||||||
|
'status': response_progress
|
||||||
|
})
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to start example images download: {e}", exc_info=True)
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': str(e)
|
||||||
|
}, status=500)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def get_status(request):
|
||||||
|
"""Get the current status of example images download"""
|
||||||
|
global download_progress
|
||||||
|
|
||||||
|
# Create a copy of the progress dict with the set converted to a list for JSON serialization
|
||||||
|
response_progress = download_progress.copy()
|
||||||
|
response_progress['processed_models'] = list(download_progress['processed_models'])
|
||||||
|
response_progress['refreshed_models'] = list(download_progress['refreshed_models'])
|
||||||
|
|
||||||
|
return web.json_response({
|
||||||
|
'success': True,
|
||||||
|
'is_downloading': is_downloading,
|
||||||
|
'status': response_progress
|
||||||
|
})
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def pause_download(request):
|
||||||
|
"""Pause the example images download"""
|
||||||
|
global download_progress
|
||||||
|
|
||||||
|
if not is_downloading:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': 'No download in progress'
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
download_progress['status'] = 'paused'
|
||||||
|
|
||||||
|
return web.json_response({
|
||||||
|
'success': True,
|
||||||
|
'message': 'Download paused'
|
||||||
|
})
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def resume_download(request):
|
||||||
|
"""Resume the example images download"""
|
||||||
|
global download_progress
|
||||||
|
|
||||||
|
if not is_downloading:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': 'No download in progress'
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
if download_progress['status'] == 'paused':
|
||||||
|
download_progress['status'] = 'running'
|
||||||
|
|
||||||
|
return web.json_response({
|
||||||
|
'success': True,
|
||||||
|
'message': 'Download resumed'
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': f"Download is in '{download_progress['status']}' state, cannot resume"
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
@staticmethod
async def _download_all_example_images(output_dir, optimize, model_types, delay):
    """Download example images for all models.

    Args:
        output_dir: Root directory; one subfolder per model hash is created.
        optimize: Whether downloaded images should be optimized (forwarded
            to the per-model processing).
        model_types: Iterable of type names; 'lora' and/or 'checkpoint'.
        delay: Seconds to sleep between models after a remote download.
    """
    global is_downloading, download_progress

    # Create independent download session
    # NOTE(review): a dedicated session with a small connection limit (3)
    # and long read timeouts keeps this bulk job from monopolizing
    # connections used elsewhere in the app — confirm against other users
    # of the shared client session.
    connector = aiohttp.TCPConnector(
        ssl=True,
        limit=3,
        force_close=False,
        enable_cleanup_closed=True
    )
    timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=60)
    independent_session = aiohttp.ClientSession(
        connector=connector,
        trust_env=True,
        timeout=timeout
    )

    try:
        # Get scanners
        scanners = []
        if 'lora' in model_types:
            lora_scanner = await ServiceRegistry.get_lora_scanner()
            scanners.append(('lora', lora_scanner))

        if 'checkpoint' in model_types:
            checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
            scanners.append(('checkpoint', checkpoint_scanner))

        # Get all models; models without a sha256 hash are skipped because
        # the hash doubles as the per-model directory name.
        all_models = []
        for scanner_type, scanner in scanners:
            cache = await scanner.get_cached_data()
            if cache and cache.raw_data:
                for model in cache.raw_data:
                    if model.get('sha256'):
                        all_models.append((scanner_type, model, scanner))

        # Update total count
        download_progress['total'] = len(all_models)
        logger.info(f"Found {download_progress['total']} models to process")

        # Process each model
        for i, (scanner_type, model, scanner) in enumerate(all_models):
            # Main logic for processing model is here, but actual operations are delegated to other classes
            was_remote_download = await DownloadManager._process_model(
                scanner_type, model, scanner,
                output_dir, optimize, independent_session
            )

            # Update progress
            download_progress['completed'] += 1

            # Only add delay after remote download of models, and not after processing the last model
            if was_remote_download and i < len(all_models) - 1 and download_progress['status'] == 'running':
                await asyncio.sleep(delay)

        # Mark as completed
        download_progress['status'] = 'completed'
        download_progress['end_time'] = time.time()
        logger.info(f"Example images download completed: {download_progress['completed']}/{download_progress['total']} models processed")

    except Exception as e:
        error_msg = f"Error during example images download: {str(e)}"
        logger.error(error_msg, exc_info=True)
        download_progress['errors'].append(error_msg)
        download_progress['last_error'] = error_msg
        download_progress['status'] = 'error'
        download_progress['end_time'] = time.time()

    finally:
        # Close the independent session
        try:
            await independent_session.close()
        except Exception as e:
            logger.error(f"Error closing download session: {e}")

        # Save final progress to file
        try:
            DownloadManager._save_progress(output_dir)
        except Exception as e:
            logger.error(f"Failed to save progress file: {e}")

        # Set download status to not downloading so a new run can start.
        is_downloading = False
||||||
|
@staticmethod
async def _process_model(scanner_type, model, scanner, output_dir, optimize, independent_session):
    """Process a single model download.

    Prefers example images found next to the model file; only falls back
    to downloading from CivitAI when no local examples exist.

    Returns:
        bool: True if a remote download happened (caller uses this to
        decide whether to insert a politeness delay), False otherwise —
        including on skip, local-only processing, stop, or error.
    """
    global download_progress

    # Check if download is paused; busy-wait (1s poll) until resumed.
    while download_progress['status'] == 'paused':
        await asyncio.sleep(1)

    # Check if download should continue
    if download_progress['status'] != 'running':
        logger.info(f"Download stopped: {download_progress['status']}")
        return False  # Return False to indicate no remote download happened

    model_hash = model.get('sha256', '').lower()
    model_name = model.get('model_name', 'Unknown')
    model_file_path = model.get('file_path', '')
    model_file_name = model.get('file_name', '')

    try:
        # Update current model info
        download_progress['current_model'] = f"{model_name} ({model_hash[:8]})"

        # Skip if already processed
        if model_hash in download_progress['processed_models']:
            logger.debug(f"Skipping already processed model: {model_name}")
            return False

        # Create model directory (named after the sha256 hash)
        model_dir = os.path.join(output_dir, model_hash)
        os.makedirs(model_dir, exist_ok=True)

        # First check for local example images - local processing doesn't need delay
        local_images_processed = await ExampleImagesProcessor.process_local_examples(
            model_file_path, model_file_name, model_name, model_dir, optimize
        )

        # If we processed local images, update metadata
        if local_images_processed:
            await MetadataUpdater.update_metadata_from_local_examples(
                model_hash, model, scanner_type, scanner, model_dir
            )
            download_progress['processed_models'].add(model_hash)
            return False  # Return False to indicate no remote download happened

        # If no local images, try to download from remote
        elif model.get('civitai') and model.get('civitai', {}).get('images'):
            images = model.get('civitai', {}).get('images', [])

            success, is_stale = await ExampleImagesProcessor.download_model_images(
                model_hash, model_name, images, model_dir, optimize, independent_session
            )

            # If metadata is stale, try to refresh it (at most once per
            # model per run, tracked via 'refreshed_models')
            if is_stale and model_hash not in download_progress['refreshed_models']:
                await MetadataUpdater.refresh_model_metadata(
                    model_hash, model_name, scanner_type, scanner
                )

                # Get the updated model data
                updated_model = await MetadataUpdater.get_updated_model(
                    model_hash, scanner
                )

                if updated_model and updated_model.get('civitai', {}).get('images'):
                    # Retry download with updated metadata
                    updated_images = updated_model.get('civitai', {}).get('images', [])
                    success, _ = await ExampleImagesProcessor.download_model_images(
                        model_hash, model_name, updated_images, model_dir, optimize, independent_session
                    )

            # Only mark as processed if all images were downloaded successfully
            if success:
                download_progress['processed_models'].add(model_hash)

            return True  # Return True to indicate a remote download happened

        # Save progress periodically
        # NOTE(review): only reached when neither the local nor the remote
        # branch returned above (i.e. nothing was processed) — confirm
        # whether the periodic save was meant to run for processed models too.
        if download_progress['completed'] % 10 == 0 or download_progress['completed'] == download_progress['total'] - 1:
            DownloadManager._save_progress(output_dir)

        return False  # Default return if no conditions met

    except Exception as e:
        error_msg = f"Error processing model {model.get('model_name')}: {str(e)}"
        logger.error(error_msg, exc_info=True)
        download_progress['errors'].append(error_msg)
        download_progress['last_error'] = error_msg
        return False  # Return False on exception
|
||||||
|
@staticmethod
def _save_progress(output_dir):
    """Save download progress to the '.download_progress.json' file.

    Merges the live progress counters over whatever the file already
    contains, so fields written by other components (notably
    'naming_version') survive the rewrite. Any failure is logged rather
    than raised — progress persistence is best-effort.
    """
    global download_progress
    try:
        progress_file = os.path.join(output_dir, '.download_progress.json')

        # Load whatever is already on disk; tolerate a missing/corrupt file.
        existing_data = {}
        if os.path.exists(progress_file):
            try:
                with open(progress_file, 'r', encoding='utf-8') as fh:
                    existing_data = json.load(fh)
            except Exception as e:
                logger.warning(f"Failed to read existing progress file: {e}")

        # Snapshot the live counters (sets become lists for JSON).
        progress_data = {
            'processed_models': list(download_progress['processed_models']),
            'refreshed_models': list(download_progress['refreshed_models']),
            'completed': download_progress['completed'],
            'total': download_progress['total'],
            'last_update': time.time(),
        }

        # Preserve existing fields (especially naming_version) that the
        # snapshot does not own.
        for key, value in existing_data.items():
            progress_data.setdefault(key, value)

        with open(progress_file, 'w', encoding='utf-8') as fh:
            json.dump(progress_data, fh, indent=2)
    except Exception as e:
        logger.error(f"Failed to save progress file: {e}")
201
py/utils/example_images_file_manager.py
Normal file
201
py/utils/example_images_file_manager.py
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import subprocess
|
||||||
|
from aiohttp import web
|
||||||
|
from ..services.settings_manager import settings
|
||||||
|
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class ExampleImagesFileManager:
    """Manages access and operations for example image files"""

    @staticmethod
    def _is_valid_model_hash(model_hash):
        """Return True when *model_hash* is a plausible hex digest.

        Security: the hash arrives from the HTTP client and is joined onto
        the configured example-images path, so anything containing path
        separators or '..' must be rejected to prevent path traversal.
        """
        return bool(model_hash) and re.fullmatch(r'[0-9a-fA-F]+', model_hash) is not None

    @staticmethod
    def _iter_media_files(model_folder):
        """Yield (name, extension, is_video) for supported media files in *model_folder*."""
        for name in os.listdir(model_folder):
            if not os.path.isfile(os.path.join(model_folder, name)):
                continue
            ext = os.path.splitext(name)[1].lower()
            is_video = ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
            if is_video or ext in SUPPORTED_MEDIA_EXTENSIONS['images']:
                yield name, ext, is_video

    @staticmethod
    async def open_folder(request):
        """
        Open the example images folder for a specific model

        Expects a JSON request body with:
        {
            "model_hash": "sha256_hash"  # SHA256 hash of the model
        }
        """
        try:
            # Parse request body
            data = await request.json()
            model_hash = data.get('model_hash')

            if not model_hash:
                return web.json_response({
                    'success': False,
                    'error': 'Missing model_hash parameter'
                }, status=400)

            # Reject hashes that could escape the example images directory.
            if not ExampleImagesFileManager._is_valid_model_hash(model_hash):
                return web.json_response({
                    'success': False,
                    'error': 'Invalid model_hash parameter'
                }, status=400)

            # Get example images path from settings
            example_images_path = settings.get('example_images_path')
            if not example_images_path:
                return web.json_response({
                    'success': False,
                    'error': 'No example images path configured. Please set it in the settings panel first.'
                }, status=400)

            # Construct folder path for this model
            model_folder = os.path.join(example_images_path, model_hash)

            # Check if folder exists
            if not os.path.exists(model_folder):
                return web.json_response({
                    'success': False,
                    'error': 'No example images found for this model. Download example images first.'
                }, status=404)

            # Open folder in the platform file explorer
            if os.name == 'nt':  # Windows
                os.startfile(model_folder)
            elif os.name == 'posix':  # macOS and Linux
                if sys.platform == 'darwin':  # macOS
                    subprocess.Popen(['open', model_folder])
                else:  # Linux
                    subprocess.Popen(['xdg-open', model_folder])

            return web.json_response({
                'success': True,
                'message': f'Opened example images folder for model {model_hash}'
            })

        except Exception as e:
            logger.error(f"Failed to open example images folder: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    @staticmethod
    async def get_files(request):
        """
        Get the list of example image files for a specific model

        Expects:
        - model_hash in query parameters

        Returns:
        - List of image files and their paths
        """
        try:
            # Get model_hash from query parameters
            model_hash = request.query.get('model_hash')

            if not model_hash:
                return web.json_response({
                    'success': False,
                    'error': 'Missing model_hash parameter'
                }, status=400)

            # Reject hashes that could escape the example images directory.
            if not ExampleImagesFileManager._is_valid_model_hash(model_hash):
                return web.json_response({
                    'success': False,
                    'error': 'Invalid model_hash parameter'
                }, status=400)

            # Get example images path from settings
            example_images_path = settings.get('example_images_path')
            if not example_images_path:
                return web.json_response({
                    'success': False,
                    'error': 'No example images path configured'
                }, status=400)

            # Construct folder path for this model
            model_folder = os.path.join(example_images_path, model_hash)

            # Check if folder exists
            if not os.path.exists(model_folder):
                return web.json_response({
                    'success': False,
                    'error': 'No example images found for this model',
                    'files': []
                }, status=404)

            # Collect supported media files (images and videos only)
            files = [
                {
                    'name': name,
                    'path': f'/example_images_static/{model_hash}/{name}',
                    'extension': ext,
                    'is_video': is_video
                }
                for name, ext, is_video in ExampleImagesFileManager._iter_media_files(model_folder)
            ]

            return web.json_response({
                'success': True,
                'files': files
            })

        except Exception as e:
            logger.error(f"Failed to get example image files: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    @staticmethod
    async def has_images(request):
        """
        Check if the example images folder for a model exists and is not empty

        Expects:
        - model_hash in query parameters

        Returns:
        - Boolean indicating whether the folder exists and contains images/videos
        """
        try:
            # Get model_hash from query parameters
            model_hash = request.query.get('model_hash')

            if not model_hash:
                return web.json_response({
                    'success': False,
                    'error': 'Missing model_hash parameter'
                }, status=400)

            # An unsafe hash cannot correspond to a real example folder.
            if not ExampleImagesFileManager._is_valid_model_hash(model_hash):
                return web.json_response({
                    'has_images': False
                })

            # Get example images path from settings
            example_images_path = settings.get('example_images_path')
            if not example_images_path:
                return web.json_response({
                    'has_images': False
                })

            # Construct folder path for this model
            model_folder = os.path.join(example_images_path, model_hash)

            # isdir() is False for missing paths too, covering both checks
            if not os.path.isdir(model_folder):
                return web.json_response({
                    'has_images': False
                })

            # True as soon as one supported media file is found
            found = next(ExampleImagesFileManager._iter_media_files(model_folder), None)
            return web.json_response({
                'has_images': found is not None
            })

        except Exception as e:
            logger.error(f"Failed to check example images folder: {e}", exc_info=True)
            return web.json_response({
                'has_images': False,
                'error': str(e)
            })
||||||
390
py/utils/example_images_metadata.py
Normal file
390
py/utils/example_images_metadata.py
Normal file
@@ -0,0 +1,390 @@
|
|||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
from ..utils.metadata_manager import MetadataManager
|
||||||
|
from ..utils.routes_common import ModelRouteUtils
|
||||||
|
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
|
||||||
|
from ..utils.exif_utils import ExifUtils
|
||||||
|
from ..recipes.constants import GEN_PARAM_KEYS
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class MetadataUpdater:
    """Handles updating model metadata related to example images"""

    @staticmethod
    async def refresh_model_metadata(model_hash, model_name, scanner_type, scanner):
        """Refresh model metadata from CivitAI

        Args:
            model_hash: SHA256 hash of the model
            model_name: Model name (for logging)
            scanner_type: Scanner type ('lora' or 'checkpoint')
            scanner: Scanner instance for this model type

        Returns:
            bool: True if metadata was successfully refreshed, False otherwise
        """
        # Imported lazily to avoid a circular import with the download manager.
        from ..utils.example_images_download_manager import download_progress

        try:
            # Find the model in the scanner cache
            cache = await scanner.get_cached_data()
            model_data = None

            for item in cache.raw_data:
                if item.get('sha256') == model_hash:
                    model_data = item
                    break

            if not model_data:
                logger.warning(f"Model {model_name} with hash {model_hash} not found in cache")
                return False

            file_path = model_data.get('file_path')
            if not file_path:
                logger.warning(f"Model {model_name} has no file path")
                return False

            # Track that we're refreshing this model (so it is only refreshed
            # once per download run)
            download_progress['refreshed_models'].add(model_hash)

            # Use ModelRouteUtils to refresh metadata
            async def update_cache_func(old_path, new_path, metadata):
                return await scanner.update_single_model_cache(old_path, new_path, metadata)

            success = await ModelRouteUtils.fetch_and_update_model(
                model_hash,
                file_path,
                model_data,
                update_cache_func
            )

            if success:
                logger.info(f"Successfully refreshed metadata for {model_name}")
                return True
            else:
                logger.warning(f"Failed to refresh metadata for {model_name}")
                return False

        except Exception as e:
            error_msg = f"Error refreshing metadata for {model_name}: {str(e)}"
            logger.error(error_msg, exc_info=True)
            download_progress['errors'].append(error_msg)
            download_progress['last_error'] = error_msg
            return False

    @staticmethod
    async def get_updated_model(model_hash, scanner):
        """Get updated model data

        Args:
            model_hash: SHA256 hash of the model
            scanner: Scanner instance

        Returns:
            dict: Updated model data or None if not found
        """
        cache = await scanner.get_cached_data()
        for item in cache.raw_data:
            if item.get('sha256') == model_hash:
                return item
        return None

    @staticmethod
    async def update_metadata_from_local_examples(model_hash, model, scanner_type, scanner, model_dir):
        """Update model metadata with local example image information

        Args:
            model_hash: SHA256 hash of the model
            model: Model data dictionary
            scanner_type: Scanner type ('lora' or 'checkpoint')
            scanner: Scanner instance for this model type
            model_dir: Model images directory

        Returns:
            bool: True if metadata was successfully updated, False otherwise
        """
        try:
            # Collect local image paths (supported images and videos only)
            local_images_paths = []
            if os.path.exists(model_dir):
                for file in os.listdir(model_dir):
                    file_path = os.path.join(model_dir, file)
                    if os.path.isfile(file_path):
                        file_ext = os.path.splitext(file)[1].lower()
                        is_supported = (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
                                        file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'])
                        if is_supported:
                            local_images_paths.append(file_path)

            # Check if metadata update is needed (no civitai field or empty images)
            needs_update = not model.get('civitai') or not model.get('civitai', {}).get('images')

            if needs_update and local_images_paths:
                logger.debug(f"Found {len(local_images_paths)} local example images for {model.get('model_name')}, updating metadata")

                # Create or get civitai field
                if not model.get('civitai'):
                    model['civitai'] = {}

                # Create images array
                images = []

                # Generate metadata for each local image/video
                for path in local_images_paths:
                    # Determine if video or image
                    file_ext = os.path.splitext(path)[1].lower()
                    is_video = file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']

                    # Create image metadata entry
                    image_entry = {
                        "url": "",  # Empty URL as required
                        "nsfwLevel": 0,
                        "width": 720,  # Default dimensions
                        "height": 1280,
                        "type": "video" if is_video else "image",
                        "meta": None,
                        "hasMeta": False,
                        "hasPositivePrompt": False
                    }

                    # If it's an image, try to get actual dimensions (optional enhancement)
                    try:
                        from PIL import Image
                        if not is_video and os.path.exists(path):
                            with Image.open(path) as img:
                                image_entry["width"], image_entry["height"] = img.size
                    except Exception:
                        # If PIL fails or is unavailable, use default dimensions.
                        # (Was a bare 'except:', which would also swallow
                        # KeyboardInterrupt/SystemExit.)
                        pass

                    images.append(image_entry)

                # Update the model's civitai.images field
                model['civitai']['images'] = images

                # Save metadata to .metadata.json file
                file_path = model.get('file_path')
                try:
                    # Create a copy of model data without 'folder' field
                    model_copy = model.copy()
                    model_copy.pop('folder', None)

                    # Write metadata to file
                    await MetadataManager.save_metadata(file_path, model_copy)
                    logger.info(f"Saved metadata for {model.get('model_name')}")
                except Exception as e:
                    logger.error(f"Failed to save metadata for {model.get('model_name')}: {str(e)}")

                # Save updated metadata to scanner cache
                success = await scanner.update_single_model_cache(file_path, file_path, model)
                if success:
                    logger.info(f"Successfully updated metadata for {model.get('model_name')} with {len(images)} local examples")
                    return True
                else:
                    logger.warning(f"Failed to update metadata for {model.get('model_name')}")

            # Falls through when no update was needed, no local images were
            # found, or the cache update failed.
            return False
        except Exception as e:
            logger.error(f"Error updating metadata from local examples: {str(e)}", exc_info=True)
            return False

    @staticmethod
    async def update_metadata_after_import(model_hash, model_data, scanner, newly_imported_paths):
        """Update model metadata after importing example images

        Args:
            model_hash: SHA256 hash of the model
            model_data: Model data dictionary
            scanner: Scanner instance (lora or checkpoint)
            newly_imported_paths: List of (path, short_id) tuples for newly imported files

        Returns:
            tuple: (regular_images, custom_images) - Both image arrays
        """
        try:
            # Ensure civitai field exists in model_data
            if not model_data.get('civitai'):
                model_data['civitai'] = {}

            # Ensure customImages array exists
            if not model_data['civitai'].get('customImages'):
                model_data['civitai']['customImages'] = []

            # Get current customImages array
            custom_images = model_data['civitai']['customImages']

            # Add new image entry for each imported file
            for path_tuple in newly_imported_paths:
                path, short_id = path_tuple

                # Determine if video or image
                file_ext = os.path.splitext(path)[1].lower()
                is_video = file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']

                # Create image metadata entry
                image_entry = {
                    "url": "",  # Empty URL as requested
                    "id": short_id,
                    "nsfwLevel": 0,
                    "width": 720,  # Default dimensions
                    "height": 1280,
                    "type": "video" if is_video else "image",
                    "meta": None,
                    "hasMeta": False,
                    "hasPositivePrompt": False
                }

                # Extract and parse metadata if this is an image
                if not is_video:
                    try:
                        # Extract metadata from image
                        extracted_metadata = ExifUtils.extract_image_metadata(path)

                        if extracted_metadata:
                            # Parse the extracted metadata to get generation parameters
                            parsed_meta = MetadataUpdater._parse_image_metadata(extracted_metadata)

                            if parsed_meta:
                                image_entry["meta"] = parsed_meta
                                image_entry["hasMeta"] = True
                                image_entry["hasPositivePrompt"] = bool(parsed_meta.get("prompt", ""))
                                logger.debug(f"Extracted metadata from {os.path.basename(path)}")
                    except Exception as e:
                        logger.warning(f"Failed to extract metadata from {os.path.basename(path)}: {e}")

                # If it's an image, try to get actual dimensions
                try:
                    from PIL import Image
                    if not is_video and os.path.exists(path):
                        with Image.open(path) as img:
                            image_entry["width"], image_entry["height"] = img.size
                except Exception:
                    # If PIL fails or is unavailable, use default dimensions.
                    # (Was a bare 'except:', which would also swallow
                    # KeyboardInterrupt/SystemExit.)
                    pass

                # Append to existing customImages array
                custom_images.append(image_entry)

            # Save metadata to .metadata.json file
            file_path = model_data.get('file_path')
            if file_path:
                try:
                    # Create a copy of model data without 'folder' field
                    model_copy = model_data.copy()
                    model_copy.pop('folder', None)

                    # Write metadata to file
                    await MetadataManager.save_metadata(file_path, model_copy)
                    logger.info(f"Saved metadata for {model_data.get('model_name')}")
                except Exception as e:
                    logger.error(f"Failed to save metadata: {str(e)}")

            # Save updated metadata to scanner cache
            if file_path:
                await scanner.update_single_model_cache(file_path, file_path, model_data)

            # Get regular images array (might be None)
            regular_images = model_data['civitai'].get('images', [])

            # Return both image arrays
            return regular_images, custom_images

        except Exception as e:
            logger.error(f"Failed to update metadata after import: {e}", exc_info=True)
            return [], []

    @staticmethod
    def _parse_image_metadata(user_comment):
        """Parse metadata from image to extract generation parameters

        Args:
            user_comment: Metadata string extracted from image

        Returns:
            dict: Parsed metadata with generation parameters, or None when
            nothing recognizable was found
        """
        if not user_comment:
            return None

        try:
            # Initialize metadata dictionary
            metadata = {}

            # Split on Negative prompt if it exists
            if "Negative prompt:" in user_comment:
                parts = user_comment.split('Negative prompt:', 1)
                prompt = parts[0].strip()
                negative_and_params = parts[1] if len(parts) > 1 else ""
            else:
                # No negative prompt section; parameters start at "Steps: N"
                param_start = re.search(r'Steps: \d+', user_comment)
                if param_start:
                    prompt = user_comment[:param_start.start()].strip()
                    negative_and_params = user_comment[param_start.start():]
                else:
                    prompt = user_comment.strip()
                    negative_and_params = ""

            # Add prompt if it's in GEN_PARAM_KEYS
            if 'prompt' in GEN_PARAM_KEYS:
                metadata['prompt'] = prompt

            # Extract negative prompt and parameters
            if negative_and_params:
                # If we split on "Negative prompt:", check for params section
                if "Negative prompt:" in user_comment:
                    param_start = re.search(r'Steps: ', negative_and_params)
                    if param_start:
                        neg_prompt = negative_and_params[:param_start.start()].strip()
                        if 'negative_prompt' in GEN_PARAM_KEYS:
                            metadata['negative_prompt'] = neg_prompt
                        params_section = negative_and_params[param_start.start():]
                    else:
                        if 'negative_prompt' in GEN_PARAM_KEYS:
                            metadata['negative_prompt'] = negative_and_params.strip()
                        params_section = ""
                else:
                    # No negative prompt, entire section is params
                    params_section = negative_and_params

                # Extract generation parameters
                if params_section:
                    # Extract basic "Key: value" pairs
                    param_pattern = r'([A-Za-z\s]+): ([^,]+)'
                    params = re.findall(param_pattern, params_section)

                    for key, value in params:
                        clean_key = key.strip().lower().replace(' ', '_')

                        # Skip if not in recognized gen param keys
                        if clean_key not in GEN_PARAM_KEYS:
                            continue

                        # Convert numeric values
                        if clean_key in ['steps', 'seed']:
                            try:
                                metadata[clean_key] = int(value.strip())
                            except ValueError:
                                metadata[clean_key] = value.strip()
                        elif clean_key in ['cfg_scale']:
                            try:
                                metadata[clean_key] = float(value.strip())
                            except ValueError:
                                metadata[clean_key] = value.strip()
                        else:
                            metadata[clean_key] = value.strip()

                    # Extract size if available and add if a recognized key
                    size_match = re.search(r'Size: (\d+)x(\d+)', params_section)
                    if size_match and 'size' in GEN_PARAM_KEYS:
                        width, height = size_match.groups()
                        metadata['size'] = f"{width}x{height}"

            # Return metadata if we have any entries
            return metadata if metadata else None

        except Exception as e:
            logger.error(f"Error parsing image metadata: {e}", exc_info=True)
            return None
||||||
318
py/utils/example_images_migration.py
Normal file
318
py/utils/example_images_migration.py
Normal file
@@ -0,0 +1,318 @@
|
|||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import json
|
||||||
|
from ..services.settings_manager import settings
|
||||||
|
from ..services.service_registry import ServiceRegistry
|
||||||
|
from ..utils.metadata_manager import MetadataManager
|
||||||
|
from ..utils.example_images_processor import ExampleImagesProcessor
|
||||||
|
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
CURRENT_NAMING_VERSION = 2 # Increment this when naming conventions change
|
||||||
|
|
||||||
|
class ExampleImagesMigration:
|
||||||
|
"""Handles migrations for example images naming conventions"""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def check_and_run_migrations():
|
||||||
|
"""Check if migrations are needed and run them in background"""
|
||||||
|
example_images_path = settings.get('example_images_path')
|
||||||
|
if not example_images_path or not os.path.exists(example_images_path):
|
||||||
|
logger.debug("No example images path configured or path doesn't exist, skipping migrations")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Check current version from progress file
|
||||||
|
current_version = 0
|
||||||
|
progress_file = os.path.join(example_images_path, '.download_progress.json')
|
||||||
|
if os.path.exists(progress_file):
|
||||||
|
try:
|
||||||
|
with open(progress_file, 'r', encoding='utf-8') as f:
|
||||||
|
progress_data = json.load(f)
|
||||||
|
current_version = progress_data.get('naming_version', 0)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to load progress file for migration check: {e}")
|
||||||
|
|
||||||
|
# If current version is less than target version, start migration
|
||||||
|
if current_version < CURRENT_NAMING_VERSION:
|
||||||
|
logger.info(f"Starting example images naming migration from v{current_version} to v{CURRENT_NAMING_VERSION}")
|
||||||
|
# Start migration in background task
|
||||||
|
asyncio.create_task(
|
||||||
|
ExampleImagesMigration.run_migrations(example_images_path, current_version, CURRENT_NAMING_VERSION)
|
||||||
|
)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def run_migrations(example_images_path, from_version, to_version):
|
||||||
|
"""Run necessary migrations based on version difference"""
|
||||||
|
try:
|
||||||
|
# Get all model folders
|
||||||
|
model_folders = []
|
||||||
|
for item in os.listdir(example_images_path):
|
||||||
|
item_path = os.path.join(example_images_path, item)
|
||||||
|
if os.path.isdir(item_path) and len(item) == 64: # SHA256 hash is 64 chars
|
||||||
|
model_folders.append(item_path)
|
||||||
|
|
||||||
|
logger.info(f"Found {len(model_folders)} model folders to check for migration")
|
||||||
|
|
||||||
|
# Apply migrations sequentially
|
||||||
|
if from_version < 1 and to_version >= 1:
|
||||||
|
await ExampleImagesMigration._migrate_to_v1(model_folders)
|
||||||
|
|
||||||
|
if from_version < 2 and to_version >= 2:
|
||||||
|
await ExampleImagesMigration._migrate_to_v2(model_folders)
|
||||||
|
|
||||||
|
# Update version in progress file
|
||||||
|
progress_file = os.path.join(example_images_path, '.download_progress.json')
|
||||||
|
try:
|
||||||
|
progress_data = {}
|
||||||
|
if os.path.exists(progress_file):
|
||||||
|
with open(progress_file, 'r', encoding='utf-8') as f:
|
||||||
|
progress_data = json.load(f)
|
||||||
|
|
||||||
|
progress_data['naming_version'] = to_version
|
||||||
|
|
||||||
|
with open(progress_file, 'w', encoding='utf-8') as f:
|
||||||
|
json.dump(progress_data, f, indent=2)
|
||||||
|
|
||||||
|
logger.info(f"Example images naming migration to v{to_version} completed")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to update version in progress file: {e}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error during migration: {e}", exc_info=True)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def _migrate_to_v1(model_folders):
|
||||||
|
"""Migrate from 1-based to 0-based indexing"""
|
||||||
|
count = 0
|
||||||
|
for folder in model_folders:
|
||||||
|
has_one_based = False
|
||||||
|
has_zero_based = False
|
||||||
|
files_to_rename = []
|
||||||
|
|
||||||
|
# Check naming pattern in this folder
|
||||||
|
for file in os.listdir(folder):
|
||||||
|
if re.match(r'image_1\.\w+$', file):
|
||||||
|
has_one_based = True
|
||||||
|
if re.match(r'image_0\.\w+$', file):
|
||||||
|
has_zero_based = True
|
||||||
|
|
||||||
|
# Only migrate folders with 1-based indexing and no 0-based
|
||||||
|
if has_one_based and not has_zero_based:
|
||||||
|
# Create rename mapping
|
||||||
|
for file in os.listdir(folder):
|
||||||
|
match = re.match(r'image_(\d+)\.(\w+)$', file)
|
||||||
|
if match:
|
||||||
|
index = int(match.group(1))
|
||||||
|
ext = match.group(2)
|
||||||
|
if index > 0: # Only rename if index is positive
|
||||||
|
files_to_rename.append((
|
||||||
|
file,
|
||||||
|
f"image_{index-1}.{ext}"
|
||||||
|
))
|
||||||
|
|
||||||
|
# Use temporary names to avoid conflicts
|
||||||
|
for old_name, new_name in files_to_rename:
|
||||||
|
old_path = os.path.join(folder, old_name)
|
||||||
|
temp_path = os.path.join(folder, f"temp_{old_name}")
|
||||||
|
try:
|
||||||
|
os.rename(old_path, temp_path)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to rename {old_path} to {temp_path}: {e}")
|
||||||
|
|
||||||
|
# Rename from temporary names to final names
|
||||||
|
for old_name, new_name in files_to_rename:
|
||||||
|
temp_path = os.path.join(folder, f"temp_{old_name}")
|
||||||
|
new_path = os.path.join(folder, new_name)
|
||||||
|
try:
|
||||||
|
os.rename(temp_path, new_path)
|
||||||
|
logger.debug(f"Renamed {old_name} to {new_name} in {folder}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to rename {temp_path} to {new_path}: {e}")
|
||||||
|
|
||||||
|
count += 1
|
||||||
|
|
||||||
|
# Give other tasks a chance to run
|
||||||
|
if count % 10 == 0:
|
||||||
|
await asyncio.sleep(0)
|
||||||
|
|
||||||
|
logger.info(f"Migrated {count} folders from 1-based to 0-based indexing")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def _migrate_to_v2(model_folders):
|
||||||
|
"""
|
||||||
|
Migrate to v2 naming scheme:
|
||||||
|
- Move custom examples from images array to customImages array
|
||||||
|
- Rename files from image_<index>.<ext> to custom_<short_id>.<ext>
|
||||||
|
- Add id field to each custom image entry
|
||||||
|
"""
|
||||||
|
count = 0
|
||||||
|
updated_models = 0
|
||||||
|
migration_errors = 0
|
||||||
|
|
||||||
|
# Get scanner instances
|
||||||
|
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||||
|
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||||
|
|
||||||
|
# Wait until scanners are initialized
|
||||||
|
scanners = [lora_scanner, checkpoint_scanner]
|
||||||
|
for scanner in scanners:
|
||||||
|
if scanner.is_initializing():
|
||||||
|
logger.info("Waiting for scanners to complete initialization before starting migration...")
|
||||||
|
initialized = False
|
||||||
|
retry_count = 0
|
||||||
|
while not initialized and retry_count < 120: # Wait up to 120 seconds
|
||||||
|
await asyncio.sleep(1)
|
||||||
|
initialized = not scanner.is_initializing()
|
||||||
|
retry_count += 1
|
||||||
|
|
||||||
|
if not initialized:
|
||||||
|
logger.warning("Scanner initialization timeout - proceeding with migration anyway")
|
||||||
|
|
||||||
|
logger.info(f"Starting migration to v2 naming scheme for {len(model_folders)} model folders")
|
||||||
|
|
||||||
|
for folder in model_folders:
|
||||||
|
try:
|
||||||
|
# Extract model hash from folder name
|
||||||
|
model_hash = os.path.basename(folder)
|
||||||
|
if not model_hash or len(model_hash) != 64:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Find the model in scanner cache
|
||||||
|
model_data = None
|
||||||
|
scanner = None
|
||||||
|
|
||||||
|
for scan_obj in scanners:
|
||||||
|
if scan_obj.has_hash(model_hash):
|
||||||
|
cache = await scan_obj.get_cached_data()
|
||||||
|
for item in cache.raw_data:
|
||||||
|
if item.get('sha256') == model_hash:
|
||||||
|
model_data = item
|
||||||
|
scanner = scan_obj
|
||||||
|
break
|
||||||
|
if model_data:
|
||||||
|
break
|
||||||
|
|
||||||
|
if not model_data or not scanner:
|
||||||
|
logger.debug(f"Model with hash {model_hash} not found in cache, skipping migration")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Clone model data to avoid modifying the cache directly
|
||||||
|
model_metadata = model_data.copy()
|
||||||
|
|
||||||
|
# Check if model has civitai metadata
|
||||||
|
if not model_metadata.get('civitai'):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Get images array
|
||||||
|
images = model_metadata.get('civitai', {}).get('images', [])
|
||||||
|
if not images:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Initialize customImages array if it doesn't exist
|
||||||
|
if not model_metadata['civitai'].get('customImages'):
|
||||||
|
model_metadata['civitai']['customImages'] = []
|
||||||
|
|
||||||
|
# Find custom examples (entries with empty url)
|
||||||
|
custom_indices = []
|
||||||
|
for i, image in enumerate(images):
|
||||||
|
if image.get('url') == "":
|
||||||
|
custom_indices.append(i)
|
||||||
|
|
||||||
|
if not custom_indices:
|
||||||
|
continue
|
||||||
|
|
||||||
|
logger.debug(f"Found {len(custom_indices)} custom examples in {model_hash}")
|
||||||
|
|
||||||
|
# Process each custom example
|
||||||
|
for index in custom_indices:
|
||||||
|
try:
|
||||||
|
image_entry = images[index]
|
||||||
|
|
||||||
|
# Determine media type based on the entry type
|
||||||
|
media_type = 'videos' if image_entry.get('type') == 'video' else 'images'
|
||||||
|
extensions_to_try = SUPPORTED_MEDIA_EXTENSIONS[media_type]
|
||||||
|
|
||||||
|
# Find the image file by trying possible extensions
|
||||||
|
old_path = None
|
||||||
|
old_filename = None
|
||||||
|
found = False
|
||||||
|
|
||||||
|
for ext in extensions_to_try:
|
||||||
|
test_path = os.path.join(folder, f"image_{index}{ext}")
|
||||||
|
if os.path.exists(test_path):
|
||||||
|
old_path = test_path
|
||||||
|
old_filename = f"image_{index}{ext}"
|
||||||
|
found = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if not found:
|
||||||
|
logger.warning(f"Could not find file for index {index} in {model_hash}, skipping")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Generate short ID for the custom example
|
||||||
|
short_id = ExampleImagesProcessor.generate_short_id()
|
||||||
|
|
||||||
|
# Get file extension
|
||||||
|
file_ext = os.path.splitext(old_path)[1]
|
||||||
|
|
||||||
|
# Create new filename
|
||||||
|
new_filename = f"custom_{short_id}{file_ext}"
|
||||||
|
new_path = os.path.join(folder, new_filename)
|
||||||
|
|
||||||
|
# Rename the file
|
||||||
|
try:
|
||||||
|
os.rename(old_path, new_path)
|
||||||
|
logger.debug(f"Renamed {old_filename} to {new_filename} in {folder}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to rename {old_path} to {new_path}: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Create a copy of the image entry with the id field
|
||||||
|
custom_entry = image_entry.copy()
|
||||||
|
custom_entry['id'] = short_id
|
||||||
|
|
||||||
|
# Add to customImages array
|
||||||
|
model_metadata['civitai']['customImages'].append(custom_entry)
|
||||||
|
|
||||||
|
count += 1
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error migrating custom example at index {index} for {model_hash}: {e}")
|
||||||
|
|
||||||
|
# Remove custom examples from the original images array
|
||||||
|
model_metadata['civitai']['images'] = [
|
||||||
|
img for i, img in enumerate(images) if i not in custom_indices
|
||||||
|
]
|
||||||
|
|
||||||
|
# Save the updated metadata
|
||||||
|
file_path = model_data.get('file_path')
|
||||||
|
if file_path:
|
||||||
|
try:
|
||||||
|
# Create a copy of model data without 'folder' field
|
||||||
|
model_copy = model_metadata.copy()
|
||||||
|
model_copy.pop('folder', None)
|
||||||
|
|
||||||
|
# Save metadata to file
|
||||||
|
await MetadataManager.save_metadata(file_path, model_copy)
|
||||||
|
|
||||||
|
# Update scanner cache
|
||||||
|
await scanner.update_single_model_cache(file_path, file_path, model_metadata)
|
||||||
|
|
||||||
|
updated_models += 1
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to save metadata for {model_hash}: {e}")
|
||||||
|
migration_errors += 1
|
||||||
|
|
||||||
|
# Give other tasks a chance to run
|
||||||
|
if count % 10 == 0:
|
||||||
|
await asyncio.sleep(0)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error migrating folder {folder}: {e}")
|
||||||
|
migration_errors += 1
|
||||||
|
|
||||||
|
logger.info(f"Migration to v2 complete: migrated {count} custom examples across {updated_models} models with {migration_errors} errors")
|
||||||
494
py/utils/example_images_processor.py
Normal file
494
py/utils/example_images_processor.py
Normal file
@@ -0,0 +1,494 @@
|
|||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import tempfile
|
||||||
|
import random
|
||||||
|
import string
|
||||||
|
from aiohttp import web
|
||||||
|
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
|
||||||
|
from ..services.service_registry import ServiceRegistry
|
||||||
|
from ..services.settings_manager import settings
|
||||||
|
from .example_images_metadata import MetadataUpdater
|
||||||
|
from ..utils.metadata_manager import MetadataManager
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class ExampleImagesProcessor:
|
||||||
|
"""Processes and manipulates example images"""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def generate_short_id(length=8):
|
||||||
|
"""Generate a short random alphanumeric identifier"""
|
||||||
|
chars = string.ascii_lowercase + string.digits
|
||||||
|
return ''.join(random.choice(chars) for _ in range(length))
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_civitai_optimized_url(image_url):
|
||||||
|
"""Convert Civitai image URL to its optimized WebP version"""
|
||||||
|
base_pattern = r'(https://image\.civitai\.com/[^/]+/[^/]+)'
|
||||||
|
match = re.match(base_pattern, image_url)
|
||||||
|
|
||||||
|
if match:
|
||||||
|
base_url = match.group(1)
|
||||||
|
return f"{base_url}/optimized=true/image.webp"
|
||||||
|
|
||||||
|
return image_url
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def download_model_images(model_hash, model_name, model_images, model_dir, optimize, independent_session):
|
||||||
|
"""Download images for a single model
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
tuple: (success, is_stale_metadata) - whether download was successful, whether metadata is stale
|
||||||
|
"""
|
||||||
|
model_success = True
|
||||||
|
|
||||||
|
for i, image in enumerate(model_images):
|
||||||
|
image_url = image.get('url')
|
||||||
|
if not image_url:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Get image filename from URL
|
||||||
|
image_filename = os.path.basename(image_url.split('?')[0])
|
||||||
|
image_ext = os.path.splitext(image_filename)[1].lower()
|
||||||
|
|
||||||
|
# Handle images and videos
|
||||||
|
is_image = image_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
|
||||||
|
is_video = image_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
|
||||||
|
|
||||||
|
if not (is_image or is_video):
|
||||||
|
logger.debug(f"Skipping unsupported file type: {image_filename}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Use 0-based indexing instead of 1-based indexing
|
||||||
|
save_filename = f"image_{i}{image_ext}"
|
||||||
|
|
||||||
|
# If optimizing images and this is a Civitai image, use their pre-optimized WebP version
|
||||||
|
if is_image and optimize and 'civitai.com' in image_url:
|
||||||
|
image_url = ExampleImagesProcessor.get_civitai_optimized_url(image_url)
|
||||||
|
save_filename = f"image_{i}.webp"
|
||||||
|
|
||||||
|
# Check if already downloaded
|
||||||
|
save_path = os.path.join(model_dir, save_filename)
|
||||||
|
if os.path.exists(save_path):
|
||||||
|
logger.debug(f"File already exists: {save_path}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Download the file
|
||||||
|
try:
|
||||||
|
logger.debug(f"Downloading {save_filename} for {model_name}")
|
||||||
|
|
||||||
|
# Download directly using the independent session
|
||||||
|
async with independent_session.get(image_url, timeout=60) as response:
|
||||||
|
if response.status == 200:
|
||||||
|
with open(save_path, 'wb') as f:
|
||||||
|
async for chunk in response.content.iter_chunked(8192):
|
||||||
|
if chunk:
|
||||||
|
f.write(chunk)
|
||||||
|
elif response.status == 404:
|
||||||
|
error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
|
||||||
|
logger.warning(error_msg)
|
||||||
|
model_success = False # Mark the model as failed due to 404 error
|
||||||
|
# Return early to trigger metadata refresh attempt
|
||||||
|
return False, True # (success, is_metadata_stale)
|
||||||
|
else:
|
||||||
|
error_msg = f"Failed to download file: {image_url}, status code: {response.status}"
|
||||||
|
logger.warning(error_msg)
|
||||||
|
model_success = False # Mark the model as failed
|
||||||
|
except Exception as e:
|
||||||
|
error_msg = f"Error downloading file {image_url}: {str(e)}"
|
||||||
|
logger.error(error_msg)
|
||||||
|
model_success = False # Mark the model as failed
|
||||||
|
|
||||||
|
return model_success, False # (success, is_metadata_stale)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def process_local_examples(model_file_path, model_file_name, model_name, model_dir, optimize):
|
||||||
|
"""Process local example images
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if local images were processed successfully, False otherwise
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if not model_file_path or not os.path.exists(os.path.dirname(model_file_path)):
|
||||||
|
return False
|
||||||
|
|
||||||
|
model_dir_path = os.path.dirname(model_file_path)
|
||||||
|
local_images = []
|
||||||
|
|
||||||
|
# Look for files with pattern: filename.example.*.ext
|
||||||
|
if model_file_name:
|
||||||
|
example_prefix = f"{model_file_name}.example."
|
||||||
|
|
||||||
|
if os.path.exists(model_dir_path):
|
||||||
|
for file in os.listdir(model_dir_path):
|
||||||
|
file_lower = file.lower()
|
||||||
|
if file_lower.startswith(example_prefix.lower()):
|
||||||
|
file_ext = os.path.splitext(file_lower)[1]
|
||||||
|
is_supported = (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
|
||||||
|
file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'])
|
||||||
|
|
||||||
|
if is_supported:
|
||||||
|
local_images.append(os.path.join(model_dir_path, file))
|
||||||
|
|
||||||
|
# Process local images if found
|
||||||
|
if local_images:
|
||||||
|
logger.info(f"Found {len(local_images)} local example images for {model_name}")
|
||||||
|
|
||||||
|
for local_image_path in local_images:
|
||||||
|
# Extract index from filename
|
||||||
|
file_name = os.path.basename(local_image_path)
|
||||||
|
example_prefix = f"{model_file_name}.example."
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Extract the part between '.example.' and the file extension
|
||||||
|
index_part = file_name[len(example_prefix):].split('.')[0]
|
||||||
|
# Try to parse it as an integer
|
||||||
|
index = int(index_part)
|
||||||
|
local_ext = os.path.splitext(local_image_path)[1].lower()
|
||||||
|
save_filename = f"image_{index}{local_ext}"
|
||||||
|
except (ValueError, IndexError):
|
||||||
|
# If we can't parse the index, fall back to sequential numbering
|
||||||
|
logger.warning(f"Could not extract index from {file_name}, using sequential numbering")
|
||||||
|
local_ext = os.path.splitext(local_image_path)[1].lower()
|
||||||
|
save_filename = f"image_{len(local_images)}{local_ext}"
|
||||||
|
|
||||||
|
save_path = os.path.join(model_dir, save_filename)
|
||||||
|
|
||||||
|
# Skip if already exists in output directory
|
||||||
|
if os.path.exists(save_path):
|
||||||
|
logger.debug(f"File already exists in output: {save_path}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Copy the file
|
||||||
|
with open(local_image_path, 'rb') as src_file:
|
||||||
|
with open(save_path, 'wb') as dst_file:
|
||||||
|
dst_file.write(src_file.read())
|
||||||
|
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error processing local examples for {model_name}: {str(e)}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def import_images(request):
|
||||||
|
"""
|
||||||
|
Import local example images
|
||||||
|
|
||||||
|
Accepts:
|
||||||
|
- multipart/form-data form with model_hash and files fields
|
||||||
|
or
|
||||||
|
- JSON request with model_hash and file_paths
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- Success status and list of imported files
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
model_hash = None
|
||||||
|
files_to_import = []
|
||||||
|
temp_files_to_cleanup = []
|
||||||
|
|
||||||
|
# Check if it's a multipart form-data request (direct file upload)
|
||||||
|
if request.content_type and 'multipart/form-data' in request.content_type:
|
||||||
|
reader = await request.multipart()
|
||||||
|
|
||||||
|
# First get model_hash
|
||||||
|
field = await reader.next()
|
||||||
|
if field.name == 'model_hash':
|
||||||
|
model_hash = await field.text()
|
||||||
|
|
||||||
|
# Then process all files
|
||||||
|
while True:
|
||||||
|
field = await reader.next()
|
||||||
|
if field is None:
|
||||||
|
break
|
||||||
|
|
||||||
|
if field.name == 'files':
|
||||||
|
# Create a temporary file with appropriate suffix for type detection
|
||||||
|
file_name = field.filename
|
||||||
|
file_ext = os.path.splitext(file_name)[1].lower()
|
||||||
|
|
||||||
|
with tempfile.NamedTemporaryFile(suffix=file_ext, delete=False) as tmp_file:
|
||||||
|
temp_path = tmp_file.name
|
||||||
|
temp_files_to_cleanup.append(temp_path) # Track for cleanup
|
||||||
|
|
||||||
|
# Write chunks to the temporary file
|
||||||
|
while True:
|
||||||
|
chunk = await field.read_chunk()
|
||||||
|
if not chunk:
|
||||||
|
break
|
||||||
|
tmp_file.write(chunk)
|
||||||
|
|
||||||
|
# Add to the list of files to process
|
||||||
|
files_to_import.append(temp_path)
|
||||||
|
else:
|
||||||
|
# Parse JSON request (legacy method using file paths)
|
||||||
|
data = await request.json()
|
||||||
|
model_hash = data.get('model_hash')
|
||||||
|
files_to_import = data.get('file_paths', [])
|
||||||
|
|
||||||
|
if not model_hash:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': 'Missing model_hash parameter'
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
if not files_to_import:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': 'No files provided to import'
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
# Get example images path
|
||||||
|
example_images_path = settings.get('example_images_path')
|
||||||
|
if not example_images_path:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': 'No example images path configured'
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
# Find the model and get current metadata
|
||||||
|
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||||
|
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||||
|
|
||||||
|
model_data = None
|
||||||
|
scanner = None
|
||||||
|
|
||||||
|
# Check both scanners to find the model
|
||||||
|
for scan_obj in [lora_scanner, checkpoint_scanner]:
|
||||||
|
cache = await scan_obj.get_cached_data()
|
||||||
|
for item in cache.raw_data:
|
||||||
|
if item.get('sha256') == model_hash:
|
||||||
|
model_data = item
|
||||||
|
scanner = scan_obj
|
||||||
|
break
|
||||||
|
if model_data:
|
||||||
|
break
|
||||||
|
|
||||||
|
if not model_data:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': f"Model with hash {model_hash} not found in cache"
|
||||||
|
}, status=404)
|
||||||
|
|
||||||
|
# Create model folder
|
||||||
|
model_folder = os.path.join(example_images_path, model_hash)
|
||||||
|
os.makedirs(model_folder, exist_ok=True)
|
||||||
|
|
||||||
|
imported_files = []
|
||||||
|
errors = []
|
||||||
|
newly_imported_paths = []
|
||||||
|
|
||||||
|
# Process each file path
|
||||||
|
for file_path in files_to_import:
|
||||||
|
try:
|
||||||
|
# Ensure the file exists
|
||||||
|
if not os.path.isfile(file_path):
|
||||||
|
errors.append(f"File not found: {file_path}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Check if file type is supported
|
||||||
|
file_ext = os.path.splitext(file_path)[1].lower()
|
||||||
|
if not (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
|
||||||
|
file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']):
|
||||||
|
errors.append(f"Unsupported file type: {file_path}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Generate new filename using short ID instead of UUID
|
||||||
|
short_id = ExampleImagesProcessor.generate_short_id()
|
||||||
|
new_filename = f"custom_{short_id}{file_ext}"
|
||||||
|
|
||||||
|
dest_path = os.path.join(model_folder, new_filename)
|
||||||
|
|
||||||
|
# Copy the file
|
||||||
|
import shutil
|
||||||
|
shutil.copy2(file_path, dest_path)
|
||||||
|
# Store both the dest_path and the short_id
|
||||||
|
newly_imported_paths.append((dest_path, short_id))
|
||||||
|
|
||||||
|
# Add to imported files list
|
||||||
|
imported_files.append({
|
||||||
|
'name': new_filename,
|
||||||
|
'path': f'/example_images_static/{model_hash}/{new_filename}',
|
||||||
|
'extension': file_ext,
|
||||||
|
'is_video': file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
|
||||||
|
})
|
||||||
|
except Exception as e:
|
||||||
|
errors.append(f"Error importing {file_path}: {str(e)}")
|
||||||
|
|
||||||
|
# Update metadata with new example images
|
||||||
|
regular_images, custom_images = await MetadataUpdater.update_metadata_after_import(
|
||||||
|
model_hash,
|
||||||
|
model_data,
|
||||||
|
scanner,
|
||||||
|
newly_imported_paths
|
||||||
|
)
|
||||||
|
|
||||||
|
return web.json_response({
|
||||||
|
'success': len(imported_files) > 0,
|
||||||
|
'message': f'Successfully imported {len(imported_files)} files' +
|
||||||
|
(f' with {len(errors)} errors' if errors else ''),
|
||||||
|
'files': imported_files,
|
||||||
|
'errors': errors,
|
||||||
|
'regular_images': regular_images,
|
||||||
|
'custom_images': custom_images,
|
||||||
|
"model_file_path": model_data.get('file_path', ''),
|
||||||
|
})
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to import example images: {e}", exc_info=True)
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': str(e)
|
||||||
|
}, status=500)
|
||||||
|
finally:
|
||||||
|
# Clean up temporary files
|
||||||
|
for temp_file in temp_files_to_cleanup:
|
||||||
|
try:
|
||||||
|
os.remove(temp_file)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to remove temporary file {temp_file}: {e}")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def delete_custom_image(request):
|
||||||
|
"""
|
||||||
|
Delete a custom example image for a model
|
||||||
|
|
||||||
|
Accepts:
|
||||||
|
- JSON request with model_hash and short_id
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
- Success status and updated image lists
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Parse request data
|
||||||
|
data = await request.json()
|
||||||
|
model_hash = data.get('model_hash')
|
||||||
|
short_id = data.get('short_id')
|
||||||
|
|
||||||
|
if not model_hash or not short_id:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': 'Missing required parameters: model_hash and short_id'
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
# Get example images path
|
||||||
|
example_images_path = settings.get('example_images_path')
|
||||||
|
if not example_images_path:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': 'No example images path configured'
|
||||||
|
}, status=400)
|
||||||
|
|
||||||
|
# Find the model and get current metadata
|
||||||
|
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||||
|
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||||
|
|
||||||
|
model_data = None
|
||||||
|
scanner = None
|
||||||
|
|
||||||
|
# Check both scanners to find the model
|
||||||
|
for scan_obj in [lora_scanner, checkpoint_scanner]:
|
||||||
|
if scan_obj.has_hash(model_hash):
|
||||||
|
cache = await scan_obj.get_cached_data()
|
||||||
|
for item in cache.raw_data:
|
||||||
|
if item.get('sha256') == model_hash:
|
||||||
|
model_data = item
|
||||||
|
scanner = scan_obj
|
||||||
|
break
|
||||||
|
if model_data:
|
||||||
|
break
|
||||||
|
|
||||||
|
if not model_data:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': f"Model with hash {model_hash} not found in cache"
|
||||||
|
}, status=404)
|
||||||
|
|
||||||
|
# Check if model has custom images
|
||||||
|
if not model_data.get('civitai', {}).get('customImages'):
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': f"Model has no custom images"
|
||||||
|
}, status=404)
|
||||||
|
|
||||||
|
# Find the custom image with matching short_id
|
||||||
|
custom_images = model_data['civitai']['customImages']
|
||||||
|
matching_image = None
|
||||||
|
new_custom_images = []
|
||||||
|
|
||||||
|
for image in custom_images:
|
||||||
|
if image.get('id') == short_id:
|
||||||
|
matching_image = image
|
||||||
|
else:
|
||||||
|
new_custom_images.append(image)
|
||||||
|
|
||||||
|
if not matching_image:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': f"Custom image with id {short_id} not found"
|
||||||
|
}, status=404)
|
||||||
|
|
||||||
|
# Find and delete the actual file
|
||||||
|
model_folder = os.path.join(example_images_path, model_hash)
|
||||||
|
file_deleted = False
|
||||||
|
|
||||||
|
if os.path.exists(model_folder):
|
||||||
|
for filename in os.listdir(model_folder):
|
||||||
|
if f"custom_{short_id}" in filename:
|
||||||
|
file_path = os.path.join(model_folder, filename)
|
||||||
|
try:
|
||||||
|
os.remove(file_path)
|
||||||
|
file_deleted = True
|
||||||
|
logger.info(f"Deleted custom example file: {file_path}")
|
||||||
|
break
|
||||||
|
except Exception as e:
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': f"Failed to delete file: {str(e)}"
|
||||||
|
}, status=500)
|
||||||
|
|
||||||
|
if not file_deleted:
|
||||||
|
logger.warning(f"File for custom example with id {short_id} not found, but metadata will still be updated")
|
||||||
|
|
||||||
|
# Update metadata
|
||||||
|
model_data['civitai']['customImages'] = new_custom_images
|
||||||
|
|
||||||
|
# Save updated metadata to file
|
||||||
|
file_path = model_data.get('file_path')
|
||||||
|
if file_path:
|
||||||
|
try:
|
||||||
|
# Create a copy of model data without 'folder' field
|
||||||
|
model_copy = model_data.copy()
|
||||||
|
model_copy.pop('folder', None)
|
||||||
|
|
||||||
|
# Write metadata to file
|
||||||
|
await MetadataManager.save_metadata(file_path, model_copy)
|
||||||
|
logger.debug(f"Saved updated metadata for {model_data.get('model_name')}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to save metadata: {str(e)}")
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': f"Failed to save metadata: {str(e)}"
|
||||||
|
}, status=500)
|
||||||
|
|
||||||
|
# Update cache
|
||||||
|
await scanner.update_single_model_cache(file_path, file_path, model_data)
|
||||||
|
|
||||||
|
# Get regular images array (might be None)
|
||||||
|
regular_images = model_data['civitai'].get('images', [])
|
||||||
|
|
||||||
|
return web.json_response({
|
||||||
|
'success': True,
|
||||||
|
'regular_images': regular_images,
|
||||||
|
'custom_images': new_custom_images,
|
||||||
|
'model_file_path': model_data.get('file_path', '')
|
||||||
|
})
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to delete custom example image: {e}", exc_info=True)
|
||||||
|
return web.json_response({
|
||||||
|
'success': False,
|
||||||
|
'error': str(e)
|
||||||
|
}, status=500)
|
||||||
@@ -63,199 +63,4 @@ def find_preview_file(base_name: str, dir_path: str) -> str:
|
|||||||
|
|
||||||
def normalize_path(path: str) -> str:
|
def normalize_path(path: str) -> str:
|
||||||
"""Normalize file path to use forward slashes"""
|
"""Normalize file path to use forward slashes"""
|
||||||
return path.replace(os.sep, "/") if path else path
|
return path.replace(os.sep, "/") if path else path
|
||||||
|
|
||||||
async def get_file_info(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
|
|
||||||
"""Get basic file information as a model metadata object"""
|
|
||||||
# First check if file actually exists and resolve symlinks
|
|
||||||
try:
|
|
||||||
real_path = os.path.realpath(file_path)
|
|
||||||
if not os.path.exists(real_path):
|
|
||||||
return None
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"Error checking file existence for {file_path}: {e}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
base_name = os.path.splitext(os.path.basename(file_path))[0]
|
|
||||||
dir_path = os.path.dirname(file_path)
|
|
||||||
|
|
||||||
preview_url = find_preview_file(base_name, dir_path)
|
|
||||||
|
|
||||||
# Check if a .json file exists with SHA256 hash to avoid recalculation
|
|
||||||
json_path = f"{os.path.splitext(file_path)[0]}.json"
|
|
||||||
sha256 = None
|
|
||||||
if os.path.exists(json_path):
|
|
||||||
try:
|
|
||||||
with open(json_path, 'r', encoding='utf-8') as f:
|
|
||||||
json_data = json.load(f)
|
|
||||||
if 'sha256' in json_data:
|
|
||||||
sha256 = json_data['sha256'].lower()
|
|
||||||
logger.debug(f"Using SHA256 from .json file for {file_path}")
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"Error reading .json file for {file_path}: {e}")
|
|
||||||
|
|
||||||
# If SHA256 is still not found, check for a .sha256 file
|
|
||||||
if sha256 is None:
|
|
||||||
sha256_file = f"{os.path.splitext(file_path)[0]}.sha256"
|
|
||||||
if os.path.exists(sha256_file):
|
|
||||||
try:
|
|
||||||
with open(sha256_file, 'r', encoding='utf-8') as f:
|
|
||||||
sha256 = f.read().strip().lower()
|
|
||||||
logger.debug(f"Using SHA256 from .sha256 file for {file_path}")
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"Error reading .sha256 file for {file_path}: {e}")
|
|
||||||
|
|
||||||
try:
|
|
||||||
# If we didn't get SHA256 from the .json file, calculate it
|
|
||||||
if not sha256:
|
|
||||||
start_time = time.time()
|
|
||||||
sha256 = await calculate_sha256(real_path)
|
|
||||||
logger.debug(f"Calculated SHA256 for {file_path} in {time.time() - start_time:.2f} seconds")
|
|
||||||
|
|
||||||
# Create default metadata based on model class
|
|
||||||
if model_class == CheckpointMetadata:
|
|
||||||
metadata = CheckpointMetadata(
|
|
||||||
file_name=base_name,
|
|
||||||
model_name=base_name,
|
|
||||||
file_path=normalize_path(file_path),
|
|
||||||
size=os.path.getsize(real_path),
|
|
||||||
modified=os.path.getmtime(real_path),
|
|
||||||
sha256=sha256,
|
|
||||||
base_model="Unknown", # Will be updated later
|
|
||||||
preview_url=normalize_path(preview_url),
|
|
||||||
tags=[],
|
|
||||||
modelDescription="",
|
|
||||||
model_type="checkpoint"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Extract checkpoint-specific metadata
|
|
||||||
# model_info = await extract_checkpoint_metadata(real_path)
|
|
||||||
# metadata.base_model = model_info['base_model']
|
|
||||||
# if 'model_type' in model_info:
|
|
||||||
# metadata.model_type = model_info['model_type']
|
|
||||||
|
|
||||||
else: # Default to LoraMetadata
|
|
||||||
metadata = LoraMetadata(
|
|
||||||
file_name=base_name,
|
|
||||||
model_name=base_name,
|
|
||||||
file_path=normalize_path(file_path),
|
|
||||||
size=os.path.getsize(real_path),
|
|
||||||
modified=os.path.getmtime(real_path),
|
|
||||||
sha256=sha256,
|
|
||||||
base_model="Unknown", # Will be updated later
|
|
||||||
usage_tips="{}",
|
|
||||||
preview_url=normalize_path(preview_url),
|
|
||||||
tags=[],
|
|
||||||
modelDescription=""
|
|
||||||
)
|
|
||||||
|
|
||||||
# Extract lora-specific metadata
|
|
||||||
model_info = await extract_lora_metadata(real_path)
|
|
||||||
metadata.base_model = model_info['base_model']
|
|
||||||
|
|
||||||
# Save metadata to file
|
|
||||||
await save_metadata(file_path, metadata)
|
|
||||||
|
|
||||||
return metadata
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"Error getting file info for {file_path}: {e}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
async def save_metadata(file_path: str, metadata: BaseModelMetadata) -> None:
|
|
||||||
"""Save metadata to .metadata.json file"""
|
|
||||||
metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
|
|
||||||
try:
|
|
||||||
metadata_dict = metadata.to_dict()
|
|
||||||
metadata_dict['file_path'] = normalize_path(metadata_dict['file_path'])
|
|
||||||
metadata_dict['preview_url'] = normalize_path(metadata_dict['preview_url'])
|
|
||||||
|
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
|
||||||
json.dump(metadata_dict, f, indent=2, ensure_ascii=False)
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error saving metadata to {metadata_path}: {str(e)}")
|
|
||||||
|
|
||||||
async def load_metadata(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
|
|
||||||
"""Load metadata from .metadata.json file"""
|
|
||||||
metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
|
|
||||||
try:
|
|
||||||
if os.path.exists(metadata_path):
|
|
||||||
with open(metadata_path, 'r', encoding='utf-8') as f:
|
|
||||||
data = json.load(f)
|
|
||||||
|
|
||||||
needs_update = False
|
|
||||||
|
|
||||||
# Check and normalize base model name
|
|
||||||
normalized_base_model = determine_base_model(data['base_model'])
|
|
||||||
if data['base_model'] != normalized_base_model:
|
|
||||||
data['base_model'] = normalized_base_model
|
|
||||||
needs_update = True
|
|
||||||
|
|
||||||
# Compare paths without extensions
|
|
||||||
stored_path_base = os.path.splitext(data['file_path'])[0]
|
|
||||||
current_path_base = os.path.splitext(normalize_path(file_path))[0]
|
|
||||||
if stored_path_base != current_path_base:
|
|
||||||
data['file_path'] = normalize_path(file_path)
|
|
||||||
needs_update = True
|
|
||||||
|
|
||||||
# TODO: optimize preview image to webp format if not already done
|
|
||||||
preview_url = data.get('preview_url', '')
|
|
||||||
if not preview_url or not os.path.exists(preview_url):
|
|
||||||
base_name = os.path.splitext(os.path.basename(file_path))[0]
|
|
||||||
dir_path = os.path.dirname(file_path)
|
|
||||||
new_preview_url = normalize_path(find_preview_file(base_name, dir_path))
|
|
||||||
if new_preview_url != preview_url:
|
|
||||||
data['preview_url'] = new_preview_url
|
|
||||||
needs_update = True
|
|
||||||
else:
|
|
||||||
if stored_path_base != current_path_base:
|
|
||||||
# If model location changed, update preview path by replacing old path with new path
|
|
||||||
preview_file = os.path.basename(preview_url)
|
|
||||||
new_preview_url = os.path.join(os.path.dirname(file_path), preview_file)
|
|
||||||
data['preview_url'] = normalize_path(new_preview_url)
|
|
||||||
needs_update = True
|
|
||||||
|
|
||||||
# Ensure all fields are present
|
|
||||||
if 'tags' not in data:
|
|
||||||
data['tags'] = []
|
|
||||||
needs_update = True
|
|
||||||
|
|
||||||
if 'modelDescription' not in data:
|
|
||||||
data['modelDescription'] = ""
|
|
||||||
needs_update = True
|
|
||||||
|
|
||||||
# For checkpoint metadata
|
|
||||||
if model_class == CheckpointMetadata and 'model_type' not in data:
|
|
||||||
data['model_type'] = "checkpoint"
|
|
||||||
needs_update = True
|
|
||||||
|
|
||||||
# For lora metadata
|
|
||||||
if model_class == LoraMetadata and 'usage_tips' not in data:
|
|
||||||
data['usage_tips'] = "{}"
|
|
||||||
needs_update = True
|
|
||||||
|
|
||||||
# Update preview_nsfw_level if needed
|
|
||||||
civitai_data = data.get('civitai', {})
|
|
||||||
civitai_images = civitai_data.get('images', []) if civitai_data else []
|
|
||||||
if (data.get('preview_url') and
|
|
||||||
data.get('preview_nsfw_level', 0) == 0 and
|
|
||||||
civitai_images and
|
|
||||||
civitai_images[0].get('nsfwLevel', 0) != 0):
|
|
||||||
data['preview_nsfw_level'] = civitai_images[0]['nsfwLevel']
|
|
||||||
# TODO: write to metadata file
|
|
||||||
# needs_update = True
|
|
||||||
|
|
||||||
if needs_update:
|
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
|
||||||
json.dump(data, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
return model_class.from_dict(data)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"Error loading metadata from {metadata_path}: {str(e)}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
async def update_civitai_metadata(file_path: str, civitai_data: Dict) -> None:
|
|
||||||
"""Update metadata file with Civitai data"""
|
|
||||||
metadata = await load_metadata(file_path)
|
|
||||||
metadata['civitai'] = civitai_data
|
|
||||||
await save_metadata(file_path, metadata)
|
|
||||||
275
py/utils/metadata_manager.py
Normal file
275
py/utils/metadata_manager.py
Normal file
@@ -0,0 +1,275 @@
|
|||||||
|
import os
|
||||||
|
import json
|
||||||
|
import shutil
|
||||||
|
import logging
|
||||||
|
from typing import Dict, Optional, Type, Union
|
||||||
|
|
||||||
|
from .models import BaseModelMetadata, LoraMetadata
|
||||||
|
from .file_utils import normalize_path, find_preview_file, calculate_sha256
|
||||||
|
from .lora_metadata import extract_lora_metadata, extract_checkpoint_metadata
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class MetadataManager:
|
||||||
|
"""
|
||||||
|
Centralized manager for all metadata operations.
|
||||||
|
|
||||||
|
This class is responsible for:
|
||||||
|
1. Loading metadata safely with fallback mechanisms
|
||||||
|
2. Saving metadata with atomic operations and backups
|
||||||
|
3. Creating default metadata for models
|
||||||
|
4. Handling unknown fields gracefully
|
||||||
|
"""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def load_metadata(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
|
||||||
|
"""
|
||||||
|
Load metadata with robust error handling and data preservation.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
file_path: Path to the model file
|
||||||
|
model_class: Class to instantiate (LoraMetadata, CheckpointMetadata, etc.)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
BaseModelMetadata instance or None if file doesn't exist
|
||||||
|
"""
|
||||||
|
metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
|
||||||
|
backup_path = f"{metadata_path}.bak"
|
||||||
|
|
||||||
|
# Try loading the main metadata file
|
||||||
|
if os.path.exists(metadata_path):
|
||||||
|
try:
|
||||||
|
with open(metadata_path, 'r', encoding='utf-8') as f:
|
||||||
|
data = json.load(f)
|
||||||
|
|
||||||
|
# Create model instance
|
||||||
|
metadata = model_class.from_dict(data)
|
||||||
|
|
||||||
|
# Normalize paths
|
||||||
|
await MetadataManager._normalize_metadata_paths(metadata, file_path)
|
||||||
|
|
||||||
|
return metadata
|
||||||
|
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
# JSON parsing error - try to restore from backup
|
||||||
|
logger.warning(f"Invalid JSON in metadata file: {metadata_path}")
|
||||||
|
return await MetadataManager._restore_from_backup(backup_path, file_path, model_class)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
# Other errors might be due to unknown fields or schema changes
|
||||||
|
logger.error(f"Error loading metadata from {metadata_path}: {str(e)}")
|
||||||
|
return await MetadataManager._restore_from_backup(backup_path, file_path, model_class)
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def _restore_from_backup(backup_path: str, file_path: str, model_class: Type[BaseModelMetadata]) -> Optional[BaseModelMetadata]:
|
||||||
|
"""
|
||||||
|
Try to restore metadata from backup file
|
||||||
|
|
||||||
|
Args:
|
||||||
|
backup_path: Path to backup file
|
||||||
|
file_path: Path to the original model file
|
||||||
|
model_class: Class to instantiate
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
BaseModelMetadata instance or None if restoration fails
|
||||||
|
"""
|
||||||
|
if os.path.exists(backup_path):
|
||||||
|
try:
|
||||||
|
logger.info(f"Attempting to restore metadata from backup: {backup_path}")
|
||||||
|
with open(backup_path, 'r', encoding='utf-8') as f:
|
||||||
|
data = json.load(f)
|
||||||
|
|
||||||
|
# Process data similarly to normal loading
|
||||||
|
metadata = model_class.from_dict(data)
|
||||||
|
await MetadataManager._normalize_metadata_paths(metadata, file_path)
|
||||||
|
return metadata
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to restore from backup: {str(e)}")
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def save_metadata(path: str, metadata: Union[BaseModelMetadata, Dict], create_backup: bool = True) -> bool:
|
||||||
|
"""
|
||||||
|
Save metadata with atomic write operations and backup creation.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
path: Path to the model file or directly to the metadata file
|
||||||
|
metadata: Metadata to save (either BaseModelMetadata object or dict)
|
||||||
|
create_backup: Whether to create a backup of existing file
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: Success or failure
|
||||||
|
"""
|
||||||
|
# Determine if the input is a metadata path or a model file path
|
||||||
|
if path.endswith('.metadata.json'):
|
||||||
|
metadata_path = path
|
||||||
|
else:
|
||||||
|
# Use existing logic for model file paths
|
||||||
|
file_path = path
|
||||||
|
metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
|
||||||
|
temp_path = f"{metadata_path}.tmp"
|
||||||
|
backup_path = f"{metadata_path}.bak"
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Create backup if requested and file exists
|
||||||
|
if create_backup and os.path.exists(metadata_path):
|
||||||
|
try:
|
||||||
|
shutil.copy2(metadata_path, backup_path)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to create metadata backup: {str(e)}")
|
||||||
|
|
||||||
|
# Convert to dict if needed
|
||||||
|
if isinstance(metadata, BaseModelMetadata):
|
||||||
|
metadata_dict = metadata.to_dict()
|
||||||
|
# Preserve unknown fields if present
|
||||||
|
if hasattr(metadata, '_unknown_fields'):
|
||||||
|
metadata_dict.update(metadata._unknown_fields)
|
||||||
|
else:
|
||||||
|
metadata_dict = metadata.copy()
|
||||||
|
|
||||||
|
# Normalize paths
|
||||||
|
if 'file_path' in metadata_dict:
|
||||||
|
metadata_dict['file_path'] = normalize_path(metadata_dict['file_path'])
|
||||||
|
if 'preview_url' in metadata_dict:
|
||||||
|
metadata_dict['preview_url'] = normalize_path(metadata_dict['preview_url'])
|
||||||
|
|
||||||
|
# Write to temporary file first
|
||||||
|
with open(temp_path, 'w', encoding='utf-8') as f:
|
||||||
|
json.dump(metadata_dict, f, indent=2, ensure_ascii=False)
|
||||||
|
|
||||||
|
# Atomic rename operation
|
||||||
|
os.replace(temp_path, metadata_path)
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error saving metadata to {metadata_path}: {str(e)}")
|
||||||
|
# Clean up temporary file if it exists
|
||||||
|
if os.path.exists(temp_path):
|
||||||
|
try:
|
||||||
|
os.remove(temp_path)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
return False
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def create_default_metadata(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
|
||||||
|
"""
|
||||||
|
Create basic metadata structure for a model file.
|
||||||
|
This replaces the old get_file_info function with a more appropriately named method.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
file_path: Path to the model file
|
||||||
|
model_class: Class to instantiate
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
BaseModelMetadata instance or None if file doesn't exist
|
||||||
|
"""
|
||||||
|
# First check if file actually exists and resolve symlinks
|
||||||
|
try:
|
||||||
|
real_path = os.path.realpath(file_path)
|
||||||
|
if not os.path.exists(real_path):
|
||||||
|
return None
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error checking file existence for {file_path}: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
try:
|
||||||
|
base_name = os.path.splitext(os.path.basename(file_path))[0]
|
||||||
|
dir_path = os.path.dirname(file_path)
|
||||||
|
|
||||||
|
# Find preview image
|
||||||
|
preview_url = find_preview_file(base_name, dir_path)
|
||||||
|
|
||||||
|
# Calculate file hash
|
||||||
|
sha256 = await calculate_sha256(real_path)
|
||||||
|
|
||||||
|
# Create instance based on model type
|
||||||
|
if model_class.__name__ == "CheckpointMetadata":
|
||||||
|
metadata = model_class(
|
||||||
|
file_name=base_name,
|
||||||
|
model_name=base_name,
|
||||||
|
file_path=normalize_path(file_path),
|
||||||
|
size=os.path.getsize(real_path),
|
||||||
|
modified=os.path.getmtime(real_path),
|
||||||
|
sha256=sha256,
|
||||||
|
base_model="Unknown",
|
||||||
|
preview_url=normalize_path(preview_url),
|
||||||
|
tags=[],
|
||||||
|
modelDescription="",
|
||||||
|
model_type="checkpoint",
|
||||||
|
from_civitai=False
|
||||||
|
)
|
||||||
|
else: # Default to LoraMetadata
|
||||||
|
metadata = model_class(
|
||||||
|
file_name=base_name,
|
||||||
|
model_name=base_name,
|
||||||
|
file_path=normalize_path(file_path),
|
||||||
|
size=os.path.getsize(real_path),
|
||||||
|
modified=os.path.getmtime(real_path),
|
||||||
|
sha256=sha256,
|
||||||
|
base_model="Unknown",
|
||||||
|
preview_url=normalize_path(preview_url),
|
||||||
|
tags=[],
|
||||||
|
modelDescription="",
|
||||||
|
from_civitai=False,
|
||||||
|
usage_tips="{}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Try to extract model-specific metadata
|
||||||
|
await MetadataManager._enrich_metadata(metadata, real_path)
|
||||||
|
|
||||||
|
# Save the created metadata
|
||||||
|
await MetadataManager.save_metadata(file_path, metadata, create_backup=False)
|
||||||
|
|
||||||
|
return metadata
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error creating default metadata for {file_path}: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def _enrich_metadata(metadata: BaseModelMetadata, file_path: str) -> None:
|
||||||
|
"""
|
||||||
|
Enrich metadata with model-specific information
|
||||||
|
|
||||||
|
Args:
|
||||||
|
metadata: Metadata to enrich
|
||||||
|
file_path: Path to the model file
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if metadata.__class__.__name__ == "LoraMetadata":
|
||||||
|
model_info = await extract_lora_metadata(file_path)
|
||||||
|
metadata.base_model = model_info['base_model']
|
||||||
|
|
||||||
|
elif metadata.__class__.__name__ == "CheckpointMetadata":
|
||||||
|
model_info = await extract_checkpoint_metadata(file_path)
|
||||||
|
metadata.base_model = model_info['base_model']
|
||||||
|
if 'model_type' in model_info:
|
||||||
|
metadata.model_type = model_info['model_type']
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error enriching metadata: {str(e)}")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
async def _normalize_metadata_paths(metadata: BaseModelMetadata, file_path: str) -> None:
|
||||||
|
"""
|
||||||
|
Normalize paths in metadata object
|
||||||
|
|
||||||
|
Args:
|
||||||
|
metadata: Metadata object to update
|
||||||
|
file_path: Current file path for the model
|
||||||
|
"""
|
||||||
|
# Check if file path is different from what's in metadata
|
||||||
|
if normalize_path(file_path) != metadata.file_path:
|
||||||
|
metadata.file_path = normalize_path(file_path)
|
||||||
|
|
||||||
|
# Check if preview exists at the current location
|
||||||
|
preview_url = metadata.preview_url
|
||||||
|
if preview_url and not os.path.exists(preview_url):
|
||||||
|
base_name = os.path.splitext(os.path.basename(file_path))[0]
|
||||||
|
dir_path = os.path.dirname(file_path)
|
||||||
|
new_preview_url = find_preview_file(base_name, dir_path)
|
||||||
|
if new_preview_url:
|
||||||
|
metadata.preview_url = normalize_path(new_preview_url)
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
from dataclasses import dataclass, asdict
|
from dataclasses import dataclass, asdict, field
|
||||||
from typing import Dict, Optional, List
|
from typing import Dict, Optional, List, Any
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
import os
|
import os
|
||||||
from .model_utils import determine_base_model
|
from .model_utils import determine_base_model
|
||||||
@@ -24,6 +24,7 @@ class BaseModelMetadata:
|
|||||||
civitai_deleted: bool = False # Whether deleted from Civitai
|
civitai_deleted: bool = False # Whether deleted from Civitai
|
||||||
favorite: bool = False # Whether the model is a favorite
|
favorite: bool = False # Whether the model is a favorite
|
||||||
exclude: bool = False # Whether to exclude this model from the cache
|
exclude: bool = False # Whether to exclude this model from the cache
|
||||||
|
_unknown_fields: Dict[str, Any] = field(default_factory=dict, repr=False, compare=False) # Store unknown fields
|
||||||
|
|
||||||
def __post_init__(self):
|
def __post_init__(self):
|
||||||
# Initialize empty lists to avoid mutable default parameter issue
|
# Initialize empty lists to avoid mutable default parameter issue
|
||||||
@@ -34,11 +35,43 @@ class BaseModelMetadata:
|
|||||||
def from_dict(cls, data: Dict) -> 'BaseModelMetadata':
|
def from_dict(cls, data: Dict) -> 'BaseModelMetadata':
|
||||||
"""Create instance from dictionary"""
|
"""Create instance from dictionary"""
|
||||||
data_copy = data.copy()
|
data_copy = data.copy()
|
||||||
return cls(**data_copy)
|
|
||||||
|
# Use cached fields if available, otherwise compute them
|
||||||
|
if not hasattr(cls, '_known_fields_cache'):
|
||||||
|
known_fields = set()
|
||||||
|
for c in cls.mro():
|
||||||
|
if hasattr(c, '__annotations__'):
|
||||||
|
known_fields.update(c.__annotations__.keys())
|
||||||
|
cls._known_fields_cache = known_fields
|
||||||
|
|
||||||
|
known_fields = cls._known_fields_cache
|
||||||
|
|
||||||
|
# Extract fields that match our class attributes
|
||||||
|
fields_to_use = {k: v for k, v in data_copy.items() if k in known_fields}
|
||||||
|
|
||||||
|
# Store unknown fields separately
|
||||||
|
unknown_fields = {k: v for k, v in data_copy.items() if k not in known_fields and not k.startswith('_')}
|
||||||
|
|
||||||
|
# Create instance with known fields
|
||||||
|
instance = cls(**fields_to_use)
|
||||||
|
|
||||||
|
# Add unknown fields as a separate attribute
|
||||||
|
instance._unknown_fields = unknown_fields
|
||||||
|
|
||||||
|
return instance
|
||||||
|
|
||||||
def to_dict(self) -> Dict:
|
def to_dict(self) -> Dict:
|
||||||
"""Convert to dictionary for JSON serialization"""
|
"""Convert to dictionary for JSON serialization"""
|
||||||
return asdict(self)
|
result = asdict(self)
|
||||||
|
|
||||||
|
# Remove private fields
|
||||||
|
result = {k: v for k, v in result.items() if not k.startswith('_')}
|
||||||
|
|
||||||
|
# Add back unknown fields if they exist
|
||||||
|
if hasattr(self, '_unknown_fields'):
|
||||||
|
result.update(self._unknown_fields)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def modified_datetime(self) -> datetime:
|
def modified_datetime(self) -> datetime:
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ from .constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
|
|||||||
from ..config import config
|
from ..config import config
|
||||||
from ..services.civitai_client import CivitaiClient
|
from ..services.civitai_client import CivitaiClient
|
||||||
from ..utils.exif_utils import ExifUtils
|
from ..utils.exif_utils import ExifUtils
|
||||||
|
from ..utils.metadata_manager import MetadataManager
|
||||||
from ..services.download_manager import DownloadManager
|
from ..services.download_manager import DownloadManager
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -32,14 +33,29 @@ class ModelRouteUtils:
|
|||||||
async def handle_not_found_on_civitai(metadata_path: str, local_metadata: Dict) -> None:
|
async def handle_not_found_on_civitai(metadata_path: str, local_metadata: Dict) -> None:
|
||||||
"""Handle case when model is not found on CivitAI"""
|
"""Handle case when model is not found on CivitAI"""
|
||||||
local_metadata['from_civitai'] = False
|
local_metadata['from_civitai'] = False
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(metadata_path, local_metadata)
|
||||||
json.dump(local_metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
async def update_model_metadata(metadata_path: str, local_metadata: Dict,
|
async def update_model_metadata(metadata_path: str, local_metadata: Dict,
|
||||||
civitai_metadata: Dict, client: CivitaiClient) -> None:
|
civitai_metadata: Dict, client: CivitaiClient) -> None:
|
||||||
"""Update local metadata with CivitAI data"""
|
"""Update local metadata with CivitAI data"""
|
||||||
local_metadata['civitai'] = civitai_metadata
|
# Save existing trainedWords and customImages if they exist
|
||||||
|
existing_civitai = local_metadata.get('civitai') or {} # Use empty dict if None
|
||||||
|
|
||||||
|
# Create a new civitai metadata by updating existing with new
|
||||||
|
merged_civitai = existing_civitai.copy()
|
||||||
|
merged_civitai.update(civitai_metadata)
|
||||||
|
|
||||||
|
# Special handling for trainedWords - ensure we don't lose any existing trained words
|
||||||
|
if 'trainedWords' in existing_civitai:
|
||||||
|
existing_trained_words = existing_civitai.get('trainedWords', [])
|
||||||
|
new_trained_words = civitai_metadata.get('trainedWords', [])
|
||||||
|
# Use a set to combine words without duplicates, then convert back to list
|
||||||
|
merged_trained_words = list(set(existing_trained_words + new_trained_words))
|
||||||
|
merged_civitai['trainedWords'] = merged_trained_words
|
||||||
|
|
||||||
|
# Update local metadata with merged civitai data
|
||||||
|
local_metadata['civitai'] = merged_civitai
|
||||||
local_metadata['from_civitai'] = True
|
local_metadata['from_civitai'] = True
|
||||||
|
|
||||||
# Update model name if available
|
# Update model name if available
|
||||||
@@ -138,8 +154,7 @@ class ModelRouteUtils:
|
|||||||
local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
|
local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
|
||||||
|
|
||||||
# Save updated metadata
|
# Save updated metadata
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(metadata_path, local_metadata)
|
||||||
json.dump(local_metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
async def fetch_and_update_model(
|
async def fetch_and_update_model(
|
||||||
@@ -177,8 +192,7 @@ class ModelRouteUtils:
|
|||||||
# Mark as not from CivitAI if not found
|
# Mark as not from CivitAI if not found
|
||||||
local_metadata['from_civitai'] = False
|
local_metadata['from_civitai'] = False
|
||||||
model_data['from_civitai'] = False
|
model_data['from_civitai'] = False
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(file_path, local_metadata)
|
||||||
json.dump(local_metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
# Update metadata
|
# Update metadata
|
||||||
@@ -221,7 +235,7 @@ class ModelRouteUtils:
|
|||||||
fields = [
|
fields = [
|
||||||
"id", "modelId", "name", "createdAt", "updatedAt",
|
"id", "modelId", "name", "createdAt", "updatedAt",
|
||||||
"publishedAt", "trainedWords", "baseModel", "description",
|
"publishedAt", "trainedWords", "baseModel", "description",
|
||||||
"model", "images", "creator"
|
"model", "images", "customImages", "creator"
|
||||||
]
|
]
|
||||||
return {k: data[k] for k in fields if k in data}
|
return {k: data[k] for k in fields if k in data}
|
||||||
|
|
||||||
@@ -270,10 +284,12 @@ class ModelRouteUtils:
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_multipart_ext(filename):
|
def get_multipart_ext(filename):
|
||||||
"""Get extension that may have multiple parts like .metadata.json"""
|
"""Get extension that may have multiple parts like .metadata.json or .metadata.json.bak"""
|
||||||
parts = filename.split(".")
|
parts = filename.split(".")
|
||||||
if len(parts) > 2: # If contains multi-part extension
|
if len(parts) == 3: # If contains 2-part extension
|
||||||
return "." + ".".join(parts[-2:]) # Take the last two parts, like ".metadata.json"
|
return "." + ".".join(parts[-2:]) # Take the last two parts, like ".metadata.json"
|
||||||
|
elif len(parts) >= 4: # If contains 3-part or more extensions
|
||||||
|
return "." + ".".join(parts[-3:]) # Take the last three parts, like ".metadata.json.bak"
|
||||||
return os.path.splitext(filename)[1] # Otherwise take the regular extension, like ".safetensors"
|
return os.path.splitext(filename)[1] # Otherwise take the regular extension, like ".safetensors"
|
||||||
|
|
||||||
# New common endpoint handlers
|
# New common endpoint handlers
|
||||||
@@ -393,6 +409,15 @@ class ModelRouteUtils:
|
|||||||
raise ValueError("Expected 'model_path' field")
|
raise ValueError("Expected 'model_path' field")
|
||||||
model_path = (await field.read()).decode()
|
model_path = (await field.read()).decode()
|
||||||
|
|
||||||
|
# Read NSFW level (new parameter)
|
||||||
|
nsfw_level = 0 # Default to 0 (unknown)
|
||||||
|
field = await reader.next()
|
||||||
|
if field and field.name == 'nsfw_level':
|
||||||
|
try:
|
||||||
|
nsfw_level = int((await field.read()).decode())
|
||||||
|
except (ValueError, TypeError):
|
||||||
|
logger.warning("Invalid NSFW level format, using default 0")
|
||||||
|
|
||||||
# Save preview file
|
# Save preview file
|
||||||
base_name = os.path.splitext(os.path.basename(model_path))[0]
|
base_name = os.path.splitext(os.path.basename(model_path))[0]
|
||||||
folder = os.path.dirname(model_path)
|
folder = os.path.dirname(model_path)
|
||||||
@@ -413,33 +438,43 @@ class ModelRouteUtils:
|
|||||||
)
|
)
|
||||||
extension = '.webp' # Use .webp without .preview part
|
extension = '.webp' # Use .webp without .preview part
|
||||||
|
|
||||||
|
# Delete any existing preview files for this model
|
||||||
|
for ext in PREVIEW_EXTENSIONS:
|
||||||
|
existing_preview = os.path.join(folder, base_name + ext)
|
||||||
|
if os.path.exists(existing_preview):
|
||||||
|
try:
|
||||||
|
os.remove(existing_preview)
|
||||||
|
logger.debug(f"Deleted existing preview: {existing_preview}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to delete existing preview {existing_preview}: {e}")
|
||||||
|
|
||||||
preview_path = os.path.join(folder, base_name + extension).replace(os.sep, '/')
|
preview_path = os.path.join(folder, base_name + extension).replace(os.sep, '/')
|
||||||
|
|
||||||
with open(preview_path, 'wb') as f:
|
with open(preview_path, 'wb') as f:
|
||||||
f.write(optimized_data)
|
f.write(optimized_data)
|
||||||
|
|
||||||
# Update preview path in metadata
|
# Update preview path and NSFW level in metadata
|
||||||
metadata_path = os.path.splitext(model_path)[0] + '.metadata.json'
|
metadata_path = os.path.splitext(model_path)[0] + '.metadata.json'
|
||||||
if os.path.exists(metadata_path):
|
if os.path.exists(metadata_path):
|
||||||
try:
|
try:
|
||||||
with open(metadata_path, 'r', encoding='utf-8') as f:
|
with open(metadata_path, 'r', encoding='utf-8') as f:
|
||||||
metadata = json.load(f)
|
metadata = json.load(f)
|
||||||
|
|
||||||
# Update preview_url directly in the metadata dict
|
# Update preview_url and preview_nsfw_level in the metadata dict
|
||||||
metadata['preview_url'] = preview_path
|
metadata['preview_url'] = preview_path
|
||||||
|
metadata['preview_nsfw_level'] = nsfw_level
|
||||||
|
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(model_path, metadata)
|
||||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Error updating metadata: {e}")
|
logger.error(f"Error updating metadata: {e}")
|
||||||
|
|
||||||
# Update preview URL in scanner cache
|
# Update preview URL in scanner cache
|
||||||
if hasattr(scanner, 'update_preview_in_cache'):
|
await scanner.update_preview_in_cache(model_path, preview_path, nsfw_level)
|
||||||
await scanner.update_preview_in_cache(model_path, preview_path)
|
|
||||||
|
|
||||||
return web.json_response({
|
return web.json_response({
|
||||||
"success": True,
|
"success": True,
|
||||||
"preview_url": config.get_preview_static_url(preview_path)
|
"preview_url": config.get_preview_static_url(preview_path),
|
||||||
|
"preview_nsfw_level": nsfw_level
|
||||||
})
|
})
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -469,8 +504,7 @@ class ModelRouteUtils:
|
|||||||
metadata['exclude'] = True
|
metadata['exclude'] = True
|
||||||
|
|
||||||
# Save updated metadata
|
# Save updated metadata
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(file_path, metadata)
|
||||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
# Update cache
|
# Update cache
|
||||||
cache = await scanner.get_cached_data()
|
cache = await scanner.get_cached_data()
|
||||||
@@ -759,8 +793,7 @@ class ModelRouteUtils:
|
|||||||
metadata['sha256'] = actual_hash
|
metadata['sha256'] = actual_hash
|
||||||
|
|
||||||
# Save updated metadata
|
# Save updated metadata
|
||||||
with open(metadata_path, 'w', encoding='utf-8') as f:
|
await MetadataManager.save_metadata(file_path, metadata)
|
||||||
json.dump(metadata, f, indent=2, ensure_ascii=False)
|
|
||||||
|
|
||||||
# Update cache
|
# Update cache
|
||||||
await scanner.update_single_model_cache(file_path, file_path, metadata)
|
await scanner.update_single_model_cache(file_path, file_path, metadata)
|
||||||
|
|||||||
@@ -520,4 +520,44 @@
|
|||||||
.card-grid.virtual-scroll {
|
.card-grid.virtual-scroll {
|
||||||
max-width: 2400px;
|
max-width: 2400px;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Add after the existing .lora-card:hover styles */
|
||||||
|
|
||||||
|
@keyframes update-pulse {
|
||||||
|
0% { box-shadow: 0 0 0 0 var(--lora-accent-transparent); }
|
||||||
|
50% { box-shadow: 0 0 0 4px var(--lora-accent-transparent); }
|
||||||
|
100% { box-shadow: 0 0 0 0 var(--lora-accent-transparent); }
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Add semi-transparent version of accent color for animation */
|
||||||
|
:root {
|
||||||
|
--lora-accent-transparent: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.6);
|
||||||
|
}
|
||||||
|
|
||||||
|
.lora-card.updated {
|
||||||
|
animation: update-pulse 1.2s ease-out;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Add a subtle updated tag that fades in and out */
|
||||||
|
.update-indicator {
|
||||||
|
position: absolute;
|
||||||
|
top: 8px;
|
||||||
|
right: 8px;
|
||||||
|
background: var(--lora-accent);
|
||||||
|
color: white;
|
||||||
|
border-radius: var(--border-radius-xs);
|
||||||
|
padding: 3px 6px;
|
||||||
|
font-size: 0.75em;
|
||||||
|
opacity: 0;
|
||||||
|
transform: translateY(-5px);
|
||||||
|
z-index: 4;
|
||||||
|
animation: update-tag 1.8s ease-out forwards;
|
||||||
|
}
|
||||||
|
|
||||||
|
@keyframes update-tag {
|
||||||
|
0% { opacity: 0; transform: translateY(-5px); }
|
||||||
|
15% { opacity: 1; transform: translateY(0); }
|
||||||
|
85% { opacity: 1; transform: translateY(0); }
|
||||||
|
100% { opacity: 0; transform: translateY(0); }
|
||||||
|
}
|
||||||
@@ -79,14 +79,109 @@
|
|||||||
/* Position the toggle button at the top left of showcase media */
|
/* Position the toggle button at the top left of showcase media */
|
||||||
.showcase-toggle-btn {
|
.showcase-toggle-btn {
|
||||||
position: absolute;
|
position: absolute;
|
||||||
left: var(--space-1);
|
|
||||||
top: var(--space-1);
|
|
||||||
z-index: 3;
|
z-index: 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Make sure media wrapper maintains position: relative for absolute positioning of children */
|
/* Add styles for showcase media controls */
|
||||||
.carousel .media-wrapper {
|
.media-controls {
|
||||||
|
position: absolute;
|
||||||
|
display: flex;
|
||||||
|
gap: 6px;
|
||||||
|
z-index: 4;
|
||||||
|
opacity: 0;
|
||||||
|
transform: translateY(-5px);
|
||||||
|
transition: opacity 0.2s ease, transform 0.2s ease;
|
||||||
|
pointer-events: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
.media-controls.visible {
|
||||||
|
opacity: 1;
|
||||||
|
transform: translateY(0);
|
||||||
|
pointer-events: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
.media-control-btn {
|
||||||
|
width: 28px;
|
||||||
|
height: 28px;
|
||||||
|
border-radius: 50%;
|
||||||
|
background: var(--bg-color);
|
||||||
|
border: 1px solid var(--border-color);
|
||||||
|
color: var(--text-color);
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
cursor: pointer;
|
||||||
|
transition: all 0.2s ease;
|
||||||
|
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.15);
|
||||||
|
padding: 0;
|
||||||
position: relative;
|
position: relative;
|
||||||
|
overflow: hidden;
|
||||||
|
}
|
||||||
|
|
||||||
|
.media-control-btn:hover {
|
||||||
|
transform: translateY(-2px);
|
||||||
|
box-shadow: 0 3px 7px rgba(0, 0, 0, 0.2);
|
||||||
|
}
|
||||||
|
|
||||||
|
.media-control-btn.set-preview-btn:hover {
|
||||||
|
background: var(--lora-accent);
|
||||||
|
color: white;
|
||||||
|
border-color: var(--lora-accent);
|
||||||
|
}
|
||||||
|
|
||||||
|
.media-control-btn.example-delete-btn:hover:not(.disabled) {
|
||||||
|
background: var(--lora-error);
|
||||||
|
color: white;
|
||||||
|
border-color: var(--lora-error);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Disabled state for delete button */
|
||||||
|
.media-control-btn.example-delete-btn.disabled {
|
||||||
|
opacity: 0.5;
|
||||||
|
cursor: not-allowed;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Two-step confirmation for delete button */
|
||||||
|
.media-control-btn.example-delete-btn .confirm-icon {
|
||||||
|
position: absolute;
|
||||||
|
top: 0;
|
||||||
|
left: 0;
|
||||||
|
right: 0;
|
||||||
|
bottom: 0;
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
background: var(--lora-error);
|
||||||
|
color: white;
|
||||||
|
font-size: 1em;
|
||||||
|
opacity: 0;
|
||||||
|
transition: opacity 0.2s ease;
|
||||||
|
}
|
||||||
|
|
||||||
|
.media-control-btn.example-delete-btn.confirm .fa-trash-alt {
|
||||||
|
opacity: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.media-control-btn.example-delete-btn.confirm .confirm-icon {
|
||||||
|
opacity: 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
.media-control-btn.example-delete-btn.confirm {
|
||||||
|
background: var(--lora-error);
|
||||||
|
color: white;
|
||||||
|
border-color: var(--lora-error);
|
||||||
|
}
|
||||||
|
|
||||||
|
@keyframes pulse {
|
||||||
|
0% {
|
||||||
|
box-shadow: 0 0 0 0 rgba(220, 53, 69, 0.7);
|
||||||
|
}
|
||||||
|
70% {
|
||||||
|
box-shadow: 0 0 0 5px rgba(220, 53, 69, 0);
|
||||||
|
}
|
||||||
|
100% {
|
||||||
|
box-shadow: 0 0 0 0 rgba(220, 53, 69, 0);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Image Metadata Panel Styles */
|
/* Image Metadata Panel Styles */
|
||||||
@@ -289,4 +384,95 @@
|
|||||||
|
|
||||||
.lazy[src] {
|
.lazy[src] {
|
||||||
opacity: 1;
|
opacity: 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Example Import Area */
|
||||||
|
.example-import-area {
|
||||||
|
margin-top: var(--space-4);
|
||||||
|
padding: var(--space-2);
|
||||||
|
}
|
||||||
|
|
||||||
|
.example-import-area.empty {
|
||||||
|
margin-top: var(--space-2);
|
||||||
|
padding: var(--space-4) var(--space-2);
|
||||||
|
}
|
||||||
|
|
||||||
|
.import-container {
|
||||||
|
border: 2px dashed var(--border-color);
|
||||||
|
border-radius: var(--border-radius-sm);
|
||||||
|
padding: var(--space-4);
|
||||||
|
text-align: center;
|
||||||
|
transition: all 0.3s ease;
|
||||||
|
background: var(--lora-surface);
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
.import-container.highlight {
|
||||||
|
border-color: var(--lora-accent);
|
||||||
|
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
|
||||||
|
transform: scale(1.01);
|
||||||
|
}
|
||||||
|
|
||||||
|
.import-placeholder {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
align-items: center;
|
||||||
|
gap: var(--space-1);
|
||||||
|
padding-top: var(--space-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
.import-placeholder i {
|
||||||
|
font-size: 2.5rem;
|
||||||
|
/* color: var(--lora-accent); */
|
||||||
|
opacity: 0.8;
|
||||||
|
margin-bottom: var(--space-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
.import-placeholder h3 {
|
||||||
|
margin: 0 0 var(--space-1);
|
||||||
|
font-size: 1.2rem;
|
||||||
|
font-weight: 500;
|
||||||
|
color: var(--text-color);
|
||||||
|
}
|
||||||
|
|
||||||
|
.import-placeholder p {
|
||||||
|
margin: var(--space-1) 0;
|
||||||
|
color: var(--text-color);
|
||||||
|
opacity: 0.8;
|
||||||
|
}
|
||||||
|
|
||||||
|
.import-placeholder .sub-text {
|
||||||
|
font-size: 0.9em;
|
||||||
|
opacity: 0.6;
|
||||||
|
margin: var(--space-1) 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.import-formats {
|
||||||
|
font-size: 0.8em !important;
|
||||||
|
opacity: 0.6 !important;
|
||||||
|
margin-top: var(--space-2) !important;
|
||||||
|
}
|
||||||
|
|
||||||
|
.select-files-btn {
|
||||||
|
background: var(--lora-accent);
|
||||||
|
color: var(--lora-text);
|
||||||
|
border: none;
|
||||||
|
border-radius: var(--border-radius-xs);
|
||||||
|
padding: var(--space-2) var(--space-3);
|
||||||
|
cursor: pointer;
|
||||||
|
font-size: 0.9em;
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 8px;
|
||||||
|
transition: all 0.2s;
|
||||||
|
}
|
||||||
|
|
||||||
|
.select-files-btn:hover {
|
||||||
|
opacity: 0.9;
|
||||||
|
transform: translateY(-1px);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* For dark theme */
|
||||||
|
[data-theme="dark"] .import-container {
|
||||||
|
background: rgba(255, 255, 255, 0.03);
|
||||||
}
|
}
|
||||||
@@ -306,18 +306,6 @@ body.modal-open {
|
|||||||
width: 100%; /* Full width */
|
width: 100%; /* Full width */
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Migrate control styling */
|
|
||||||
.migrate-control {
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
gap: 8px;
|
|
||||||
}
|
|
||||||
|
|
||||||
.migrate-control input {
|
|
||||||
flex: 1;
|
|
||||||
min-width: 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* 统一各个 section 的样式 */
|
/* 统一各个 section 的样式 */
|
||||||
.support-section,
|
.support-section,
|
||||||
.changelog-section,
|
.changelog-section,
|
||||||
@@ -375,12 +363,6 @@ body.modal-open {
|
|||||||
background: rgba(255, 255, 255, 0.05);
|
background: rgba(255, 255, 255, 0.05);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Add disabled style for setting items */
|
|
||||||
.setting-item[data-requires-centralized="true"].disabled {
|
|
||||||
opacity: 0.6;
|
|
||||||
pointer-events: none;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Control row with label and input together */
|
/* Control row with label and input together */
|
||||||
.setting-row {
|
.setting-row {
|
||||||
display: flex;
|
display: flex;
|
||||||
@@ -1026,4 +1008,77 @@ input:checked + .toggle-slider:before {
|
|||||||
/* Dark theme adjustments */
|
/* Dark theme adjustments */
|
||||||
[data-theme="dark"] .video-container {
|
[data-theme="dark"] .video-container {
|
||||||
background-color: rgba(255, 255, 255, 0.03);
|
background-color: rgba(255, 255, 255, 0.03);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Example Access Modal */
|
||||||
|
.example-access-modal {
|
||||||
|
max-width: 550px;
|
||||||
|
text-align: center;
|
||||||
|
}
|
||||||
|
|
||||||
|
.example-access-options {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
gap: var(--space-2);
|
||||||
|
margin: var(--space-3) 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.example-option-btn {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
align-items: center;
|
||||||
|
padding: var(--space-2);
|
||||||
|
border-radius: var(--border-radius-sm);
|
||||||
|
border: 1px solid var(--lora-border);
|
||||||
|
background-color: var(--lora-surface);
|
||||||
|
cursor: pointer;
|
||||||
|
transition: all 0.2s;
|
||||||
|
}
|
||||||
|
|
||||||
|
.example-option-btn:hover {
|
||||||
|
transform: translateY(-2px);
|
||||||
|
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
|
||||||
|
border-color: var(--lora-accent);
|
||||||
|
}
|
||||||
|
|
||||||
|
.example-option-btn i {
|
||||||
|
font-size: 2em;
|
||||||
|
margin-bottom: var(--space-1);
|
||||||
|
color: var(--lora-accent);
|
||||||
|
}
|
||||||
|
|
||||||
|
.option-title {
|
||||||
|
font-weight: 500;
|
||||||
|
margin-bottom: 4px;
|
||||||
|
font-size: 1.1em;
|
||||||
|
}
|
||||||
|
|
||||||
|
.option-desc {
|
||||||
|
font-size: 0.9em;
|
||||||
|
opacity: 0.8;
|
||||||
|
}
|
||||||
|
|
||||||
|
.example-option-btn.disabled {
|
||||||
|
opacity: 0.5;
|
||||||
|
cursor: not-allowed;
|
||||||
|
}
|
||||||
|
|
||||||
|
.example-option-btn.disabled i {
|
||||||
|
color: var(--text-color);
|
||||||
|
opacity: 0.5;
|
||||||
|
}
|
||||||
|
|
||||||
|
.modal-footer-note {
|
||||||
|
font-size: 0.9em;
|
||||||
|
opacity: 0.7;
|
||||||
|
margin-top: var(--space-2);
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
gap: 8px;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Dark theme adjustments */
|
||||||
|
[data-theme="dark"] .example-option-btn:hover {
|
||||||
|
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.25);
|
||||||
}
|
}
|
||||||
@@ -542,23 +542,17 @@ export async function excludeModel(filePath, modelType = 'lora') {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Private methods
|
|
||||||
|
|
||||||
// Upload a preview image
|
// Upload a preview image
|
||||||
async function uploadPreview(filePath, file, modelType = 'lora') {
|
export async function uploadPreview(filePath, file, modelType = 'lora', nsfwLevel = 0) {
|
||||||
const loadingOverlay = document.getElementById('loading-overlay');
|
|
||||||
const loadingStatus = document.querySelector('.loading-status');
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
if (loadingOverlay) loadingOverlay.style.display = 'flex';
|
state.loadingManager.showSimpleLoading('Uploading preview...');
|
||||||
if (loadingStatus) loadingStatus.textContent = 'Uploading preview...';
|
|
||||||
|
|
||||||
const formData = new FormData();
|
const formData = new FormData();
|
||||||
|
|
||||||
// Use appropriate parameter names and endpoint based on model type
|
|
||||||
// Prepare common form data
|
// Prepare common form data
|
||||||
formData.append('preview_file', file);
|
formData.append('preview_file', file);
|
||||||
formData.append('model_path', filePath);
|
formData.append('model_path', filePath);
|
||||||
|
formData.append('nsfw_level', nsfwLevel.toString()); // Add nsfw_level parameter
|
||||||
|
|
||||||
// Set endpoint based on model type
|
// Set endpoint based on model type
|
||||||
const endpoint = modelType === 'checkpoint'
|
const endpoint = modelType === 'checkpoint'
|
||||||
@@ -575,56 +569,39 @@ async function uploadPreview(filePath, file, modelType = 'lora') {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const data = await response.json();
|
const data = await response.json();
|
||||||
|
|
||||||
|
// Get the current page's previewVersions Map based on model type
|
||||||
|
const pageType = modelType === 'checkpoint' ? 'checkpoints' : 'loras';
|
||||||
|
const previewVersions = state.pages[pageType].previewVersions;
|
||||||
|
|
||||||
// Update the card preview in UI
|
// Update the version timestamp
|
||||||
const card = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
|
const timestamp = Date.now();
|
||||||
if (card) {
|
if (previewVersions) {
|
||||||
const previewContainer = card.querySelector('.card-preview');
|
previewVersions.set(filePath, timestamp);
|
||||||
const oldPreview = previewContainer.querySelector('img, video');
|
|
||||||
|
|
||||||
// Get the current page's previewVersions Map based on model type
|
// Save the updated Map to localStorage
|
||||||
const pageType = modelType === 'checkpoint' ? 'checkpoints' : 'loras';
|
const storageKey = modelType === 'checkpoint' ? 'checkpoint_preview_versions' : 'lora_preview_versions';
|
||||||
const previewVersions = state.pages[pageType].previewVersions;
|
saveMapToStorage(storageKey, previewVersions);
|
||||||
|
|
||||||
// Update the version timestamp
|
|
||||||
const timestamp = Date.now();
|
|
||||||
if (previewVersions) {
|
|
||||||
previewVersions.set(filePath, timestamp);
|
|
||||||
|
|
||||||
// Save the updated Map to localStorage
|
|
||||||
const storageKey = modelType === 'checkpoint' ? 'checkpoint_preview_versions' : 'lora_preview_versions';
|
|
||||||
saveMapToStorage(storageKey, previewVersions);
|
|
||||||
}
|
|
||||||
|
|
||||||
const previewUrl = data.preview_url ?
|
|
||||||
`${data.preview_url}?t=${timestamp}` :
|
|
||||||
`/api/model/preview_image?path=${encodeURIComponent(filePath)}&t=${timestamp}`;
|
|
||||||
|
|
||||||
// Create appropriate element based on file type
|
|
||||||
if (file.type.startsWith('video/')) {
|
|
||||||
const video = document.createElement('video');
|
|
||||||
video.controls = true;
|
|
||||||
video.autoplay = true;
|
|
||||||
video.muted = true;
|
|
||||||
video.loop = true;
|
|
||||||
video.src = previewUrl;
|
|
||||||
oldPreview.replaceWith(video);
|
|
||||||
} else {
|
|
||||||
const img = document.createElement('img');
|
|
||||||
img.src = previewUrl;
|
|
||||||
oldPreview.replaceWith(img);
|
|
||||||
}
|
|
||||||
|
|
||||||
showToast('Preview updated successfully', 'success');
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const updateData = {
|
||||||
|
preview_url: data.preview_url,
|
||||||
|
preview_nsfw_level: data.preview_nsfw_level // Include nsfw level in update data
|
||||||
|
};
|
||||||
|
|
||||||
|
state.virtualScroller.updateSingleItem(filePath, updateData);
|
||||||
|
|
||||||
|
showToast('Preview updated successfully', 'success');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error uploading preview:', error);
|
console.error('Error uploading preview:', error);
|
||||||
showToast('Failed to upload preview image', 'error');
|
showToast('Failed to upload preview image', 'error');
|
||||||
} finally {
|
} finally {
|
||||||
if (loadingOverlay) loadingOverlay.style.display = 'none';
|
state.loadingManager.hide();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Private methods
|
||||||
|
|
||||||
// Private function to perform the delete operation
|
// Private function to perform the delete operation
|
||||||
async function performDelete(filePath, modelType = 'lora') {
|
async function performDelete(filePath, modelType = 'lora') {
|
||||||
try {
|
try {
|
||||||
|
|||||||
@@ -109,6 +109,9 @@ export async function saveModelMetadata(filePath, data) {
|
|||||||
if (!response.ok) {
|
if (!response.ok) {
|
||||||
throw new Error('Failed to save metadata');
|
throw new Error('Failed to save metadata');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Update the virtual scroller with the new metadata
|
||||||
|
state.virtualScroller.updateSingleItem(filePath, data);
|
||||||
|
|
||||||
return response.json();
|
return response.json();
|
||||||
} finally {
|
} finally {
|
||||||
|
|||||||
@@ -36,6 +36,9 @@ export async function saveModelMetadata(filePath, data) {
|
|||||||
if (!response.ok) {
|
if (!response.ok) {
|
||||||
throw new Error('Failed to save metadata');
|
throw new Error('Failed to save metadata');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Update the virtual scroller with the new data
|
||||||
|
state.virtualScroller.updateSingleItem(filePath, data);
|
||||||
|
|
||||||
return response.json();
|
return response.json();
|
||||||
} finally {
|
} finally {
|
||||||
|
|||||||
@@ -219,18 +219,9 @@ export function createCheckpointCard(checkpoint) {
|
|||||||
favorite: newFavoriteState
|
favorite: newFavoriteState
|
||||||
});
|
});
|
||||||
|
|
||||||
// Update the UI
|
|
||||||
if (newFavoriteState) {
|
if (newFavoriteState) {
|
||||||
starIcon.classList.remove('far');
|
|
||||||
starIcon.classList.add('fas', 'favorite-active');
|
|
||||||
starIcon.title = 'Remove from favorites';
|
|
||||||
card.dataset.favorite = 'true';
|
|
||||||
showToast('Added to favorites', 'success');
|
showToast('Added to favorites', 'success');
|
||||||
} else {
|
} else {
|
||||||
starIcon.classList.remove('fas', 'favorite-active');
|
|
||||||
starIcon.classList.add('far');
|
|
||||||
starIcon.title = 'Add to favorites';
|
|
||||||
card.dataset.favorite = 'false';
|
|
||||||
showToast('Removed from favorites', 'success');
|
showToast('Removed from favorites', 'success');
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ import { ModelContextMenuMixin } from './ModelContextMenuMixin.js';
|
|||||||
import { refreshSingleCheckpointMetadata, saveModelMetadata, replaceCheckpointPreview, resetAndReload } from '../../api/checkpointApi.js';
|
import { refreshSingleCheckpointMetadata, saveModelMetadata, replaceCheckpointPreview, resetAndReload } from '../../api/checkpointApi.js';
|
||||||
import { showToast } from '../../utils/uiHelpers.js';
|
import { showToast } from '../../utils/uiHelpers.js';
|
||||||
import { showExcludeModal } from '../../utils/modalUtils.js';
|
import { showExcludeModal } from '../../utils/modalUtils.js';
|
||||||
import { state } from '../../state/index.js';
|
|
||||||
|
|
||||||
export class CheckpointContextMenu extends BaseContextMenu {
|
export class CheckpointContextMenu extends BaseContextMenu {
|
||||||
constructor() {
|
constructor() {
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
import { BaseContextMenu } from './BaseContextMenu.js';
|
import { BaseContextMenu } from './BaseContextMenu.js';
|
||||||
import { ModelContextMenuMixin } from './ModelContextMenuMixin.js';
|
import { ModelContextMenuMixin } from './ModelContextMenuMixin.js';
|
||||||
import { refreshSingleLoraMetadata, saveModelMetadata, replacePreview, resetAndReload } from '../../api/loraApi.js';
|
import { refreshSingleLoraMetadata, saveModelMetadata, replacePreview, resetAndReload } from '../../api/loraApi.js';
|
||||||
import { showToast, copyToClipboard, sendLoraToWorkflow } from '../../utils/uiHelpers.js';
|
import { copyToClipboard, sendLoraToWorkflow } from '../../utils/uiHelpers.js';
|
||||||
import { showExcludeModal, showDeleteModal } from '../../utils/modalUtils.js';
|
import { showExcludeModal, showDeleteModal } from '../../utils/modalUtils.js';
|
||||||
|
|
||||||
export class LoraContextMenu extends BaseContextMenu {
|
export class LoraContextMenu extends BaseContextMenu {
|
||||||
|
|||||||
@@ -26,24 +26,6 @@ export const ModelContextMenuMixin = {
|
|||||||
try {
|
try {
|
||||||
await this.saveModelMetadata(filePath, { preview_nsfw_level: level });
|
await this.saveModelMetadata(filePath, { preview_nsfw_level: level });
|
||||||
|
|
||||||
// Update card data
|
|
||||||
const card = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
|
|
||||||
if (card) {
|
|
||||||
let metaData = {};
|
|
||||||
try {
|
|
||||||
metaData = JSON.parse(card.dataset.meta || '{}');
|
|
||||||
} catch (err) {
|
|
||||||
console.error('Error parsing metadata:', err);
|
|
||||||
}
|
|
||||||
|
|
||||||
metaData.preview_nsfw_level = level;
|
|
||||||
card.dataset.meta = JSON.stringify(metaData);
|
|
||||||
card.dataset.nsfwLevel = level.toString();
|
|
||||||
|
|
||||||
// Apply blur effect immediately
|
|
||||||
this.updateCardBlurEffect(card, level);
|
|
||||||
}
|
|
||||||
|
|
||||||
showToast(`Content rating set to ${getNSFWLevelName(level)}`, 'success');
|
showToast(`Content rating set to ${getNSFWLevelName(level)}`, 'success');
|
||||||
this.nsfwSelector.style.display = 'none';
|
this.nsfwSelector.style.display = 'none';
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
|
|||||||
@@ -70,7 +70,7 @@ function handleLoraCardEvent(event) {
|
|||||||
|
|
||||||
if (event.target.closest('.fa-folder-open')) {
|
if (event.target.closest('.fa-folder-open')) {
|
||||||
event.stopPropagation();
|
event.stopPropagation();
|
||||||
openExampleImagesFolder(card.dataset.sha256);
|
handleExampleImagesAccess(card);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -162,18 +162,9 @@ async function toggleFavorite(card) {
|
|||||||
favorite: newFavoriteState
|
favorite: newFavoriteState
|
||||||
});
|
});
|
||||||
|
|
||||||
// Update the UI
|
|
||||||
if (newFavoriteState) {
|
if (newFavoriteState) {
|
||||||
starIcon.classList.remove('far');
|
|
||||||
starIcon.classList.add('fas', 'favorite-active');
|
|
||||||
starIcon.title = 'Remove from favorites';
|
|
||||||
card.dataset.favorite = 'true';
|
|
||||||
showToast('Added to favorites', 'success');
|
showToast('Added to favorites', 'success');
|
||||||
} else {
|
} else {
|
||||||
starIcon.classList.remove('fas', 'favorite-active');
|
|
||||||
starIcon.classList.add('far');
|
|
||||||
starIcon.title = 'Add to favorites';
|
|
||||||
card.dataset.favorite = 'false';
|
|
||||||
showToast('Removed from favorites', 'success');
|
showToast('Removed from favorites', 'success');
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
@@ -200,6 +191,142 @@ function copyLoraSyntax(card) {
|
|||||||
copyToClipboard(loraSyntax, 'LoRA syntax copied to clipboard');
|
copyToClipboard(loraSyntax, 'LoRA syntax copied to clipboard');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// New function to handle example images access
|
||||||
|
async function handleExampleImagesAccess(card) {
|
||||||
|
const modelHash = card.dataset.sha256;
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Check if example images exist
|
||||||
|
const response = await fetch(`/api/has-example-images?model_hash=${modelHash}`);
|
||||||
|
const data = await response.json();
|
||||||
|
|
||||||
|
if (data.has_images) {
|
||||||
|
// If images exist, open the folder directly (existing behavior)
|
||||||
|
openExampleImagesFolder(modelHash);
|
||||||
|
} else {
|
||||||
|
// If no images exist, show the new modal
|
||||||
|
showExampleAccessModal(card);
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Error checking for example images:', error);
|
||||||
|
showToast('Error checking for example images', 'error');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Function to show the example access modal
|
||||||
|
function showExampleAccessModal(card) {
|
||||||
|
const modal = document.getElementById('exampleAccessModal');
|
||||||
|
if (!modal) return;
|
||||||
|
|
||||||
|
// Get download button and determine if download should be enabled
|
||||||
|
const downloadBtn = modal.querySelector('#downloadExamplesBtn');
|
||||||
|
let hasRemoteExamples = false;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const metaData = JSON.parse(card.dataset.meta || '{}');
|
||||||
|
hasRemoteExamples = metaData.images &&
|
||||||
|
Array.isArray(metaData.images) &&
|
||||||
|
metaData.images.length > 0 &&
|
||||||
|
metaData.images[0].url;
|
||||||
|
} catch (e) {
|
||||||
|
console.error('Error parsing meta data:', e);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enable or disable download button
|
||||||
|
if (downloadBtn) {
|
||||||
|
if (hasRemoteExamples) {
|
||||||
|
downloadBtn.classList.remove('disabled');
|
||||||
|
downloadBtn.removeAttribute('title'); // Remove any previous tooltip
|
||||||
|
downloadBtn.onclick = () => {
|
||||||
|
modalManager.closeModal('exampleAccessModal');
|
||||||
|
// Open settings modal and scroll to example images section
|
||||||
|
const settingsModal = document.getElementById('settingsModal');
|
||||||
|
if (settingsModal) {
|
||||||
|
modalManager.showModal('settingsModal');
|
||||||
|
// Scroll to example images section after modal is visible
|
||||||
|
setTimeout(() => {
|
||||||
|
const exampleSection = settingsModal.querySelector('.settings-section:nth-child(5)'); // Example Images section
|
||||||
|
if (exampleSection) {
|
||||||
|
exampleSection.scrollIntoView({ behavior: 'smooth' });
|
||||||
|
}
|
||||||
|
}, 300);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
downloadBtn.classList.add('disabled');
|
||||||
|
downloadBtn.setAttribute('title', 'No remote example images available for this model on Civitai');
|
||||||
|
downloadBtn.onclick = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set up import button
|
||||||
|
const importBtn = modal.querySelector('#importExamplesBtn');
|
||||||
|
if (importBtn) {
|
||||||
|
importBtn.onclick = () => {
|
||||||
|
modalManager.closeModal('exampleAccessModal');
|
||||||
|
|
||||||
|
// Get the lora data from card dataset
|
||||||
|
const loraMeta = {
|
||||||
|
sha256: card.dataset.sha256,
|
||||||
|
file_path: card.dataset.filepath,
|
||||||
|
model_name: card.dataset.name,
|
||||||
|
file_name: card.dataset.file_name,
|
||||||
|
// Other properties needed for showLoraModal
|
||||||
|
folder: card.dataset.folder,
|
||||||
|
modified: card.dataset.modified,
|
||||||
|
file_size: card.dataset.file_size,
|
||||||
|
from_civitai: card.dataset.from_civitai === 'true',
|
||||||
|
base_model: card.dataset.base_model,
|
||||||
|
usage_tips: card.dataset.usage_tips,
|
||||||
|
notes: card.dataset.notes,
|
||||||
|
favorite: card.dataset.favorite === 'true',
|
||||||
|
civitai: (() => {
|
||||||
|
try {
|
||||||
|
return JSON.parse(card.dataset.meta || '{}');
|
||||||
|
} catch (e) {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
})(),
|
||||||
|
tags: JSON.parse(card.dataset.tags || '[]'),
|
||||||
|
modelDescription: card.dataset.modelDescription || ''
|
||||||
|
};
|
||||||
|
|
||||||
|
// Show the lora modal
|
||||||
|
showLoraModal(loraMeta);
|
||||||
|
|
||||||
|
// Scroll to import area after modal is visible
|
||||||
|
setTimeout(() => {
|
||||||
|
const importArea = document.querySelector('.example-import-area');
|
||||||
|
if (importArea) {
|
||||||
|
const showcaseTab = document.getElementById('showcase-tab');
|
||||||
|
if (showcaseTab) {
|
||||||
|
// First make sure showcase tab is visible
|
||||||
|
const tabBtn = document.querySelector('.tab-btn[data-tab="showcase"]');
|
||||||
|
if (tabBtn && !tabBtn.classList.contains('active')) {
|
||||||
|
tabBtn.click();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Then toggle showcase if collapsed
|
||||||
|
const carousel = showcaseTab.querySelector('.carousel');
|
||||||
|
if (carousel && carousel.classList.contains('collapsed')) {
|
||||||
|
const scrollIndicator = showcaseTab.querySelector('.scroll-indicator');
|
||||||
|
if (scrollIndicator) {
|
||||||
|
scrollIndicator.click();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally scroll to the import area
|
||||||
|
importArea.scrollIntoView({ behavior: 'smooth' });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, 500);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Show the modal
|
||||||
|
modalManager.showModal('exampleAccessModal');
|
||||||
|
}
|
||||||
|
|
||||||
export function createLoraCard(lora) {
|
export function createLoraCard(lora) {
|
||||||
const card = document.createElement('div');
|
const card = document.createElement('div');
|
||||||
card.className = 'lora-card';
|
card.className = 'lora-card';
|
||||||
|
|||||||
@@ -114,16 +114,6 @@ export function setupModelNameEditing(filePath) {
|
|||||||
|
|
||||||
await saveModelMetadata(filePath, { model_name: newModelName });
|
await saveModelMetadata(filePath, { model_name: newModelName });
|
||||||
|
|
||||||
// Update the corresponding checkpoint card's dataset and display
|
|
||||||
updateModelCard(filePath, { model_name: newModelName });
|
|
||||||
|
|
||||||
// BUGFIX: Directly update the card's dataset.name attribute to ensure
|
|
||||||
// it's correctly read when reopening the modal
|
|
||||||
const checkpointCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
|
|
||||||
if (checkpointCard) {
|
|
||||||
checkpointCard.dataset.name = newModelName;
|
|
||||||
}
|
|
||||||
|
|
||||||
showToast('Model name updated successfully', 'success');
|
showToast('Model name updated successfully', 'success');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error updating model name:', error);
|
console.error('Error updating model name:', error);
|
||||||
@@ -300,9 +290,6 @@ async function saveBaseModel(filePath, originalValue) {
|
|||||||
try {
|
try {
|
||||||
await saveModelMetadata(filePath, { base_model: newBaseModel });
|
await saveModelMetadata(filePath, { base_model: newBaseModel });
|
||||||
|
|
||||||
// Update the card with the new base model
|
|
||||||
updateModelCard(filePath, { base_model: newBaseModel });
|
|
||||||
|
|
||||||
showToast('Base model updated successfully', 'success');
|
showToast('Base model updated successfully', 'success');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
showToast('Failed to update base model', 'error');
|
showToast('Failed to update base model', 'error');
|
||||||
|
|||||||
@@ -463,12 +463,6 @@ async function saveTags() {
|
|||||||
// Exit edit mode
|
// Exit edit mode
|
||||||
editBtn.click();
|
editBtn.click();
|
||||||
|
|
||||||
// Update the checkpoint card's dataset
|
|
||||||
const checkpointCard = document.querySelector(`.checkpoint-card[data-filepath="${filePath}"]`);
|
|
||||||
if (checkpointCard) {
|
|
||||||
checkpointCard.dataset.tags = JSON.stringify(tags);
|
|
||||||
}
|
|
||||||
|
|
||||||
showToast('Tags updated successfully', 'success');
|
showToast('Tags updated successfully', 'success');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error saving tags:', error);
|
console.error('Error saving tags:', error);
|
||||||
|
|||||||
@@ -1,346 +0,0 @@
|
|||||||
/**
|
|
||||||
* ShowcaseView.js
|
|
||||||
* Handles showcase content (images, videos) display for checkpoint modal
|
|
||||||
*/
|
|
||||||
import {
|
|
||||||
toggleShowcase,
|
|
||||||
setupShowcaseScroll,
|
|
||||||
scrollToTop
|
|
||||||
} from '../../utils/uiHelpers.js';
|
|
||||||
import { state } from '../../state/index.js';
|
|
||||||
import { NSFW_LEVELS } from '../../utils/constants.js';
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Render showcase content
|
|
||||||
* @param {Array} images - Array of images/videos to show
|
|
||||||
* @param {string} modelHash - Model hash for identifying local files
|
|
||||||
* @param {Array} exampleFiles - Local example files already fetched
|
|
||||||
* @returns {string} HTML content
|
|
||||||
*/
|
|
||||||
export function renderShowcaseContent(images, exampleFiles = []) {
|
|
||||||
if (!images?.length) return '<div class="no-examples">No example images available</div>';
|
|
||||||
|
|
||||||
// Filter images based on SFW setting
|
|
||||||
const showOnlySFW = state.settings.show_only_sfw;
|
|
||||||
let filteredImages = images;
|
|
||||||
let hiddenCount = 0;
|
|
||||||
|
|
||||||
if (showOnlySFW) {
|
|
||||||
filteredImages = images.filter(img => {
|
|
||||||
const nsfwLevel = img.nsfwLevel !== undefined ? img.nsfwLevel : 0;
|
|
||||||
const isSfw = nsfwLevel < NSFW_LEVELS.R;
|
|
||||||
if (!isSfw) hiddenCount++;
|
|
||||||
return isSfw;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Show message if no images are available after filtering
|
|
||||||
if (filteredImages.length === 0) {
|
|
||||||
return `
|
|
||||||
<div class="no-examples">
|
|
||||||
<p>All example images are filtered due to NSFW content settings</p>
|
|
||||||
<p class="nsfw-filter-info">Your settings are currently set to show only safe-for-work content</p>
|
|
||||||
<p>You can change this in Settings <i class="fas fa-cog"></i></p>
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Show hidden content notification if applicable
|
|
||||||
const hiddenNotification = hiddenCount > 0 ?
|
|
||||||
`<div class="nsfw-filter-notification">
|
|
||||||
<i class="fas fa-eye-slash"></i> ${hiddenCount} ${hiddenCount === 1 ? 'image' : 'images'} hidden due to SFW-only setting
|
|
||||||
</div>` : '';
|
|
||||||
|
|
||||||
return `
|
|
||||||
<div class="scroll-indicator" onclick="toggleShowcase(this)">
|
|
||||||
<i class="fas fa-chevron-down"></i>
|
|
||||||
<span>Scroll or click to show ${filteredImages.length} examples</span>
|
|
||||||
</div>
|
|
||||||
<div class="carousel collapsed">
|
|
||||||
${hiddenNotification}
|
|
||||||
<div class="carousel-container">
|
|
||||||
${filteredImages.map((img, index) => {
|
|
||||||
// Find matching file in our list of actual files
|
|
||||||
let localFile = null;
|
|
||||||
if (exampleFiles.length > 0) {
|
|
||||||
// Try to find the corresponding file by index first
|
|
||||||
localFile = exampleFiles.find(file => {
|
|
||||||
const match = file.name.match(/image_(\d+)\./);
|
|
||||||
return match && parseInt(match[1]) === index;
|
|
||||||
});
|
|
||||||
|
|
||||||
// If not found by index, just use the same position in the array if available
|
|
||||||
if (!localFile && index < exampleFiles.length) {
|
|
||||||
localFile = exampleFiles[index];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const remoteUrl = img.url || '';
|
|
||||||
const localUrl = localFile ? localFile.path : '';
|
|
||||||
const isVideo = localFile ? localFile.is_video :
|
|
||||||
remoteUrl.endsWith('.mp4') || remoteUrl.endsWith('.webm');
|
|
||||||
|
|
||||||
// Calculate appropriate aspect ratio
|
|
||||||
const aspectRatio = (img.height / img.width) * 100;
|
|
||||||
const containerWidth = 800; // modal content maximum width
|
|
||||||
const minHeightPercent = 40;
|
|
||||||
const maxHeightPercent = (window.innerHeight * 0.6 / containerWidth) * 100;
|
|
||||||
const heightPercent = Math.max(
|
|
||||||
minHeightPercent,
|
|
||||||
Math.min(maxHeightPercent, aspectRatio)
|
|
||||||
);
|
|
||||||
|
|
||||||
// Check if media should be blurred
|
|
||||||
const nsfwLevel = img.nsfwLevel !== undefined ? img.nsfwLevel : 0;
|
|
||||||
const shouldBlur = state.settings.blurMatureContent && nsfwLevel > NSFW_LEVELS.PG13;
|
|
||||||
|
|
||||||
// Determine NSFW warning text based on level
|
|
||||||
let nsfwText = "Mature Content";
|
|
||||||
if (nsfwLevel >= NSFW_LEVELS.XXX) {
|
|
||||||
nsfwText = "XXX-rated Content";
|
|
||||||
} else if (nsfwLevel >= NSFW_LEVELS.X) {
|
|
||||||
nsfwText = "X-rated Content";
|
|
||||||
} else if (nsfwLevel >= NSFW_LEVELS.R) {
|
|
||||||
nsfwText = "R-rated Content";
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract metadata from the image
|
|
||||||
const meta = img.meta || {};
|
|
||||||
const prompt = meta.prompt || '';
|
|
||||||
const negativePrompt = meta.negative_prompt || meta.negativePrompt || '';
|
|
||||||
const size = meta.Size || `${img.width}x${img.height}`;
|
|
||||||
const seed = meta.seed || '';
|
|
||||||
const model = meta.Model || '';
|
|
||||||
const steps = meta.steps || '';
|
|
||||||
const sampler = meta.sampler || '';
|
|
||||||
const cfgScale = meta.cfgScale || '';
|
|
||||||
const clipSkip = meta.clipSkip || '';
|
|
||||||
|
|
||||||
// Check if we have any meaningful generation parameters
|
|
||||||
const hasParams = seed || model || steps || sampler || cfgScale || clipSkip;
|
|
||||||
const hasPrompts = prompt || negativePrompt;
|
|
||||||
|
|
||||||
// Create metadata panel content
|
|
||||||
const metadataPanel = generateMetadataPanel(
|
|
||||||
hasParams, hasPrompts,
|
|
||||||
prompt, negativePrompt,
|
|
||||||
size, seed, model, steps, sampler, cfgScale, clipSkip
|
|
||||||
);
|
|
||||||
|
|
||||||
// Check if this is a video or image
|
|
||||||
if (isVideo) {
|
|
||||||
return generateVideoWrapper(
|
|
||||||
img, heightPercent, shouldBlur, nsfwText, metadataPanel,
|
|
||||||
localUrl, remoteUrl
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return generateImageWrapper(
|
|
||||||
img, heightPercent, shouldBlur, nsfwText, metadataPanel,
|
|
||||||
localUrl, remoteUrl
|
|
||||||
);
|
|
||||||
}).join('')}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Generate media wrapper HTML for an image or video
|
|
||||||
* @param {Object} media - Media object with image or video data
|
|
||||||
* @returns {string} HTML content
|
|
||||||
*/
|
|
||||||
function generateMediaWrapper(media, urls) {
|
|
||||||
// Calculate appropriate aspect ratio
|
|
||||||
const aspectRatio = (media.height / media.width) * 100;
|
|
||||||
const containerWidth = 800; // modal content maximum width
|
|
||||||
const minHeightPercent = 40;
|
|
||||||
const maxHeightPercent = (window.innerHeight * 0.6 / containerWidth) * 100;
|
|
||||||
const heightPercent = Math.max(
|
|
||||||
minHeightPercent,
|
|
||||||
Math.min(maxHeightPercent, aspectRatio)
|
|
||||||
);
|
|
||||||
|
|
||||||
// Check if media should be blurred
|
|
||||||
const nsfwLevel = media.nsfwLevel !== undefined ? media.nsfwLevel : 0;
|
|
||||||
const shouldBlur = state.settings.blurMatureContent && nsfwLevel > NSFW_LEVELS.PG13;
|
|
||||||
|
|
||||||
// Determine NSFW warning text based on level
|
|
||||||
let nsfwText = "Mature Content";
|
|
||||||
if (nsfwLevel >= NSFW_LEVELS.XXX) {
|
|
||||||
nsfwText = "XXX-rated Content";
|
|
||||||
} else if (nsfwLevel >= NSFW_LEVELS.X) {
|
|
||||||
nsfwText = "X-rated Content";
|
|
||||||
} else if (nsfwLevel >= NSFW_LEVELS.R) {
|
|
||||||
nsfwText = "R-rated Content";
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract metadata from the media
|
|
||||||
const meta = media.meta || {};
|
|
||||||
const prompt = meta.prompt || '';
|
|
||||||
const negativePrompt = meta.negative_prompt || meta.negativePrompt || '';
|
|
||||||
const size = meta.Size || `${media.width}x${media.height}`;
|
|
||||||
const seed = meta.seed || '';
|
|
||||||
const model = meta.Model || '';
|
|
||||||
const steps = meta.steps || '';
|
|
||||||
const sampler = meta.sampler || '';
|
|
||||||
const cfgScale = meta.cfgScale || '';
|
|
||||||
const clipSkip = meta.clipSkip || '';
|
|
||||||
|
|
||||||
// Check if we have any meaningful generation parameters
|
|
||||||
const hasParams = seed || model || steps || sampler || cfgScale || clipSkip;
|
|
||||||
const hasPrompts = prompt || negativePrompt;
|
|
||||||
|
|
||||||
// Create metadata panel content
|
|
||||||
const metadataPanel = generateMetadataPanel(
|
|
||||||
hasParams, hasPrompts,
|
|
||||||
prompt, negativePrompt,
|
|
||||||
size, seed, model, steps, sampler, cfgScale, clipSkip
|
|
||||||
);
|
|
||||||
|
|
||||||
// Check if this is a video or image
|
|
||||||
if (media.type === 'video') {
|
|
||||||
return generateVideoWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, urls);
|
|
||||||
}
|
|
||||||
|
|
||||||
return generateImageWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, urls);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Generate metadata panel HTML
|
|
||||||
*/
|
|
||||||
function generateMetadataPanel(hasParams, hasPrompts, prompt, negativePrompt, size, seed, model, steps, sampler, cfgScale, clipSkip) {
|
|
||||||
// Create unique IDs for prompt copying
|
|
||||||
const promptIndex = Math.random().toString(36).substring(2, 15);
|
|
||||||
const negPromptIndex = Math.random().toString(36).substring(2, 15);
|
|
||||||
|
|
||||||
let content = '<div class="image-metadata-panel"><div class="metadata-content">';
|
|
||||||
|
|
||||||
if (hasParams) {
|
|
||||||
content += `
|
|
||||||
<div class="params-tags">
|
|
||||||
${size ? `<div class="param-tag"><span class="param-name">Size:</span><span class="param-value">${size}</span></div>` : ''}
|
|
||||||
${seed ? `<div class="param-tag"><span class="param-name">Seed:</span><span class="param-value">${seed}</span></div>` : ''}
|
|
||||||
${model ? `<div class="param-tag"><span class="param-name">Model:</span><span class="param-value">${model}</span></div>` : ''}
|
|
||||||
${steps ? `<div class="param-tag"><span class="param-name">Steps:</span><span class="param-value">${steps}</span></div>` : ''}
|
|
||||||
${sampler ? `<div class="param-tag"><span class="param-name">Sampler:</span><span class="param-value">${sampler}</span></div>` : ''}
|
|
||||||
${cfgScale ? `<div class="param-tag"><span class="param-name">CFG:</span><span class="param-value">${cfgScale}</span></div>` : ''}
|
|
||||||
${clipSkip ? `<div class="param-tag"><span class="param-name">Clip Skip:</span><span class="param-value">${clipSkip}</span></div>` : ''}
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!hasParams && !hasPrompts) {
|
|
||||||
content += `
|
|
||||||
<div class="no-metadata-message">
|
|
||||||
<i class="fas fa-info-circle"></i>
|
|
||||||
<span>No generation parameters available</span>
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (prompt) {
|
|
||||||
content += `
|
|
||||||
<div class="metadata-row prompt-row">
|
|
||||||
<span class="metadata-label">Prompt:</span>
|
|
||||||
<div class="metadata-prompt-wrapper">
|
|
||||||
<div class="metadata-prompt">${prompt}</div>
|
|
||||||
<button class="copy-prompt-btn" data-prompt-index="${promptIndex}">
|
|
||||||
<i class="fas fa-copy"></i>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div class="hidden-prompt" id="prompt-${promptIndex}" style="display:none;">${prompt}</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (negativePrompt) {
|
|
||||||
content += `
|
|
||||||
<div class="metadata-row prompt-row">
|
|
||||||
<span class="metadata-label">Negative Prompt:</span>
|
|
||||||
<div class="metadata-prompt-wrapper">
|
|
||||||
<div class="metadata-prompt">${negativePrompt}</div>
|
|
||||||
<button class="copy-prompt-btn" data-prompt-index="${negPromptIndex}">
|
|
||||||
<i class="fas fa-copy"></i>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div class="hidden-prompt" id="prompt-${negPromptIndex}" style="display:none;">${negativePrompt}</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
content += '</div></div>';
|
|
||||||
return content;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Generate video wrapper HTML
|
|
||||||
*/
|
|
||||||
function generateVideoWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl, remoteUrl) {
|
|
||||||
return `
|
|
||||||
<div class="media-wrapper ${shouldBlur ? 'nsfw-media-wrapper' : ''}" style="padding-bottom: ${heightPercent}%">
|
|
||||||
${shouldBlur ? `
|
|
||||||
<button class="toggle-blur-btn showcase-toggle-btn" title="Toggle blur">
|
|
||||||
<i class="fas fa-eye"></i>
|
|
||||||
</button>
|
|
||||||
` : ''}
|
|
||||||
<video controls autoplay muted loop crossorigin="anonymous"
|
|
||||||
referrerpolicy="no-referrer"
|
|
||||||
data-local-src="${localUrl || ''}"
|
|
||||||
data-remote-src="${remoteUrl}"
|
|
||||||
class="lazy ${shouldBlur ? 'blurred' : ''}">
|
|
||||||
<source data-local-src="${localUrl || ''}" data-remote-src="${remoteUrl}" type="video/mp4">
|
|
||||||
Your browser does not support video playback
|
|
||||||
</video>
|
|
||||||
${shouldBlur ? `
|
|
||||||
<div class="nsfw-overlay">
|
|
||||||
<div class="nsfw-warning">
|
|
||||||
<p>${nsfwText}</p>
|
|
||||||
<button class="show-content-btn">Show</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
` : ''}
|
|
||||||
${metadataPanel}
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Generate image wrapper HTML
|
|
||||||
*/
|
|
||||||
function generateImageWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl, remoteUrl) {
|
|
||||||
return `
|
|
||||||
<div class="media-wrapper ${shouldBlur ? 'nsfw-media-wrapper' : ''}" style="padding-bottom: ${heightPercent}%">
|
|
||||||
${shouldBlur ? `
|
|
||||||
<button class="toggle-blur-btn showcase-toggle-btn" title="Toggle blur">
|
|
||||||
<i class="fas fa-eye"></i>
|
|
||||||
</button>
|
|
||||||
` : ''}
|
|
||||||
<img data-local-src="${localUrl || ''}"
|
|
||||||
data-remote-src="${remoteUrl}"
|
|
||||||
alt="Preview"
|
|
||||||
crossorigin="anonymous"
|
|
||||||
referrerpolicy="no-referrer"
|
|
||||||
width="${media.width}"
|
|
||||||
height="${media.height}"
|
|
||||||
class="lazy ${shouldBlur ? 'blurred' : ''}">
|
|
||||||
${shouldBlur ? `
|
|
||||||
<div class="nsfw-overlay">
|
|
||||||
<div class="nsfw-warning">
|
|
||||||
<p>${nsfwText}</p>
|
|
||||||
<button class="show-content-btn">Show</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
` : ''}
|
|
||||||
${metadataPanel}
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use the shared setupShowcaseScroll function with the correct modal ID
|
|
||||||
export { setupShowcaseScroll, scrollToTop, toggleShowcase };
|
|
||||||
|
|
||||||
// Initialize the showcase scroll when this module is imported
|
|
||||||
document.addEventListener('DOMContentLoaded', () => {
|
|
||||||
setupShowcaseScroll('checkpointModal');
|
|
||||||
});
|
|
||||||
@@ -3,9 +3,14 @@
|
|||||||
*
|
*
|
||||||
* Modularized checkpoint modal component that handles checkpoint model details display
|
* Modularized checkpoint modal component that handles checkpoint model details display
|
||||||
*/
|
*/
|
||||||
import { showToast, getExampleImageFiles, initLazyLoading, initNsfwBlurHandlers, initMetadataPanelHandlers } from '../../utils/uiHelpers.js';
|
import { showToast } from '../../utils/uiHelpers.js';
|
||||||
import { modalManager } from '../../managers/ModalManager.js';
|
import { modalManager } from '../../managers/ModalManager.js';
|
||||||
import { renderShowcaseContent, toggleShowcase, setupShowcaseScroll, scrollToTop } from './ShowcaseView.js';
|
import {
|
||||||
|
toggleShowcase,
|
||||||
|
setupShowcaseScroll,
|
||||||
|
scrollToTop,
|
||||||
|
loadExampleImages
|
||||||
|
} from '../shared/showcase/ShowcaseView.js';
|
||||||
import { setupTabSwitching, loadModelDescription } from './ModelDescription.js';
|
import { setupTabSwitching, loadModelDescription } from './ModelDescription.js';
|
||||||
import {
|
import {
|
||||||
setupModelNameEditing,
|
setupModelNameEditing,
|
||||||
@@ -15,8 +20,6 @@ import {
|
|||||||
import { setupTagEditMode } from './ModelTags.js'; // Add import for tag editing
|
import { setupTagEditMode } from './ModelTags.js'; // Add import for tag editing
|
||||||
import { saveModelMetadata } from '../../api/checkpointApi.js';
|
import { saveModelMetadata } from '../../api/checkpointApi.js';
|
||||||
import { renderCompactTags, setupTagTooltip, formatFileSize } from './utils.js';
|
import { renderCompactTags, setupTagTooltip, formatFileSize } from './utils.js';
|
||||||
import { updateModelCard } from '../../utils/cardUpdater.js';
|
|
||||||
import { state } from '../../state/index.js';
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Display the checkpoint modal with the given checkpoint data
|
* Display the checkpoint modal with the given checkpoint data
|
||||||
@@ -103,7 +106,7 @@ export function showCheckpointModal(checkpoint) {
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div class="showcase-section" data-checkpoint-id="${checkpoint.civitai?.modelId || ''}">
|
<div class="showcase-section" data-model-hash="${checkpoint.sha256 || ''}" data-filepath="${checkpoint.file_path}">
|
||||||
<div class="showcase-tabs">
|
<div class="showcase-tabs">
|
||||||
<button class="tab-btn active" data-tab="showcase">Examples</button>
|
<button class="tab-btn active" data-tab="showcase">Examples</button>
|
||||||
<button class="tab-btn" data-tab="description">Model Description</button>
|
<button class="tab-btn" data-tab="description">Model Description</button>
|
||||||
@@ -138,7 +141,7 @@ export function showCheckpointModal(checkpoint) {
|
|||||||
|
|
||||||
modalManager.showModal('checkpointModal', content);
|
modalManager.showModal('checkpointModal', content);
|
||||||
setupEditableFields(checkpoint.file_path);
|
setupEditableFields(checkpoint.file_path);
|
||||||
setupShowcaseScroll();
|
setupShowcaseScroll('checkpointModal');
|
||||||
setupTabSwitching();
|
setupTabSwitching();
|
||||||
setupTagTooltip();
|
setupTagTooltip();
|
||||||
setupTagEditMode(); // Initialize tag editing functionality
|
setupTagEditMode(); // Initialize tag editing functionality
|
||||||
@@ -151,68 +154,12 @@ export function showCheckpointModal(checkpoint) {
|
|||||||
loadModelDescription(checkpoint.civitai.modelId, checkpoint.file_path);
|
loadModelDescription(checkpoint.civitai.modelId, checkpoint.file_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load example images asynchronously
|
// Load example images asynchronously - merge regular and custom images
|
||||||
loadExampleImages(checkpoint.civitai?.images, checkpoint.sha256, checkpoint.file_path);
|
const regularImages = checkpoint.civitai?.images || [];
|
||||||
}
|
const customImages = checkpoint.civitai?.customImages || [];
|
||||||
|
// Combine images - regular images first, then custom images
|
||||||
/**
|
const allImages = [...regularImages, ...customImages];
|
||||||
* Load example images asynchronously
|
loadExampleImages(allImages, checkpoint.sha256);
|
||||||
* @param {Array} images - Array of image objects
|
|
||||||
* @param {string} modelHash - Model hash for fetching local files
|
|
||||||
* @param {string} filePath - File path for fetching local files
|
|
||||||
*/
|
|
||||||
async function loadExampleImages(images, modelHash, filePath) {
|
|
||||||
try {
|
|
||||||
const showcaseTab = document.getElementById('showcase-tab');
|
|
||||||
if (!showcaseTab) return;
|
|
||||||
|
|
||||||
// First fetch local example files
|
|
||||||
let localFiles = [];
|
|
||||||
try {
|
|
||||||
// Choose endpoint based on centralized examples setting
|
|
||||||
const useCentralized = state.global.settings.useCentralizedExamples !== false;
|
|
||||||
const endpoint = useCentralized ? '/api/example-image-files' : '/api/model-example-files';
|
|
||||||
|
|
||||||
// Use different params based on endpoint
|
|
||||||
const params = useCentralized ?
|
|
||||||
`model_hash=${modelHash}` :
|
|
||||||
`file_path=${encodeURIComponent(filePath)}`;
|
|
||||||
|
|
||||||
const response = await fetch(`${endpoint}?${params}`);
|
|
||||||
const result = await response.json();
|
|
||||||
|
|
||||||
if (result.success) {
|
|
||||||
localFiles = result.files;
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
console.error("Failed to get example files:", error);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then render with both remote images and local files
|
|
||||||
showcaseTab.innerHTML = renderShowcaseContent(images, localFiles);
|
|
||||||
|
|
||||||
// Re-initialize the showcase event listeners
|
|
||||||
const carousel = showcaseTab.querySelector('.carousel');
|
|
||||||
if (carousel) {
|
|
||||||
// Only initialize if we actually have examples and they're expanded
|
|
||||||
if (!carousel.classList.contains('collapsed')) {
|
|
||||||
initLazyLoading(carousel);
|
|
||||||
initNsfwBlurHandlers(carousel);
|
|
||||||
initMetadataPanelHandlers(carousel);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
console.error('Error loading example images:', error);
|
|
||||||
const showcaseTab = document.getElementById('showcase-tab');
|
|
||||||
if (showcaseTab) {
|
|
||||||
showcaseTab.innerHTML = `
|
|
||||||
<div class="error-message">
|
|
||||||
<i class="fas fa-exclamation-circle"></i>
|
|
||||||
Error loading example images
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -263,9 +210,6 @@ async function saveNotes(filePath) {
|
|||||||
try {
|
try {
|
||||||
await saveModelMetadata(filePath, { notes: content });
|
await saveModelMetadata(filePath, { notes: content });
|
||||||
|
|
||||||
// Update the corresponding checkpoint card's dataset
|
|
||||||
updateModelCard(filePath, { notes: content });
|
|
||||||
|
|
||||||
showToast('Notes saved successfully', 'success');
|
showToast('Notes saved successfully', 'success');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
showToast('Failed to save notes', 'error');
|
showToast('Failed to save notes', 'error');
|
||||||
|
|||||||
@@ -115,16 +115,6 @@ export function setupModelNameEditing(filePath) {
|
|||||||
|
|
||||||
await saveModelMetadata(filePath, { model_name: newModelName });
|
await saveModelMetadata(filePath, { model_name: newModelName });
|
||||||
|
|
||||||
// Update the corresponding lora card's dataset and display
|
|
||||||
updateModelCard(filePath, { model_name: newModelName });
|
|
||||||
|
|
||||||
// BUGFIX: Directly update the card's dataset.name attribute to ensure
|
|
||||||
// it's correctly read when reopening the modal
|
|
||||||
const loraCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
|
|
||||||
if (loraCard) {
|
|
||||||
loraCard.dataset.name = newModelName;
|
|
||||||
}
|
|
||||||
|
|
||||||
showToast('Model name updated successfully', 'success');
|
showToast('Model name updated successfully', 'success');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error updating model name:', error);
|
console.error('Error updating model name:', error);
|
||||||
@@ -304,9 +294,6 @@ async function saveBaseModel(filePath, originalValue) {
|
|||||||
try {
|
try {
|
||||||
await saveModelMetadata(filePath, { base_model: newBaseModel });
|
await saveModelMetadata(filePath, { base_model: newBaseModel });
|
||||||
|
|
||||||
// Update the corresponding lora card's dataset
|
|
||||||
updateModelCard(filePath, { base_model: newBaseModel });
|
|
||||||
|
|
||||||
showToast('Base model updated successfully', 'success');
|
showToast('Base model updated successfully', 'success');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
showToast('Failed to update base model', 'error');
|
showToast('Failed to update base model', 'error');
|
||||||
|
|||||||
@@ -463,15 +463,6 @@ async function saveTags() {
|
|||||||
// Exit edit mode
|
// Exit edit mode
|
||||||
editBtn.click();
|
editBtn.click();
|
||||||
|
|
||||||
// Update the LoRA card's dataset
|
|
||||||
const loraCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
|
|
||||||
if (loraCard) {
|
|
||||||
loraCard.dataset.tags = JSON.stringify(tags);
|
|
||||||
|
|
||||||
// Also update the card in the DOM
|
|
||||||
// updateLoraCard(loraCard, { tags: tags });
|
|
||||||
}
|
|
||||||
|
|
||||||
showToast('Tags updated successfully', 'success');
|
showToast('Tags updated successfully', 'success');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Error saving tags:', error);
|
console.error('Error saving tags:', error);
|
||||||
|
|||||||
@@ -62,6 +62,5 @@ window.removePreset = async function(key) {
|
|||||||
usage_tips: newPresetsJson
|
usage_tips: newPresetsJson
|
||||||
});
|
});
|
||||||
|
|
||||||
loraCard.dataset.usage_tips = newPresetsJson;
|
|
||||||
document.querySelector('.preset-tags').innerHTML = renderPresetTags(currentPresets);
|
document.querySelector('.preset-tags').innerHTML = renderPresetTags(currentPresets);
|
||||||
};
|
};
|
||||||
@@ -1,280 +0,0 @@
|
|||||||
/**
|
|
||||||
* ShowcaseView.js
|
|
||||||
* 处理LoRA模型展示内容(图片、视频)的功能模块
|
|
||||||
*/
|
|
||||||
import {
|
|
||||||
toggleShowcase,
|
|
||||||
setupShowcaseScroll,
|
|
||||||
scrollToTop
|
|
||||||
} from '../../utils/uiHelpers.js';
|
|
||||||
import { state } from '../../state/index.js';
|
|
||||||
import { NSFW_LEVELS } from '../../utils/constants.js';
|
|
||||||
|
|
||||||
/**
|
|
||||||
* 获取展示内容并进行渲染
|
|
||||||
* @param {Array} images - 要展示的图片/视频数组
|
|
||||||
* @param {Array} exampleFiles - Local example files already fetched
|
|
||||||
* @returns {Promise<string>} HTML内容
|
|
||||||
*/
|
|
||||||
export function renderShowcaseContent(images, exampleFiles = []) {
|
|
||||||
if (!images?.length) return '<div class="no-examples">No example images available</div>';
|
|
||||||
|
|
||||||
// Filter images based on SFW setting
|
|
||||||
const showOnlySFW = state.settings.show_only_sfw;
|
|
||||||
let filteredImages = images;
|
|
||||||
let hiddenCount = 0;
|
|
||||||
|
|
||||||
if (showOnlySFW) {
|
|
||||||
filteredImages = images.filter(img => {
|
|
||||||
const nsfwLevel = img.nsfwLevel !== undefined ? img.nsfwLevel : 0;
|
|
||||||
const isSfw = nsfwLevel < NSFW_LEVELS.R;
|
|
||||||
if (!isSfw) hiddenCount++;
|
|
||||||
return isSfw;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Show message if no images are available after filtering
|
|
||||||
if (filteredImages.length === 0) {
|
|
||||||
return `
|
|
||||||
<div class="no-examples">
|
|
||||||
<p>All example images are filtered due to NSFW content settings</p>
|
|
||||||
<p class="nsfw-filter-info">Your settings are currently set to show only safe-for-work content</p>
|
|
||||||
<p>You can change this in Settings <i class="fas fa-cog"></i></p>
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Show hidden content notification if applicable
|
|
||||||
const hiddenNotification = hiddenCount > 0 ?
|
|
||||||
`<div class="nsfw-filter-notification">
|
|
||||||
<i class="fas fa-eye-slash"></i> ${hiddenCount} ${hiddenCount === 1 ? 'image' : 'images'} hidden due to SFW-only setting
|
|
||||||
</div>` : '';
|
|
||||||
|
|
||||||
return `
|
|
||||||
<div class="scroll-indicator" onclick="toggleShowcase(this)">
|
|
||||||
<i class="fas fa-chevron-down"></i>
|
|
||||||
<span>Scroll or click to show ${filteredImages.length} examples</span>
|
|
||||||
</div>
|
|
||||||
<div class="carousel collapsed">
|
|
||||||
${hiddenNotification}
|
|
||||||
<div class="carousel-container">
|
|
||||||
${filteredImages.map((img, index) => {
|
|
||||||
// Find matching file in our list of actual files
|
|
||||||
let localFile = null;
|
|
||||||
if (exampleFiles.length > 0) {
|
|
||||||
// Try to find the corresponding file by index first
|
|
||||||
localFile = exampleFiles.find(file => {
|
|
||||||
const match = file.name.match(/image_(\d+)\./);
|
|
||||||
return match && parseInt(match[1]) === index;
|
|
||||||
});
|
|
||||||
|
|
||||||
// If not found by index, just use the same position in the array if available
|
|
||||||
if (!localFile && index < exampleFiles.length) {
|
|
||||||
localFile = exampleFiles[index];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const remoteUrl = img.url || '';
|
|
||||||
const localUrl = localFile ? localFile.path : '';
|
|
||||||
const isVideo = localFile ? localFile.is_video :
|
|
||||||
remoteUrl.endsWith('.mp4') || remoteUrl.endsWith('.webm');
|
|
||||||
|
|
||||||
// 计算适当的展示高度
|
|
||||||
const aspectRatio = (img.height / img.width) * 100;
|
|
||||||
const containerWidth = 800;
|
|
||||||
const minHeightPercent = 40;
|
|
||||||
const maxHeightPercent = (window.innerHeight * 0.6 / containerWidth) * 100;
|
|
||||||
const heightPercent = Math.max(
|
|
||||||
minHeightPercent,
|
|
||||||
Math.min(maxHeightPercent, aspectRatio)
|
|
||||||
);
|
|
||||||
|
|
||||||
// Check if image should be blurred
|
|
||||||
const nsfwLevel = img.nsfwLevel !== undefined ? img.nsfwLevel : 0;
|
|
||||||
const shouldBlur = state.settings.blurMatureContent && nsfwLevel > NSFW_LEVELS.PG13;
|
|
||||||
|
|
||||||
// Determine NSFW warning text based on level
|
|
||||||
let nsfwText = "Mature Content";
|
|
||||||
if (nsfwLevel >= NSFW_LEVELS.XXX) {
|
|
||||||
nsfwText = "XXX-rated Content";
|
|
||||||
} else if (nsfwLevel >= NSFW_LEVELS.X) {
|
|
||||||
nsfwText = "X-rated Content";
|
|
||||||
} else if (nsfwLevel >= NSFW_LEVELS.R) {
|
|
||||||
nsfwText = "R-rated Content";
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract metadata from the image
|
|
||||||
const meta = img.meta || {};
|
|
||||||
const prompt = meta.prompt || '';
|
|
||||||
const negativePrompt = meta.negative_prompt || meta.negativePrompt || '';
|
|
||||||
const size = meta.Size || `${img.width}x${img.height}`;
|
|
||||||
const seed = meta.seed || '';
|
|
||||||
const model = meta.Model || '';
|
|
||||||
const steps = meta.steps || '';
|
|
||||||
const sampler = meta.sampler || '';
|
|
||||||
const cfgScale = meta.cfgScale || '';
|
|
||||||
const clipSkip = meta.clipSkip || '';
|
|
||||||
|
|
||||||
const hasParams = seed || model || steps || sampler || cfgScale || clipSkip;
|
|
||||||
const hasPrompts = prompt || negativePrompt;
|
|
||||||
|
|
||||||
const metadataPanel = generateMetadataPanel(
|
|
||||||
hasParams, hasPrompts,
|
|
||||||
prompt, negativePrompt,
|
|
||||||
size, seed, model, steps, sampler, cfgScale, clipSkip
|
|
||||||
);
|
|
||||||
|
|
||||||
if (isVideo) {
|
|
||||||
return generateVideoWrapper(
|
|
||||||
img, heightPercent, shouldBlur, nsfwText, metadataPanel,
|
|
||||||
localUrl, remoteUrl
|
|
||||||
);
|
|
||||||
}
|
|
||||||
return generateImageWrapper(
|
|
||||||
img, heightPercent, shouldBlur, nsfwText, metadataPanel,
|
|
||||||
localUrl, remoteUrl
|
|
||||||
);
|
|
||||||
}).join('')}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
 * Generate the hover metadata panel HTML for a showcase media item.
 *
 * All metadata values originate from remote (Civitai) model metadata and are
 * therefore untrusted; they are HTML-escaped before being interpolated into
 * markup so a crafted prompt cannot inject script into the page.
 *
 * @param {boolean} hasParams - Whether any generation parameter is present
 * @param {boolean|string} hasPrompts - Whether a prompt or negative prompt is present
 * @param {string} prompt - Positive prompt text (may be empty)
 * @param {string} negativePrompt - Negative prompt text (may be empty)
 * @param {string} size - Image size, e.g. "512x512"
 * @param {string|number} seed - Generation seed
 * @param {string} model - Model name
 * @param {string|number} steps - Sampling steps
 * @param {string} sampler - Sampler name
 * @param {string|number} cfgScale - CFG scale
 * @param {string|number} clipSkip - Clip skip value
 * @returns {string} HTML for the metadata panel
 */
function generateMetadataPanel(hasParams, hasPrompts, prompt, negativePrompt, size, seed, model, steps, sampler, cfgScale, clipSkip) {
    // Minimal HTML entity escaping for untrusted metadata text. The escaped
    // entities still render (and copy via textContent) as the original text.
    const esc = (value) => String(value)
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;');

    // Unique IDs so each copy button can locate its hidden prompt element.
    const promptIndex = Math.random().toString(36).substring(2, 15);
    const negPromptIndex = Math.random().toString(36).substring(2, 15);

    // Render one "Name: value" tag, or nothing when the value is empty/falsy.
    const paramTag = (name, value) => value
        ? `<div class="param-tag"><span class="param-name">${name}:</span><span class="param-value">${esc(value)}</span></div>`
        : '';

    // Render a prompt row plus a hidden copy of the text for the copy button.
    const promptRow = (label, text, id) => `
        <div class="metadata-row prompt-row">
            <span class="metadata-label">${label}:</span>
            <div class="metadata-prompt-wrapper">
                <div class="metadata-prompt">${esc(text)}</div>
                <button class="copy-prompt-btn" data-prompt-index="${id}">
                    <i class="fas fa-copy"></i>
                </button>
            </div>
        </div>
        <div class="hidden-prompt" id="prompt-${id}" style="display:none;">${esc(text)}</div>
    `;

    let content = '<div class="image-metadata-panel"><div class="metadata-content">';

    if (hasParams) {
        content += `
            <div class="params-tags">
                ${paramTag('Size', size)}
                ${paramTag('Seed', seed)}
                ${paramTag('Model', model)}
                ${paramTag('Steps', steps)}
                ${paramTag('Sampler', sampler)}
                ${paramTag('CFG', cfgScale)}
                ${paramTag('Clip Skip', clipSkip)}
            </div>
        `;
    }

    if (!hasParams && !hasPrompts) {
        content += `
            <div class="no-metadata-message">
                <i class="fas fa-info-circle"></i>
                <span>No generation parameters available</span>
            </div>
        `;
    }

    if (prompt) {
        content += promptRow('Prompt', prompt, promptIndex);
    }

    if (negativePrompt) {
        content += promptRow('Negative Prompt', negativePrompt, negPromptIndex);
    }

    content += '</div></div>';
    return content;
}
|
|
||||||
|
|
||||||
/**
 * Build the HTML for a showcase video entry.
 *
 * @param {Object} img - Media metadata object
 * @param {number} heightPercent - Padding-bottom percentage preserving aspect ratio
 * @param {boolean} shouldBlur - Whether the content starts blurred (NSFW)
 * @param {string} nsfwText - Warning text shown on the NSFW overlay
 * @param {string} metadataPanel - Pre-rendered metadata panel HTML
 * @param {string} localUrl - Local file URL (may be empty)
 * @param {string} remoteUrl - Remote file URL
 * @returns {string} HTML for the video wrapper
 */
function generateVideoWrapper(img, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl, remoteUrl) {
    // Blur toggle button and NSFW overlay only exist for blurred content.
    const blurToggle = shouldBlur ? `
            <button class="toggle-blur-btn showcase-toggle-btn" title="Toggle blur">
                <i class="fas fa-eye"></i>
            </button>
        ` : '';
    const nsfwOverlay = shouldBlur ? `
            <div class="nsfw-overlay">
                <div class="nsfw-warning">
                    <p>${nsfwText}</p>
                    <button class="show-content-btn">Show</button>
                </div>
            </div>
        ` : '';

    return `
        <div class="media-wrapper ${shouldBlur ? 'nsfw-media-wrapper' : ''}" style="padding-bottom: ${heightPercent}%">
            ${blurToggle}
            <video controls autoplay muted loop crossorigin="anonymous"
                referrerpolicy="no-referrer"
                data-local-src="${localUrl || ''}"
                data-remote-src="${remoteUrl}"
                class="lazy ${shouldBlur ? 'blurred' : ''}">
                <source data-local-src="${localUrl || ''}" data-remote-src="${remoteUrl}" type="video/mp4">
                Your browser does not support video playback
            </video>
            ${nsfwOverlay}
            ${metadataPanel}
        </div>
    `;
}
|
|
||||||
|
|
||||||
/**
 * Build the HTML for a showcase image entry.
 *
 * @param {Object} img - Media metadata object (provides width/height)
 * @param {number} heightPercent - Padding-bottom percentage preserving aspect ratio
 * @param {boolean} shouldBlur - Whether the content starts blurred (NSFW)
 * @param {string} nsfwText - Warning text shown on the NSFW overlay
 * @param {string} metadataPanel - Pre-rendered metadata panel HTML
 * @param {string} localUrl - Local file URL (may be empty)
 * @param {string} remoteUrl - Remote file URL
 * @returns {string} HTML for the image wrapper
 */
function generateImageWrapper(img, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl, remoteUrl) {
    // Blur toggle button and NSFW overlay only exist for blurred content.
    const blurToggle = shouldBlur ? `
            <button class="toggle-blur-btn showcase-toggle-btn" title="Toggle blur">
                <i class="fas fa-eye"></i>
            </button>
        ` : '';
    const nsfwOverlay = shouldBlur ? `
            <div class="nsfw-overlay">
                <div class="nsfw-warning">
                    <p>${nsfwText}</p>
                    <button class="show-content-btn">Show</button>
                </div>
            </div>
        ` : '';

    return `
        <div class="media-wrapper ${shouldBlur ? 'nsfw-media-wrapper' : ''}" style="padding-bottom: ${heightPercent}%">
            ${blurToggle}
            <img data-local-src="${localUrl || ''}"
                data-remote-src="${remoteUrl}"
                alt="Preview"
                crossorigin="anonymous"
                referrerpolicy="no-referrer"
                width="${img.width}"
                height="${img.height}"
                class="lazy ${shouldBlur ? 'blurred' : ''}">
            ${nsfwOverlay}
            ${metadataPanel}
        </div>
    `;
}
|
|
||||||
|
|
||||||
// Use the shared setupShowcaseScroll function with the correct modal ID
|
|
||||||
export { setupShowcaseScroll, scrollToTop, toggleShowcase };
|
|
||||||
|
|
||||||
// Initialize the showcase scroll when this module is imported
|
|
||||||
document.addEventListener('DOMContentLoaded', () => {
|
|
||||||
setupShowcaseScroll('loraModal');
|
|
||||||
});
|
|
||||||
@@ -620,28 +620,6 @@ async function saveTriggerWords() {
|
|||||||
// Exit edit mode without restoring original trigger words
|
// Exit edit mode without restoring original trigger words
|
||||||
editBtn.click();
|
editBtn.click();
|
||||||
|
|
||||||
// Update the LoRA card's dataset
|
|
||||||
const loraCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
|
|
||||||
if (loraCard) {
|
|
||||||
try {
|
|
||||||
// Create a proper structure for civitai data
|
|
||||||
let civitaiData = {};
|
|
||||||
|
|
||||||
// Parse existing data if available
|
|
||||||
if (loraCard.dataset.meta) {
|
|
||||||
civitaiData = JSON.parse(loraCard.dataset.meta);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update trainedWords property
|
|
||||||
civitaiData.trainedWords = words;
|
|
||||||
|
|
||||||
// Update the meta dataset attribute with the full civitai data
|
|
||||||
loraCard.dataset.meta = JSON.stringify(civitaiData);
|
|
||||||
} catch (e) {
|
|
||||||
console.error('Error updating civitai data:', e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we saved an empty array and there's a no-trigger-words element, show it
|
// If we saved an empty array and there's a no-trigger-words element, show it
|
||||||
const noTriggerWords = triggerWordsSection.querySelector('.no-trigger-words');
|
const noTriggerWords = triggerWordsSection.querySelector('.no-trigger-words');
|
||||||
const tagsContainer = triggerWordsSection.querySelector('.trigger-words-tags');
|
const tagsContainer = triggerWordsSection.querySelector('.trigger-words-tags');
|
||||||
|
|||||||
@@ -3,9 +3,13 @@
|
|||||||
*
|
*
|
||||||
* 将原始的LoraModal.js拆分成多个功能模块后的主入口文件
|
* 将原始的LoraModal.js拆分成多个功能模块后的主入口文件
|
||||||
*/
|
*/
|
||||||
import { showToast, copyToClipboard, getExampleImageFiles } from '../../utils/uiHelpers.js';
|
import { showToast } from '../../utils/uiHelpers.js';
|
||||||
import { modalManager } from '../../managers/ModalManager.js';
|
import { modalManager } from '../../managers/ModalManager.js';
|
||||||
import { renderShowcaseContent, toggleShowcase, setupShowcaseScroll, scrollToTop } from './ShowcaseView.js';
|
import {
|
||||||
|
setupShowcaseScroll,
|
||||||
|
scrollToTop,
|
||||||
|
loadExampleImages
|
||||||
|
} from '../shared/showcase/ShowcaseView.js';
|
||||||
import { setupTabSwitching, loadModelDescription } from './ModelDescription.js';
|
import { setupTabSwitching, loadModelDescription } from './ModelDescription.js';
|
||||||
import { renderTriggerWords, setupTriggerWordsEditMode } from './TriggerWords.js';
|
import { renderTriggerWords, setupTriggerWordsEditMode } from './TriggerWords.js';
|
||||||
import { parsePresets, renderPresetTags } from './PresetTags.js';
|
import { parsePresets, renderPresetTags } from './PresetTags.js';
|
||||||
@@ -18,8 +22,6 @@ import {
|
|||||||
} from './ModelMetadata.js';
|
} from './ModelMetadata.js';
|
||||||
import { saveModelMetadata } from '../../api/loraApi.js';
|
import { saveModelMetadata } from '../../api/loraApi.js';
|
||||||
import { renderCompactTags, setupTagTooltip, formatFileSize } from './utils.js';
|
import { renderCompactTags, setupTagTooltip, formatFileSize } from './utils.js';
|
||||||
import { updateModelCard } from '../../utils/cardUpdater.js';
|
|
||||||
import { state } from '../../state/index.js';
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* 显示LoRA模型弹窗
|
* 显示LoRA模型弹窗
|
||||||
@@ -117,7 +119,7 @@ export function showLoraModal(lora) {
|
|||||||
<label>Additional Notes</label>
|
<label>Additional Notes</label>
|
||||||
<div class="editable-field">
|
<div class="editable-field">
|
||||||
<div class="notes-content" contenteditable="true" spellcheck="false">${lora.notes || 'Add your notes here...'}</div>
|
<div class="notes-content" contenteditable="true" spellcheck="false">${lora.notes || 'Add your notes here...'}</div>
|
||||||
<button class="save-btn" onclick="saveNotes('${lora.file_path}')">
|
<button class="save-btn" data-action="save-notes">
|
||||||
<i class="fas fa-save"></i>
|
<i class="fas fa-save"></i>
|
||||||
</button>
|
</button>
|
||||||
</div>
|
</div>
|
||||||
@@ -129,7 +131,7 @@ export function showLoraModal(lora) {
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div class="showcase-section" data-lora-id="${lora.civitai?.modelId || ''}">
|
<div class="showcase-section" data-model-hash="${lora.sha256 || ''}" data-filepath="${lora.file_path}">
|
||||||
<div class="showcase-tabs">
|
<div class="showcase-tabs">
|
||||||
<button class="tab-btn active" data-tab="showcase">Examples</button>
|
<button class="tab-btn active" data-tab="showcase">Examples</button>
|
||||||
<button class="tab-btn" data-tab="description">Model Description</button>
|
<button class="tab-btn" data-tab="description">Model Description</button>
|
||||||
@@ -161,7 +163,7 @@ export function showLoraModal(lora) {
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<button class="back-to-top" onclick="scrollToTop(this)">
|
<button class="back-to-top" data-action="scroll-to-top">
|
||||||
<i class="fas fa-arrow-up"></i>
|
<i class="fas fa-arrow-up"></i>
|
||||||
</button>
|
</button>
|
||||||
</div>
|
</div>
|
||||||
@@ -171,7 +173,7 @@ export function showLoraModal(lora) {
|
|||||||
|
|
||||||
modalManager.showModal('loraModal', content);
|
modalManager.showModal('loraModal', content);
|
||||||
setupEditableFields(lora.file_path);
|
setupEditableFields(lora.file_path);
|
||||||
setupShowcaseScroll();
|
setupShowcaseScroll('loraModal');
|
||||||
setupTabSwitching();
|
setupTabSwitching();
|
||||||
setupTagTooltip();
|
setupTagTooltip();
|
||||||
setupTriggerWordsEditMode();
|
setupTriggerWordsEditMode();
|
||||||
@@ -179,6 +181,7 @@ export function showLoraModal(lora) {
|
|||||||
setupBaseModelEditing(lora.file_path);
|
setupBaseModelEditing(lora.file_path);
|
||||||
setupFileNameEditing(lora.file_path);
|
setupFileNameEditing(lora.file_path);
|
||||||
setupTagEditMode(); // Initialize tag editing functionality
|
setupTagEditMode(); // Initialize tag editing functionality
|
||||||
|
setupEventHandlers(lora.file_path);
|
||||||
|
|
||||||
// If we have a model ID but no description, fetch it
|
// If we have a model ID but no description, fetch it
|
||||||
if (lora.civitai?.modelId && !lora.modelDescription) {
|
if (lora.civitai?.modelId && !lora.modelDescription) {
|
||||||
@@ -188,90 +191,49 @@ export function showLoraModal(lora) {
|
|||||||
// Load recipes for this Lora
|
// Load recipes for this Lora
|
||||||
loadRecipesForLora(lora.model_name, lora.sha256);
|
loadRecipesForLora(lora.model_name, lora.sha256);
|
||||||
|
|
||||||
// Load example images asynchronously
|
// Load example images asynchronously - merge regular and custom images
|
||||||
loadExampleImages(lora.civitai?.images, lora.sha256, lora.file_path);
|
const regularImages = lora.civitai?.images || [];
|
||||||
|
const customImages = lora.civitai?.customImages || [];
|
||||||
|
// Combine images - regular images first, then custom images
|
||||||
|
const allImages = [...regularImages, ...customImages];
|
||||||
|
loadExampleImages(allImages, lora.sha256);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Load example images asynchronously
|
* Sets up event handlers using event delegation
|
||||||
* @param {Array} images - Array of image objects
|
* @param {string} filePath - Path to the model file
|
||||||
* @param {string} modelHash - Model hash for fetching local files
|
|
||||||
* @param {string} filePath - File path for fetching local files
|
|
||||||
*/
|
*/
|
||||||
async function loadExampleImages(images, modelHash, filePath) {
|
function setupEventHandlers(filePath) {
|
||||||
try {
|
const modalElement = document.getElementById('loraModal');
|
||||||
const showcaseTab = document.getElementById('showcase-tab');
|
|
||||||
if (!showcaseTab) return;
|
// Use event delegation to handle clicks
|
||||||
|
modalElement.addEventListener('click', async (event) => {
|
||||||
|
const target = event.target.closest('[data-action]');
|
||||||
|
if (!target) return;
|
||||||
|
|
||||||
// First fetch local example files
|
const action = target.dataset.action;
|
||||||
let localFiles = [];
|
|
||||||
|
|
||||||
try {
|
|
||||||
// Choose endpoint based on centralized examples setting
|
|
||||||
const useCentralized = state.global.settings.useCentralizedExamples !== false;
|
|
||||||
const endpoint = useCentralized ? '/api/example-image-files' : '/api/model-example-files';
|
|
||||||
|
|
||||||
// Use different params based on endpoint
|
|
||||||
const params = useCentralized ?
|
|
||||||
`model_hash=${modelHash}` :
|
|
||||||
`file_path=${encodeURIComponent(filePath)}`;
|
|
||||||
|
|
||||||
const response = await fetch(`${endpoint}?${params}`);
|
|
||||||
const result = await response.json();
|
|
||||||
|
|
||||||
if (result.success) {
|
|
||||||
localFiles = result.files;
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
console.error("Failed to get example files:", error);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then render with both remote images and local files
|
switch (action) {
|
||||||
showcaseTab.innerHTML = renderShowcaseContent(images, localFiles);
|
case 'close-modal':
|
||||||
|
modalManager.closeModal('loraModal');
|
||||||
// Re-initialize the showcase event listeners
|
break;
|
||||||
const carousel = showcaseTab.querySelector('.carousel');
|
|
||||||
if (carousel) {
|
case 'save-notes':
|
||||||
// Only initialize if we actually have examples and they're expanded
|
await saveNotes(filePath);
|
||||||
if (!carousel.classList.contains('collapsed')) {
|
break;
|
||||||
initLazyLoading(carousel);
|
|
||||||
initNsfwBlurHandlers(carousel);
|
case 'scroll-to-top':
|
||||||
initMetadataPanelHandlers(carousel);
|
scrollToTop(target);
|
||||||
}
|
break;
|
||||||
}
|
}
|
||||||
} catch (error) {
|
});
|
||||||
console.error('Error loading example images:', error);
|
|
||||||
const showcaseTab = document.getElementById('showcase-tab');
|
|
||||||
if (showcaseTab) {
|
|
||||||
showcaseTab.innerHTML = `
|
|
||||||
<div class="error-message">
|
|
||||||
<i class="fas fa-exclamation-circle"></i>
|
|
||||||
Error loading example images
|
|
||||||
</div>
|
|
||||||
`;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy file name function
|
async function saveNotes(filePath) {
|
||||||
window.copyFileName = async function(fileName) {
|
|
||||||
try {
|
|
||||||
await copyToClipboard(fileName, 'File name copied');
|
|
||||||
} catch (err) {
|
|
||||||
console.error('Copy failed:', err);
|
|
||||||
showToast('Copy failed', 'error');
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Add save note function
|
|
||||||
window.saveNotes = async function(filePath) {
|
|
||||||
const content = document.querySelector('.notes-content').textContent;
|
const content = document.querySelector('.notes-content').textContent;
|
||||||
try {
|
try {
|
||||||
await saveModelMetadata(filePath, { notes: content });
|
await saveModelMetadata(filePath, { notes: content });
|
||||||
|
|
||||||
// Update the corresponding lora card's dataset
|
|
||||||
updateModelCard(filePath, { notes: content });
|
|
||||||
|
|
||||||
showToast('Notes saved successfully', 'success');
|
showToast('Notes saved successfully', 'success');
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
showToast('Failed to save notes', 'error');
|
showToast('Failed to save notes', 'error');
|
||||||
@@ -336,9 +298,6 @@ function setupEditableFields(filePath) {
|
|||||||
usage_tips: newPresetsJson
|
usage_tips: newPresetsJson
|
||||||
});
|
});
|
||||||
|
|
||||||
// Update the card with the new usage tips
|
|
||||||
updateModelCard(filePath, { usage_tips: newPresetsJson });
|
|
||||||
|
|
||||||
presetTags.innerHTML = renderPresetTags(currentPresets);
|
presetTags.innerHTML = renderPresetTags(currentPresets);
|
||||||
|
|
||||||
presetSelector.value = '';
|
presetSelector.value = '';
|
||||||
@@ -368,7 +327,4 @@ function setupEditableFields(filePath) {
|
|||||||
addPresetBtn.click();
|
addPresetBtn.click();
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// Export functions for global access
|
|
||||||
export { toggleShowcase, scrollToTop };
|
|
||||||
94
static/js/components/shared/showcase/MediaRenderers.js
Normal file
94
static/js/components/shared/showcase/MediaRenderers.js
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
/**
|
||||||
|
* MediaRenderers.js
|
||||||
|
* HTML generators for media items (images/videos) in the showcase
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
 * Generate video wrapper HTML for a showcase entry.
 *
 * @param {Object} media - Media metadata (nsfwLevel, id, ...)
 * @param {number} heightPercent - Padding-bottom percentage preserving aspect ratio
 * @param {boolean} shouldBlur - Whether the content starts blurred
 * @param {string} nsfwText - NSFW warning text
 * @param {string} metadataPanel - Pre-rendered metadata panel HTML
 * @param {string} localUrl - Local file URL (may be empty)
 * @param {string} remoteUrl - Remote file URL
 * @param {string} mediaControlsHtml - HTML for media control buttons
 * @returns {string} HTML content
 */
export function generateVideoWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl, remoteUrl, mediaControlsHtml = '') {
    // Default missing NSFW level to 0 (safe).
    const nsfwLevel = media.nsfwLevel === undefined ? 0 : media.nsfwLevel;

    // Blur toggle and NSFW overlay fragments only exist for blurred content.
    const blurToggle = shouldBlur ? `
            <button class="toggle-blur-btn showcase-toggle-btn" title="Toggle blur">
                <i class="fas fa-eye"></i>
            </button>
        ` : '';
    const nsfwOverlay = shouldBlur ? `
            <div class="nsfw-overlay">
                <div class="nsfw-warning">
                    <p>${nsfwText}</p>
                    <button class="show-content-btn">Show</button>
                </div>
            </div>
        ` : '';

    return `
        <div class="media-wrapper ${shouldBlur ? 'nsfw-media-wrapper' : ''}" style="padding-bottom: ${heightPercent}%" data-short-id="${media.id || ''}" data-nsfw-level="${nsfwLevel}">
            ${blurToggle}
            ${mediaControlsHtml}
            <video controls autoplay muted loop crossorigin="anonymous"
                referrerpolicy="no-referrer"
                data-local-src="${localUrl || ''}"
                data-remote-src="${remoteUrl}"
                data-nsfw-level="${nsfwLevel}"
                class="lazy ${shouldBlur ? 'blurred' : ''}">
                <source data-local-src="${localUrl || ''}" data-remote-src="${remoteUrl}" type="video/mp4">
                Your browser does not support video playback
            </video>
            ${nsfwOverlay}
            ${metadataPanel}
        </div>
    `;
}
|
||||||
|
|
||||||
|
/**
 * Generate image wrapper HTML for a showcase entry.
 *
 * @param {Object} media - Media metadata (nsfwLevel, id, width, height, ...)
 * @param {number} heightPercent - Padding-bottom percentage preserving aspect ratio
 * @param {boolean} shouldBlur - Whether the content starts blurred
 * @param {string} nsfwText - NSFW warning text
 * @param {string} metadataPanel - Pre-rendered metadata panel HTML
 * @param {string} localUrl - Local file URL (may be empty)
 * @param {string} remoteUrl - Remote file URL
 * @param {string} mediaControlsHtml - HTML for media control buttons
 * @returns {string} HTML content
 */
export function generateImageWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl, remoteUrl, mediaControlsHtml = '') {
    // Default missing NSFW level to 0 (safe).
    const nsfwLevel = media.nsfwLevel === undefined ? 0 : media.nsfwLevel;

    // Blur toggle and NSFW overlay fragments only exist for blurred content.
    const blurToggle = shouldBlur ? `
            <button class="toggle-blur-btn showcase-toggle-btn" title="Toggle blur">
                <i class="fas fa-eye"></i>
            </button>
        ` : '';
    const nsfwOverlay = shouldBlur ? `
            <div class="nsfw-overlay">
                <div class="nsfw-warning">
                    <p>${nsfwText}</p>
                    <button class="show-content-btn">Show</button>
                </div>
            </div>
        ` : '';

    return `
        <div class="media-wrapper ${shouldBlur ? 'nsfw-media-wrapper' : ''}" style="padding-bottom: ${heightPercent}%" data-short-id="${media.id || ''}" data-nsfw-level="${nsfwLevel}">
            ${blurToggle}
            ${mediaControlsHtml}
            <img data-local-src="${localUrl || ''}"
                data-remote-src="${remoteUrl}"
                data-nsfw-level="${nsfwLevel}"
                alt="Preview"
                crossorigin="anonymous"
                referrerpolicy="no-referrer"
                width="${media.width}"
                height="${media.height}"
                class="lazy ${shouldBlur ? 'blurred' : ''}">
            ${nsfwOverlay}
            ${metadataPanel}
        </div>
    `;
}
|
||||||
592
static/js/components/shared/showcase/MediaUtils.js
Normal file
592
static/js/components/shared/showcase/MediaUtils.js
Normal file
@@ -0,0 +1,592 @@
|
|||||||
|
/**
|
||||||
|
* MediaUtils.js
|
||||||
|
* Media-specific utility functions for showcase components
|
||||||
|
* (Moved from uiHelpers.js to better organize code)
|
||||||
|
*/
|
||||||
|
import { showToast, copyToClipboard } from '../../../utils/uiHelpers.js';
|
||||||
|
import { state } from '../../../state/index.js';
|
||||||
|
import { uploadPreview } from '../../../api/baseModelApi.js';
|
||||||
|
|
||||||
|
/**
 * Load a showcase image, preferring local copies over the remote URL.
 *
 * Probes the primary local URL, then the local fallback, and finally settles
 * on the remote URL if neither local candidate loads.
 *
 * @param {HTMLImageElement} imgElement - The image element to update
 * @param {Object} urls - Object with local URLs {primary, fallback} and remote URL
 */
export function tryLocalImageOrFallbackToRemote(imgElement, urls) {
    const local = urls.local || {};
    const remoteUrl = urls.remote;

    // Without a primary local candidate there is nothing to probe.
    if (!local.primary) {
        imgElement.src = remoteUrl;
        return;
    }

    // Probe a candidate URL off-screen; apply it on success, otherwise hand
    // control to the supplied failure continuation.
    const probe = (candidate, onFail) => {
        const tester = new Image();
        tester.onload = () => {
            imgElement.src = candidate;
        };
        tester.onerror = onFail;
        tester.src = candidate;
    };

    probe(local.primary, () => {
        if (local.fallback) {
            // Primary failed; try the fallback, then give up and go remote.
            probe(local.fallback, () => {
                imgElement.src = remoteUrl;
            });
        } else {
            imgElement.src = remoteUrl;
        }
    });
}
|
||||||
|
|
||||||
|
/**
 * Load a showcase video, preferring the local copy over the remote URL.
 *
 * Issues a HEAD request against the local URL to check for existence; falls
 * back to the remote URL when the local file is missing or the check fails.
 *
 * @param {HTMLVideoElement} videoElement - The video element to update
 * @param {Object} urls - Object with local URLs {primary} and remote URL
 */
export function tryLocalVideoOrFallbackToRemote(videoElement, urls) {
    const { primary: localUrl } = urls.local || {};
    const remoteUrl = urls.remote;

    // Point both the <video> element and its nested <source> at the same URL,
    // then reload so the browser picks up the change.
    const applySource = (url) => {
        videoElement.src = url;
        const sourceEl = videoElement.querySelector('source');
        if (sourceEl) sourceEl.src = url;
        videoElement.load();
    };

    // No local candidate: go straight to the remote URL.
    if (!localUrl) {
        applySource(remoteUrl);
        return;
    }

    fetch(localUrl, { method: 'HEAD' })
        .then((response) => {
            applySource(response.ok ? localUrl : remoteUrl);
        })
        .catch(() => {
            applySource(remoteUrl);
        });
}
|
||||||
|
|
||||||
|
/**
 * Initialize lazy loading for images and videos in a container.
 *
 * Each `.lazy` element is hydrated (local-first source resolution) the first
 * time it scrolls into view, then unobserved.
 *
 * @param {HTMLElement} container - The container with lazy-loadable elements
 */
export function initLazyLoading(container) {
    // Resolve the real media source for one element and drop its lazy marker.
    const hydrate = (element) => {
        const urls = {
            local: {
                primary: element.dataset.localSrc || null,
                fallback: element.dataset.localFallbackSrc || null
            },
            remote: element.dataset.remoteSrc
        };

        // Videos and images use different fallback strategies.
        if (element.tagName.toLowerCase() === 'video') {
            tryLocalVideoOrFallbackToRemote(element, urls);
        } else {
            tryLocalImageOrFallbackToRemote(element, urls);
        }

        element.classList.remove('lazy');
    };

    const observer = new IntersectionObserver((entries) => {
        entries.forEach((entry) => {
            if (entry.isIntersecting) {
                hydrate(entry.target);
                observer.unobserve(entry.target);
            }
        });
    });

    container.querySelectorAll('.lazy').forEach((element) => observer.observe(element));
}
|
||||||
|
|
||||||
|
/**
 * Get the actual rendered rectangle of a media element with object-fit: contain.
 *
 * @param {HTMLElement} mediaElement - The img or video element
 * @param {number} containerWidth - Width of the container
 * @param {number} containerHeight - Height of the container
 * @returns {Object} - Rect with left, top, right, bottom coordinates
 */
export function getRenderedMediaRect(mediaElement, containerWidth, containerHeight) {
    // Intrinsic size: images expose naturalWidth/Height, videos expose
    // videoWidth/Height; fall back to the element's layout size.
    const intrinsicWidth = mediaElement.naturalWidth || mediaElement.videoWidth || mediaElement.clientWidth;
    const intrinsicHeight = mediaElement.naturalHeight || mediaElement.videoHeight || mediaElement.clientHeight;

    // Without usable dimensions, assume the media fills the whole container.
    if (!intrinsicWidth || !intrinsicHeight) {
        return { left: 0, top: 0, right: containerWidth, bottom: containerHeight };
    }

    const containerRatio = containerWidth / containerHeight;
    const mediaRatio = intrinsicWidth / intrinsicHeight;

    // object-fit: contain letterboxes along whichever axis the container has
    // excess room; center the rendered media on that axis.
    let left = 0;
    let top = 0;
    let renderedWidth;
    let renderedHeight;

    if (containerRatio > mediaRatio) {
        // Container wider than media: empty space on the sides.
        renderedHeight = containerHeight;
        renderedWidth = containerHeight * mediaRatio;
        left = (containerWidth - renderedWidth) / 2;
    } else {
        // Container taller than media: empty space above and below.
        renderedWidth = containerWidth;
        renderedHeight = containerWidth / mediaRatio;
        top = (containerHeight - renderedHeight) / 2;
    }

    return {
        left,
        top,
        right: left + renderedWidth,
        bottom: top + renderedHeight
    };
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize metadata panel interaction handlers
|
||||||
|
* @param {HTMLElement} container - Container element with media wrappers
|
||||||
|
*/
|
||||||
|
export function initMetadataPanelHandlers(container) {
|
||||||
|
const mediaWrappers = container.querySelectorAll('.media-wrapper');
|
||||||
|
|
||||||
|
// Wire hover behavior for each media wrapper: the metadata panel and the
// media controls become visible only while the pointer is over the actual
// rendered media rectangle (not the letterboxed padding) or over the
// metadata panel itself.
mediaWrappers.forEach(wrapper => {
    // Get the metadata panel and media element (img or video)
    const metadataPanel = wrapper.querySelector('.image-metadata-panel');
    const mediaControls = wrapper.querySelector('.media-controls');
    const mediaElement = wrapper.querySelector('img, video');

    if (!mediaElement) return;

    // Tracks whether the pointer currently sits on the panel, so leaving the
    // wrapper through the panel does not hide it.
    let isOverMetadataPanel = false;

    // Add event listeners to the wrapper for mouse tracking
    wrapper.addEventListener('mousemove', (e) => {
        // Get mouse position relative to wrapper
        const rect = wrapper.getBoundingClientRect();
        const mouseX = e.clientX - rect.left;
        const mouseY = e.clientY - rect.top;

        // Get the actual displayed dimensions of the media element
        const mediaRect = getRenderedMediaRect(mediaElement, rect.width, rect.height);

        // Check if mouse is over the actual media content
        const isOverMedia = (
            mouseX >= mediaRect.left &&
            mouseX <= mediaRect.right &&
            mouseY >= mediaRect.top &&
            mouseY <= mediaRect.bottom
        );

        // Show metadata panel and controls when over media content or metadata panel itself
        if (isOverMedia || isOverMetadataPanel) {
            if (metadataPanel) metadataPanel.classList.add('visible');
            if (mediaControls) mediaControls.classList.add('visible');
        } else {
            if (metadataPanel) metadataPanel.classList.remove('visible');
            if (mediaControls) mediaControls.classList.remove('visible');
        }
    });

    wrapper.addEventListener('mouseleave', () => {
        if (!isOverMetadataPanel) {
            if (metadataPanel) metadataPanel.classList.remove('visible');
            if (mediaControls) mediaControls.classList.remove('visible');
        }
    });

    // Add mouse enter/leave events for the metadata panel itself
    if (metadataPanel) {
        metadataPanel.addEventListener('mouseenter', () => {
            isOverMetadataPanel = true;
            metadataPanel.classList.add('visible');
            if (mediaControls) mediaControls.classList.add('visible');
        });

        // BUGFIX: the original handler read the deprecated global `event`
        // object (undefined in strict-mode modules and non-Chromium engines);
        // use the MouseEvent passed to the listener instead.
        metadataPanel.addEventListener('mouseleave', (e) => {
            isOverMetadataPanel = false;
            // Only hide if mouse is not over the media
            const rect = wrapper.getBoundingClientRect();
            const mediaRect = getRenderedMediaRect(mediaElement, rect.width, rect.height);
            const mouseX = e.clientX - rect.left;
            const mouseY = e.clientY - rect.top;

            const isOverMedia = (
                mouseX >= mediaRect.left &&
                mouseX <= mediaRect.right &&
                mouseY >= mediaRect.top &&
                mouseY <= mediaRect.bottom
            );

            if (!isOverMedia) {
                metadataPanel.classList.remove('visible');
                if (mediaControls) mediaControls.classList.remove('visible');
            }
        });

        // Prevent events from bubbling
        metadataPanel.addEventListener('click', (e) => {
            e.stopPropagation();
        });

        // Handle copy prompt buttons
        const copyBtns = metadataPanel.querySelectorAll('.copy-prompt-btn');
        copyBtns.forEach(copyBtn => {
            const promptIndex = copyBtn.dataset.promptIndex;
            const promptElement = wrapper.querySelector(`#prompt-${promptIndex}`);

            copyBtn.addEventListener('click', async (e) => {
                e.stopPropagation();

                if (!promptElement) return;

                try {
                    await copyToClipboard(promptElement.textContent, 'Prompt copied to clipboard');
                } catch (err) {
                    console.error('Copy failed:', err);
                    showToast('Copy failed', 'error');
                }
            });
        });

        // Prevent panel scroll from causing modal scroll. The listener is
        // passive (preventDefault unavailable), so only propagation is
        // stopped when the panel itself can still scroll in that direction.
        metadataPanel.addEventListener('wheel', (e) => {
            const isAtTop = metadataPanel.scrollTop === 0;
            const isAtBottom = metadataPanel.scrollHeight - metadataPanel.scrollTop === metadataPanel.clientHeight;

            if ((e.deltaY < 0 && !isAtTop) || (e.deltaY > 0 && !isAtBottom)) {
                e.stopPropagation();
            }
        }, { passive: true });
    }
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Initialize NSFW content blur toggle handlers
 * @param {HTMLElement} container - Container element with media wrappers
 */
export function initNsfwBlurHandlers(container) {
    // Blur toggle buttons flip the blurred state of their wrapper's media.
    container.querySelectorAll('.toggle-blur-btn').forEach(toggleBtn => {
        toggleBtn.addEventListener('click', (event) => {
            event.stopPropagation();

            const wrapperEl = toggleBtn.closest('.media-wrapper');
            const mediaEl = wrapperEl.querySelector('img, video');
            const nowBlurred = mediaEl.classList.toggle('blurred');

            // Reflect the new blur state in the button icon.
            const iconEl = toggleBtn.querySelector('i');
            iconEl.className = nowBlurred ? 'fas fa-eye' : 'fas fa-eye-slash';

            // Keep the NSFW overlay in sync with the blur state.
            const overlayEl = wrapperEl.querySelector('.nsfw-overlay');
            if (overlayEl) {
                overlayEl.style.display = nowBlurred ? 'flex' : 'none';
            }
        });
    });

    // "Show" buttons inside overlays always reveal the content.
    container.querySelectorAll('.show-content-btn').forEach(showBtn => {
        showBtn.addEventListener('click', (event) => {
            event.stopPropagation();

            const wrapperEl = showBtn.closest('.media-wrapper');
            wrapperEl.querySelector('img, video').classList.remove('blurred');

            // Reset the toggle button icon to the "content visible" state.
            const blurToggle = wrapperEl.querySelector('.toggle-blur-btn');
            if (blurToggle) {
                blurToggle.querySelector('i').className = 'fas fa-eye-slash';
            }

            // Hide the overlay.
            const overlayEl = wrapperEl.querySelector('.nsfw-overlay');
            if (overlayEl) {
                overlayEl.style.display = 'none';
            }
        });
    });
}
|
||||||
|
|
||||||
|
/**
 * Initialize media control buttons event handlers
 *
 * Wires the per-example delete buttons with a two-step confirmation flow
 * (first click arms the button, second click within 3s deletes via the
 * backend), then delegates preview-button setup to initSetPreviewHandlers.
 *
 * @param {HTMLElement} container - Container with media wrappers
 */
export function initMediaControlHandlers(container) {
    // Find all delete buttons in the container
    const deleteButtons = container.querySelectorAll('.example-delete-btn');

    deleteButtons.forEach(btn => {
        // Set initial state for the two-step confirm flow ('initial' -> 'confirm')
        btn.dataset.state = 'initial';

        // Plain function (not arrow) so `this` is the clicked button.
        btn.addEventListener('click', async function(e) {
            e.stopPropagation();

            // Explicitly check for disabled state
            if (this.classList.contains('disabled')) {
                return; // Don't do anything if button is disabled
            }

            const shortId = this.dataset.shortId;
            const btnState = this.dataset.state;

            // Only custom examples carry a short id; others cannot be deleted.
            if (!shortId) return;

            // Handle two-step confirmation
            if (btnState === 'initial') {
                // First click: show confirmation state
                this.dataset.state = 'confirm';
                this.classList.add('confirm');
                this.title = 'Click again to confirm deletion';

                // Auto-reset after 3 seconds if not confirmed
                setTimeout(() => {
                    if (this.dataset.state === 'confirm') {
                        this.dataset.state = 'initial';
                        this.classList.remove('confirm');
                        this.title = 'Delete this example';
                    }
                }, 3000);

                return;
            }

            // Second click within 3 seconds: proceed with deletion
            if (btnState === 'confirm') {
                // Disable the button and show a spinner while the request runs.
                this.disabled = true;
                this.classList.remove('confirm');
                this.innerHTML = '<i class="fas fa-spinner fa-spin"></i>';

                // Get model hash from the showcase section's data attributes
                const mediaWrapper = this.closest('.media-wrapper');
                const modelHashAttr = document.querySelector('.showcase-section')?.dataset;
                const modelHash = modelHashAttr?.modelHash;

                try {
                    // Call the API to delete the custom example
                    const response = await fetch('/api/delete-example-image', {
                        method: 'POST',
                        headers: {
                            'Content-Type': 'application/json'
                        },
                        body: JSON.stringify({
                            model_hash: modelHash,
                            short_id: shortId
                        })
                    });

                    const result = await response.json();

                    if (result.success) {
                        // Success: collapse and then remove the media wrapper
                        // (0.3s fade, then 0.3s height collapse -> remove at 600ms)
                        mediaWrapper.style.opacity = '0';
                        mediaWrapper.style.height = '0';
                        mediaWrapper.style.transition = 'opacity 0.3s ease, height 0.3s ease 0.3s';

                        setTimeout(() => {
                            mediaWrapper.remove();
                        }, 600);

                        // Show success toast
                        showToast('Example image deleted', 'success');

                        // Create an update object with only the necessary properties
                        const updateData = {
                            civitai: {
                                customImages: result.custom_images || []
                            }
                        };

                        // Update the item in the virtual scroller so the card
                        // reflects the server's remaining custom images.
                        state.virtualScroller.updateSingleItem(result.model_file_path, updateData);
                    } else {
                        // Show error message
                        showToast(result.error || 'Failed to delete example image', 'error');

                        // Reset button state back to the initial trash icon
                        this.disabled = false;
                        this.dataset.state = 'initial';
                        this.classList.remove('confirm');
                        this.innerHTML = '<i class="fas fa-trash-alt"></i>';
                        this.title = 'Delete this example';
                    }
                } catch (error) {
                    console.error('Error deleting example image:', error);
                    showToast('Failed to delete example image', 'error');

                    // Reset button state back to the initial trash icon
                    this.disabled = false;
                    this.dataset.state = 'initial';
                    this.classList.remove('confirm');
                    this.innerHTML = '<i class="fas fa-trash-alt"></i>';
                    this.title = 'Delete this example';
                }
            }
        });
    });

    // Initialize set preview buttons
    initSetPreviewHandlers(container);

    // Media control visibility is now handled in initMetadataPanelHandlers
    // Any click handlers or other functionality can still be added here
}
|
||||||
|
|
||||||
|
/**
 * Initialize set preview button handlers
 *
 * Each "set preview" button fetches the example's media (preferring the
 * locally cached copy when present) and uploads it as the model's preview.
 *
 * @param {HTMLElement} container - Container with media wrappers
 */
function initSetPreviewHandlers(container) {
    const previewButtons = container.querySelectorAll('.set-preview-btn');
    // Strict equality instead of the original loose `==` comparison.
    const modelType = state.currentPageType === 'loras' ? 'lora' : 'checkpoint';

    previewButtons.forEach(btn => {
        // Plain function (not arrow) so `this` is the clicked button.
        btn.addEventListener('click', async function(e) {
            e.stopPropagation();

            // Show loading state
            this.innerHTML = '<i class="fas fa-spinner fa-spin"></i>';
            this.disabled = true;

            try {
                // Get the model file path from showcase section data attribute
                const showcaseSection = document.querySelector('.showcase-section');
                const modelFilePath = showcaseSection?.dataset.filepath;

                if (!modelFilePath) {
                    throw new Error('Could not determine model file path');
                }

                // Get the media wrapper and media element
                const mediaWrapper = this.closest('.media-wrapper');
                const mediaElement = mediaWrapper.querySelector('img, video');

                if (!mediaElement) {
                    throw new Error('Media element not found');
                }

                // Get NSFW level from the wrapper or media element
                const nsfwLevel = parseInt(mediaWrapper.dataset.nsfwLevel || mediaElement.dataset.nsfwLevel || '0', 10);

                // Prefer the locally cached file when one is available;
                // otherwise fall back to the remote source. (The original
                // duplicated the fetch/upload code across both branches.)
                const useLocalFile = mediaElement.dataset.localSrc && !mediaElement.dataset.localSrc.includes('undefined');
                const sourceUrl = useLocalFile ? mediaElement.dataset.localSrc : mediaElement.src;

                // Fetch the media bytes and upload them as the preview image.
                const response = await fetch(sourceUrl);
                const blob = await response.blob();
                const file = new File([blob], 'preview.jpg', { type: blob.type });

                // Use the existing baseModelApi uploadPreview method with nsfw level
                await uploadPreview(modelFilePath, file, modelType, nsfwLevel);
            } catch (error) {
                console.error('Error setting preview:', error);
                showToast('Failed to set preview image', 'error');
            } finally {
                // Restore button state
                this.innerHTML = '<i class="fas fa-image"></i>';
                this.disabled = false;
            }
        });
    });
}
|
||||||
|
|
||||||
|
/**
 * Position media controls within the actual rendered media rectangle
 * @param {HTMLElement} mediaWrapper - The wrapper containing the media and controls
 */
export function positionMediaControlsInMediaRect(mediaWrapper) {
    const media = mediaWrapper.querySelector('img, video');
    const controls = mediaWrapper.querySelector('.media-controls');

    if (!media || !controls) return;

    // Compute the rectangle the media actually occupies inside the wrapper
    // (the wrapper may letterbox the content).
    const wrapperRect = mediaWrapper.getBoundingClientRect();
    const mediaRect = getRenderedMediaRect(media, wrapperRect.width, wrapperRect.height);

    // Inset from the edge of the rendered media.
    const padding = 8;

    // Controls sit at the top-right corner inside the media rectangle.
    controls.style.top = `${mediaRect.top + padding}px`;
    controls.style.right = `${wrapperRect.width - mediaRect.right + padding}px`;

    // The blur toggle (when present) mirrors this at the top-left corner.
    const blurToggle = mediaWrapper.querySelector('.toggle-blur-btn');
    if (blurToggle) {
        blurToggle.style.top = `${mediaRect.top + padding}px`;
        blurToggle.style.left = `${mediaRect.left + padding}px`;
    }
}
|
||||||
|
|
||||||
|
/**
 * Position all media controls in a container
 * @param {HTMLElement} container - Container with media wrappers
 */
export function positionAllMediaControls(container) {
    // Reposition the controls of every media wrapper in the container.
    for (const wrapper of container.querySelectorAll('.media-wrapper')) {
        positionMediaControlsInMediaRect(wrapper);
    }
}
|
||||||
83
static/js/components/shared/showcase/MetadataPanel.js
Normal file
83
static/js/components/shared/showcase/MetadataPanel.js
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
/**
|
||||||
|
* MetadataPanel.js
|
||||||
|
* Generates metadata panels for showcase media items
|
||||||
|
*/
|
||||||
|
|
||||||
|
/**
 * Generate metadata panel HTML
 *
 * All interpolated values (prompts, metadata fields) are HTML-escaped:
 * they originate from external model metadata and must not be able to
 * inject markup (XSS hardening). Copy buttons still receive the original
 * text, because reading `textContent` of the hidden element decodes the
 * entities back to the raw prompt.
 *
 * @param {boolean} hasParams - Whether there are generation parameters
 * @param {boolean} hasPrompts - Whether there are prompts
 * @param {string} prompt - Prompt text
 * @param {string} negativePrompt - Negative prompt text
 * @param {string} size - Image size
 * @param {string} seed - Generation seed
 * @param {string} model - Model used
 * @param {string} steps - Steps used
 * @param {string} sampler - Sampler used
 * @param {string} cfgScale - CFG scale
 * @param {string} clipSkip - Clip skip value
 * @returns {string} HTML content
 */
export function generateMetadataPanel(hasParams, hasPrompts, prompt, negativePrompt, size, seed, model, steps, sampler, cfgScale, clipSkip) {
    // Escape a value for safe interpolation into HTML markup.
    const esc = (value) => String(value)
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;')
        .replace(/'/g, '&#39;');

    // Create unique IDs for prompt copying
    const promptIndex = Math.random().toString(36).substring(2, 15);
    const negPromptIndex = Math.random().toString(36).substring(2, 15);

    let content = '<div class="image-metadata-panel"><div class="metadata-content">';

    if (hasParams) {
        content += `
            <div class="params-tags">
                ${size ? `<div class="param-tag"><span class="param-name">Size:</span><span class="param-value">${esc(size)}</span></div>` : ''}
                ${seed ? `<div class="param-tag"><span class="param-name">Seed:</span><span class="param-value">${esc(seed)}</span></div>` : ''}
                ${model ? `<div class="param-tag"><span class="param-name">Model:</span><span class="param-value">${esc(model)}</span></div>` : ''}
                ${steps ? `<div class="param-tag"><span class="param-name">Steps:</span><span class="param-value">${esc(steps)}</span></div>` : ''}
                ${sampler ? `<div class="param-tag"><span class="param-name">Sampler:</span><span class="param-value">${esc(sampler)}</span></div>` : ''}
                ${cfgScale ? `<div class="param-tag"><span class="param-name">CFG:</span><span class="param-value">${esc(cfgScale)}</span></div>` : ''}
                ${clipSkip ? `<div class="param-tag"><span class="param-name">Clip Skip:</span><span class="param-value">${esc(clipSkip)}</span></div>` : ''}
            </div>
        `;
    }

    if (!hasParams && !hasPrompts) {
        content += `
            <div class="no-metadata-message">
                <i class="fas fa-info-circle"></i>
                <span>No generation parameters available</span>
            </div>
        `;
    }

    if (prompt) {
        content += `
            <div class="metadata-row prompt-row">
                <span class="metadata-label">Prompt:</span>
                <div class="metadata-prompt-wrapper">
                    <div class="metadata-prompt">${esc(prompt)}</div>
                    <button class="copy-prompt-btn" data-prompt-index="${promptIndex}">
                        <i class="fas fa-copy"></i>
                    </button>
                </div>
            </div>
            <div class="hidden-prompt" id="prompt-${promptIndex}" style="display:none;">${esc(prompt)}</div>
        `;
    }

    if (negativePrompt) {
        content += `
            <div class="metadata-row prompt-row">
                <span class="metadata-label">Negative Prompt:</span>
                <div class="metadata-prompt-wrapper">
                    <div class="metadata-prompt">${esc(negativePrompt)}</div>
                    <button class="copy-prompt-btn" data-prompt-index="${negPromptIndex}">
                        <i class="fas fa-copy"></i>
                    </button>
                </div>
            </div>
            <div class="hidden-prompt" id="prompt-${negPromptIndex}" style="display:none;">${esc(negativePrompt)}</div>
        `;
    }

    content += '</div></div>';
    return content;
}
|
||||||
586
static/js/components/shared/showcase/ShowcaseView.js
Normal file
586
static/js/components/shared/showcase/ShowcaseView.js
Normal file
@@ -0,0 +1,586 @@
|
|||||||
|
/**
|
||||||
|
* ShowcaseView.js
|
||||||
|
* Shared showcase component for displaying examples in model modals (Lora/Checkpoint)
|
||||||
|
*/
|
||||||
|
import { showToast } from '../../../utils/uiHelpers.js';
|
||||||
|
import { state } from '../../../state/index.js';
|
||||||
|
import { NSFW_LEVELS } from '../../../utils/constants.js';
|
||||||
|
import {
|
||||||
|
initLazyLoading,
|
||||||
|
initNsfwBlurHandlers,
|
||||||
|
initMetadataPanelHandlers,
|
||||||
|
initMediaControlHandlers,
|
||||||
|
positionAllMediaControls
|
||||||
|
} from './MediaUtils.js';
|
||||||
|
import { generateMetadataPanel } from './MetadataPanel.js';
|
||||||
|
import { generateImageWrapper, generateVideoWrapper } from './MediaRenderers.js';
|
||||||
|
|
||||||
|
/**
 * Load example images asynchronously
 * @param {Array} images - Array of image objects (both regular and custom)
 * @param {string} modelHash - Model hash for fetching local files
 */
export async function loadExampleImages(images, modelHash) {
    try {
        const showcaseTab = document.getElementById('showcase-tab');
        if (!showcaseTab) return;

        // Fetch the locally downloaded example files first; a failure here is
        // non-fatal — rendering simply falls back to remote URLs.
        let localFiles = [];
        try {
            const endpoint = '/api/example-image-files';
            const params = `model_hash=${modelHash}`;
            const response = await fetch(`${endpoint}?${params}`);
            const result = await response.json();
            if (result.success) {
                localFiles = result.files;
            }
        } catch (error) {
            console.error("Failed to get example files:", error);
        }

        // Render with both remote images and local files available.
        showcaseTab.innerHTML = renderShowcaseContent(images, localFiles);

        // Re-attach showcase listeners when the carousel starts expanded.
        const carousel = showcaseTab.querySelector('.carousel');
        if (carousel && !carousel.classList.contains('collapsed')) {
            initShowcaseContent(carousel);
        }

        // Enable drag & drop / file-picker import of additional examples.
        initExampleImport(modelHash, showcaseTab);
    } catch (error) {
        console.error('Error loading example images:', error);
        const showcaseTab = document.getElementById('showcase-tab');
        if (showcaseTab) {
            showcaseTab.innerHTML = `
                <div class="error-message">
                    <i class="fas fa-exclamation-circle"></i>
                    Error loading example images
                </div>
            `;
        }
    }
}
|
||||||
|
|
||||||
|
/**
 * Render showcase content
 * @param {Array} images - Array of images/videos to show
 * @param {Array} exampleFiles - Local example files
 * @returns {string} HTML content
 */
export function renderShowcaseContent(images, exampleFiles = []) {
    // No examples at all: show the empty-state import interface.
    if (!images?.length) {
        return renderImportInterface(true);
    }

    // Apply SFW-only filtering when enabled in settings.
    const showOnlySFW = state.settings.show_only_sfw;
    let visibleImages = images;
    let hiddenCount = 0;

    if (showOnlySFW) {
        visibleImages = images.filter(media => {
            const level = media.nsfwLevel !== undefined ? media.nsfwLevel : 0;
            return level < NSFW_LEVELS.R;
        });
        hiddenCount = images.length - visibleImages.length;
    }

    // Everything was filtered out: explain why instead of showing nothing.
    if (visibleImages.length === 0) {
        return `
            <div class="no-examples">
                <p>All example images are filtered due to NSFW content settings</p>
                <p class="nsfw-filter-info">Your settings are currently set to show only safe-for-work content</p>
                <p>You can change this in Settings <i class="fas fa-cog"></i></p>
            </div>
        `;
    }

    // Banner noting how many items the SFW filter removed, if any.
    const hiddenNotification = hiddenCount > 0 ?
        `<div class="nsfw-filter-notification">
            <i class="fas fa-eye-slash"></i> ${hiddenCount} ${hiddenCount === 1 ? 'image' : 'images'} hidden due to SFW-only setting
        </div>` : '';

    return `
        <div class="scroll-indicator" onclick="toggleShowcase(this)">
            <i class="fas fa-chevron-down"></i>
            <span>Scroll or click to show ${visibleImages.length} examples</span>
        </div>
        <div class="carousel collapsed">
            ${hiddenNotification}
            <div class="carousel-container">
                ${visibleImages.map((media, index) => renderMediaItem(media, index, exampleFiles)).join('')}
            </div>

            ${renderImportInterface(false)}
        </div>
    `;
}
|
||||||
|
|
||||||
|
/**
 * Render a single media item (image or video)
 *
 * Resolves the local copy (when present), sizes the wrapper from the
 * media's aspect ratio, applies NSFW blur settings, builds the metadata
 * panel and control buttons, and delegates final markup generation to
 * the image/video wrapper renderers.
 *
 * @param {Object} img - Image/video metadata
 * @param {number} index - Index in the array
 * @param {Array} exampleFiles - Local files
 * @returns {string} HTML for the media item
 */
function renderMediaItem(img, index, exampleFiles) {
    // Find matching file in our list of actual files
    let localFile = findLocalFile(img, index, exampleFiles);

    const remoteUrl = img.url || '';
    const localUrl = localFile ? localFile.path : '';
    // Local files carry an explicit flag; for remote URLs fall back to the
    // file extension to decide whether this is a video.
    const isVideo = localFile ? localFile.is_video :
        remoteUrl.endsWith('.mp4') || remoteUrl.endsWith('.webm');

    // Calculate appropriate aspect ratio (height as a percentage of width),
    // clamped between 40% and 60% of the viewport height relative to the
    // 800px modal width.
    // NOTE(review): assumes img.width/img.height are present and non-zero;
    // missing dimensions would yield NaN here — confirm upstream guarantees.
    const aspectRatio = (img.height / img.width) * 100;
    const containerWidth = 800; // modal content maximum width
    const minHeightPercent = 40;
    const maxHeightPercent = (window.innerHeight * 0.6 / containerWidth) * 100;
    const heightPercent = Math.max(
        minHeightPercent,
        Math.min(maxHeightPercent, aspectRatio)
    );

    // Check if media should be blurred per user settings and content level
    const nsfwLevel = img.nsfwLevel !== undefined ? img.nsfwLevel : 0;
    const shouldBlur = state.settings.blurMatureContent && nsfwLevel > NSFW_LEVELS.PG13;

    // Determine NSFW warning text based on level (highest tier wins)
    let nsfwText = "Mature Content";
    if (nsfwLevel >= NSFW_LEVELS.XXX) {
        nsfwText = "XXX-rated Content";
    } else if (nsfwLevel >= NSFW_LEVELS.X) {
        nsfwText = "X-rated Content";
    } else if (nsfwLevel >= NSFW_LEVELS.R) {
        nsfwText = "R-rated Content";
    }

    // Extract generation metadata from the image (both snake_case and
    // camelCase negative-prompt keys are accepted)
    const meta = img.meta || {};
    const prompt = meta.prompt || '';
    const negativePrompt = meta.negative_prompt || meta.negativePrompt || '';
    const size = meta.Size || `${img.width}x${img.height}`;
    const seed = meta.seed || '';
    const model = meta.Model || '';
    const steps = meta.steps || '';
    const sampler = meta.sampler || '';
    const cfgScale = meta.cfgScale || '';
    const clipSkip = meta.clipSkip || '';

    // Check if we have any meaningful generation parameters
    const hasParams = seed || model || steps || sampler || cfgScale || clipSkip;
    const hasPrompts = prompt || negativePrompt;

    // Create metadata panel content
    const metadataPanel = generateMetadataPanel(
        hasParams, hasPrompts,
        prompt, negativePrompt,
        size, seed, model, steps, sampler, cfgScale, clipSkip
    );

    // Determine if this is a custom (user-imported) image — only those
    // carry an `id` and only those can be deleted.
    const isCustomImage = Boolean(img.id);

    // Create the media control buttons HTML (set-preview always enabled,
    // delete enabled only for custom images)
    const mediaControlsHtml = `
        <div class="media-controls">
            <button class="media-control-btn set-preview-btn" title="Set as preview">
                <i class="fas fa-image"></i>
            </button>
            <button class="media-control-btn example-delete-btn ${!isCustomImage ? 'disabled' : ''}"
                    title="${isCustomImage ? 'Delete this example' : 'Only custom images can be deleted'}"
                    data-short-id="${img.id || ''}"
                    ${!isCustomImage ? 'disabled' : ''}>
                <i class="fas fa-trash-alt"></i>
                <i class="fas fa-check confirm-icon"></i>
            </button>
        </div>
    `;

    // Generate the appropriate wrapper based on media type
    if (isVideo) {
        return generateVideoWrapper(
            img, heightPercent, shouldBlur, nsfwText, metadataPanel,
            localUrl, remoteUrl, mediaControlsHtml
        );
    }

    return generateImageWrapper(
        img, heightPercent, shouldBlur, nsfwText, metadataPanel,
        localUrl, remoteUrl, mediaControlsHtml
    );
}
|
||||||
|
|
||||||
|
/**
 * Find the matching local file for an image
 * @param {Object} img - Image metadata
 * @param {number} index - Image index
 * @param {Array} exampleFiles - Array of local files
 * @returns {Object|null} Matching local file or null
 */
function findLocalFile(img, index, exampleFiles) {
    if (!exampleFiles || exampleFiles.length === 0) return null;

    // Custom (user-imported) images are stored as custom_<id>.*
    if (img.id) {
        const customPrefix = `custom_${img.id}`;
        return exampleFiles.find(file => file.name.startsWith(customPrefix));
    }

    // Regular civitai images are stored as image_<index>.*
    const byIndex = exampleFiles.find(file => {
        const match = file.name.match(/image_(\d+)\./);
        return match && parseInt(match[1]) === index;
    });
    if (byIndex) return byIndex;

    // Fall back to positional pairing when the naming convention fails.
    return index < exampleFiles.length ? exampleFiles[index] : undefined;
}
|
||||||
|
|
||||||
|
/**
 * Render the import interface for example images
 * @param {boolean} isEmpty - Whether there are no existing examples
 * @returns {string} HTML content for import interface
 */
function renderImportInterface(isEmpty) {
    // The heading and the wrapper class vary with the empty state;
    // everything else is static markup.
    const heading = isEmpty ? 'No example images available' : 'Add more examples';
    const emptyClass = isEmpty ? 'empty' : '';

    return `
        <div class="example-import-area ${emptyClass}">
            <div class="import-container" id="exampleImportContainer">
                <div class="import-placeholder">
                    <i class="fas fa-cloud-upload-alt"></i>
                    <h3>${heading}</h3>
                    <p>Drag & drop images or videos here</p>
                    <p class="sub-text">or</p>
                    <button class="select-files-btn" id="selectExampleFilesBtn">
                        <i class="fas fa-folder-open"></i> Select Files
                    </button>
                    <p class="import-formats">Supported formats: jpg, png, gif, webp, mp4, webm</p>
                </div>
                <input type="file" id="exampleFilesInput" multiple accept="image/*,video/mp4,video/webm" style="display: none;">
                <div class="import-progress-container" style="display: none;">
                    <div class="import-progress">
                        <div class="progress-bar"></div>
                    </div>
                    <span class="progress-text">Importing files...</span>
                </div>
            </div>
        </div>
    `;
}
|
||||||
|
|
||||||
|
/**
 * Initialize the example import functionality
 * @param {string} modelHash - The SHA256 hash of the model
 * @param {Element} container - The container element for the import area
 */
export function initExampleImport(modelHash, container) {
    if (!container) return;

    const importContainer = container.querySelector('#exampleImportContainer');
    const fileInput = container.querySelector('#exampleFilesInput');
    const selectBtn = container.querySelector('#selectExampleFilesBtn');

    // The visible button proxies clicks to the hidden file input.
    if (selectBtn) {
        selectBtn.addEventListener('click', () => {
            fileInput.click();
        });
    }

    // Import any files chosen through the picker.
    if (fileInput) {
        fileInput.addEventListener('change', (event) => {
            if (event.target.files.length > 0) {
                handleImportFiles(Array.from(event.target.files), modelHash, importContainer);
            }
        });
    }

    // Drag & drop wiring — everything below needs the drop target.
    if (!importContainer) return;

    const stopDefaults = (event) => {
        event.preventDefault();
        event.stopPropagation();
    };

    // Suppress the browser's default drag handling for all relevant events.
    for (const eventName of ['dragenter', 'dragover', 'dragleave', 'drop']) {
        importContainer.addEventListener(eventName, stopDefaults, false);
    }

    // Highlight the drop area while a drag hovers over it.
    for (const eventName of ['dragenter', 'dragover']) {
        importContainer.addEventListener(eventName, () => {
            importContainer.classList.add('highlight');
        }, false);
    }

    // Remove the highlight when the drag leaves or completes.
    for (const eventName of ['dragleave', 'drop']) {
        importContainer.addEventListener(eventName, () => {
            importContainer.classList.remove('highlight');
        }, false);
    }

    // Import dropped files.
    importContainer.addEventListener('drop', (event) => {
        handleImportFiles(Array.from(event.dataTransfer.files), modelHash, importContainer);
    }, false);
}
|
||||||
|
|
||||||
|
/**
 * Handle the file import process: validate the selected files, upload them,
 * then refresh the showcase tab and the virtual-scroller card.
 * @param {File[]} files - Array of files to import
 * @param {string} modelHash - The SHA256 hash of the model
 * @param {Element} importContainer - The container element for import UI
 */
async function handleImportFiles(files, modelHash, importContainer) {
    // Filter for supported file types (matched against the extension only).
    const supportedImages = ['.jpg', '.jpeg', '.png', '.gif', '.webp'];
    const supportedVideos = ['.mp4', '.webm'];
    const supportedExtensions = [...supportedImages, ...supportedVideos];

    const validFiles = files.filter(file => {
        const ext = '.' + file.name.split('.').pop().toLowerCase();
        return supportedExtensions.includes(ext);
    });

    if (validFiles.length === 0) {
        // Consistency fix: use the app-wide toast instead of a blocking
        // alert(), matching every other feedback path in this function.
        showToast('No supported files selected. Please select image or video files.', 'warning');
        return;
    }

    try {
        // Use FormData so File objects upload as multipart form data.
        const formData = new FormData();
        formData.append('model_hash', modelHash);
        validFiles.forEach(file => {
            formData.append('files', file);
        });

        // Call API to import files.
        const response = await fetch('/api/import-example-images', {
            method: 'POST',
            body: formData
        });

        const result = await response.json();
        if (!result.success) {
            throw new Error(result.error || 'Failed to import example files');
        }

        // Fetch the updated list of local example files.
        const updatedFilesResponse = await fetch(`/api/example-image-files?model_hash=${modelHash}`);
        const updatedFilesResult = await updatedFilesResponse.json();
        if (!updatedFilesResult.success) {
            throw new Error(updatedFilesResult.error || 'Failed to get updated file list');
        }

        // Re-render the showcase content with the combined image set.
        const showcaseTab = document.getElementById('showcase-tab');
        if (showcaseTab) {
            const regularImages = result.regular_images || [];
            const customImages = result.custom_images || [];
            const allImages = [...regularImages, ...customImages];
            showcaseTab.innerHTML = renderShowcaseContent(allImages, updatedFilesResult.files);

            // Re-initialize showcase interactions if the carousel is expanded.
            const carousel = showcaseTab.querySelector('.carousel');
            if (carousel && !carousel.classList.contains('collapsed')) {
                initShowcaseContent(carousel);
            }

            // Re-wire the import UI inside the freshly rendered content.
            initExampleImport(modelHash, showcaseTab);

            showToast('Example images imported successfully', 'success');

            // Push the new image lists into the virtual scroller so the
            // model card reflects the change without a full reload.
            if (state.virtualScroller && result.model_file_path) {
                const updateData = {
                    civitai: {
                        images: regularImages,
                        customImages: customImages
                    }
                };
                state.virtualScroller.updateSingleItem(result.model_file_path, updateData);
            }
        }
    } catch (error) {
        console.error('Error importing examples:', error);
        showToast('Failed to import example images', 'error');
    }
}
|
||||||
|
|
||||||
|
/**
 * Toggle showcase expansion/collapse and update the scroll-indicator text.
 * @param {HTMLElement} element - The scroll indicator element (sibling above the carousel)
 */
export function toggleShowcase(element) {
    const carousel = element.nextElementSibling;
    const isCollapsed = carousel.classList.contains('collapsed');
    const indicator = element.querySelector('span');
    const icon = element.querySelector('i');

    carousel.classList.toggle('collapsed');

    if (isCollapsed) {
        // Expanding: the hint no longer needs a count (fix: the previous code
        // computed an unused `count` local here — dead code removed).
        indicator.textContent = `Scroll or click to hide examples`;
        icon.classList.replace('fa-chevron-down', 'fa-chevron-up');
        initShowcaseContent(carousel);
    } else {
        // Collapsing: tell the user how many examples are now hidden.
        const count = carousel.querySelectorAll('.media-wrapper').length;
        indicator.textContent = `Scroll or click to show ${count} examples`;
        icon.classList.replace('fa-chevron-up', 'fa-chevron-down');

        // Force the container's height to 0 for the collapse transition, then
        // clear it after ~300ms so future expansion can size naturally.
        const carouselContainer = carousel.querySelector('.carousel-container');
        if (carouselContainer) {
            carouselContainer.style.height = '0';
            setTimeout(() => {
                carouselContainer.style.height = '';
            }, 300);
        }
    }
}
|
||||||
|
|
||||||
|
/**
 * Initialize all showcase content interactions (lazy loading, NSFW blur,
 * metadata panels, media controls) and keep control positions in sync with
 * window resizes and media load events.
 * @param {HTMLElement} carousel - The carousel element
 */
export function initShowcaseContent(carousel) {
    if (!carousel) return;

    initLazyLoading(carousel);
    initNsfwBlurHandlers(carousel);
    initMetadataPanelHandlers(carousel);
    initMediaControlHandlers(carousel);
    positionAllMediaControls(carousel);

    // BUG FIX: the previous code called window.removeEventListener with a
    // freshly created closure, which never matches the listener registered on
    // an earlier call — so resize handlers accumulated every time this ran.
    // Store the registered handler on the element so it can actually be
    // removed before re-registering.
    if (carousel._showcaseResizeHandler) {
        window.removeEventListener('resize', carousel._showcaseResizeHandler);
    }
    const resizeHandler = () => positionAllMediaControls(carousel);
    carousel._showcaseResizeHandler = resizeHandler;
    window.addEventListener('resize', resizeHandler);

    // Media loading can change dimensions, so re-position controls afterwards.
    const mediaElements = carousel.querySelectorAll('img, video');
    mediaElements.forEach(media => {
        media.addEventListener('load', () => positionAllMediaControls(carousel));
        if (media.tagName === 'VIDEO') {
            media.addEventListener('loadedmetadata', () => positionAllMediaControls(carousel));
        }
    });
}
|
||||||
|
|
||||||
|
/**
 * Smooth-scroll the enclosing modal content back to the top.
 * @param {HTMLElement} button - The back-to-top button that was clicked
 */
export function scrollToTop(button) {
    const modalContent = button.closest('.modal-content');
    modalContent?.scrollTo({ top: 0, behavior: 'smooth' });
}
|
||||||
|
|
||||||
|
/**
 * Set up showcase scroll functionality for a modal: auto-expands the showcase
 * when the user scrolls past the bottom of a collapsed carousel, and wires a
 * back-to-top button whenever the modal's content appears in the DOM.
 *
 * NOTE(review): every call registers a new document-level wheel listener and a
 * new MutationObserver on document.body that is never disconnected — confirm
 * this is only called once per modal per page load.
 *
 * @param {string} modalId - ID of the modal element
 */
export function setupShowcaseScroll(modalId) {
    // Listen for wheel events; passive:false is required so preventDefault()
    // below can actually stop the page scroll.
    document.addEventListener('wheel', (event) => {
        const modalContent = document.querySelector(`#${modalId} .modal-content`);
        if (!modalContent) return;

        const showcase = modalContent.querySelector('.showcase-section');
        if (!showcase) return;

        const carousel = showcase.querySelector('.carousel');
        const scrollIndicator = showcase.querySelector('.scroll-indicator');

        // Only intercept downward scrolls while the showcase is collapsed.
        if (carousel?.classList.contains('collapsed') && event.deltaY > 0) {
            // Within 100px of the bottom counts as "at the bottom".
            const isNearBottom = modalContent.scrollHeight - modalContent.scrollTop - modalContent.clientHeight < 100;

            if (isNearBottom) {
                toggleShowcase(scrollIndicator);
                event.preventDefault();
            }
        }
    }, { passive: false });

    // Use MutationObserver to set up the back-to-top button when modal content
    // is added to the DOM (the modal may be rendered lazily).
    const observer = new MutationObserver((mutations) => {
        for (const mutation of mutations) {
            if (mutation.type === 'childList' && mutation.addedNodes.length) {
                const modal = document.getElementById(modalId);
                if (modal && modal.querySelector('.modal-content')) {
                    setupBackToTopButton(modal.querySelector('.modal-content'));
                }
            }
        }
    });

    observer.observe(document.body, { childList: true, subtree: true });

    // Try to set up the button immediately in case the modal is already open.
    const modalContent = document.querySelector(`#${modalId} .modal-content`);
    if (modalContent) {
        setupBackToTopButton(modalContent);
    }
}
|
||||||
|
|
||||||
|
/**
 * Wire a scroll listener on the modal content that toggles the visibility of
 * its back-to-top button (shown once scrolled past 300px).
 * @param {HTMLElement} modalContent - Modal content element
 */
function setupBackToTopButton(modalContent) {
    // Clear any legacy inline scroll handler to avoid duplicated behavior.
    modalContent.onscroll = null;

    modalContent.addEventListener('scroll', () => {
        const backToTopBtn = modalContent.querySelector('.back-to-top');
        if (!backToTopBtn) return;
        backToTopBtn.classList.toggle('visible', modalContent.scrollTop > 300);
    });

    // Evaluate the initial scroll position right away.
    modalContent.dispatchEvent(new Event('scroll'));
}
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
import { appCore } from './core.js';
|
import { appCore } from './core.js';
|
||||||
import { state } from './state/index.js';
|
import { state } from './state/index.js';
|
||||||
import { showLoraModal, toggleShowcase, scrollToTop } from './components/loraModal/index.js';
|
import { showLoraModal } from './components/loraModal/index.js';
|
||||||
import { loadMoreLoras } from './api/loraApi.js';
|
import { loadMoreLoras } from './api/loraApi.js';
|
||||||
import { updateCardsForBulkMode } from './components/LoraCard.js';
|
import { updateCardsForBulkMode } from './components/LoraCard.js';
|
||||||
import { bulkManager } from './managers/BulkManager.js';
|
import { bulkManager } from './managers/BulkManager.js';
|
||||||
@@ -43,8 +43,6 @@ class LoraPageManager {
|
|||||||
window.closeExcludeModal = closeExcludeModal;
|
window.closeExcludeModal = closeExcludeModal;
|
||||||
window.downloadManager = this.downloadManager;
|
window.downloadManager = this.downloadManager;
|
||||||
window.moveManager = moveManager;
|
window.moveManager = moveManager;
|
||||||
window.toggleShowcase = toggleShowcase;
|
|
||||||
window.scrollToTop = scrollToTop;
|
|
||||||
|
|
||||||
// Bulk operations
|
// Bulk operations
|
||||||
window.toggleBulkMode = () => bulkManager.toggleBulkMode();
|
window.toggleBulkMode = () => bulkManager.toggleBulkMode();
|
||||||
|
|||||||
@@ -36,6 +36,11 @@ export class BulkManager {
|
|||||||
document.addEventListener('keydown', (e) => {
|
document.addEventListener('keydown', (e) => {
|
||||||
// Check if it's Ctrl+A (or Cmd+A on Mac)
|
// Check if it's Ctrl+A (or Cmd+A on Mac)
|
||||||
if ((e.ctrlKey || e.metaKey) && e.key === 'a') {
|
if ((e.ctrlKey || e.metaKey) && e.key === 'a') {
|
||||||
|
// First check if any modal is currently open - if so, don't handle Ctrl+A
|
||||||
|
if (modalManager.isAnyModalOpen()) {
|
||||||
|
return; // Exit early - let the browser handle Ctrl+A within the modal
|
||||||
|
}
|
||||||
|
|
||||||
// Prevent default browser "Select All" behavior
|
// Prevent default browser "Select All" behavior
|
||||||
e.preventDefault();
|
e.preventDefault();
|
||||||
|
|
||||||
|
|||||||
@@ -48,12 +48,6 @@ class ExampleImagesManager {
|
|||||||
if (collapseBtn) {
|
if (collapseBtn) {
|
||||||
collapseBtn.onclick = () => this.toggleProgressPanel();
|
collapseBtn.onclick = () => this.toggleProgressPanel();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize migration button handler
|
|
||||||
const migrateBtn = document.getElementById('exampleImagesMigrateBtn');
|
|
||||||
if (migrateBtn) {
|
|
||||||
migrateBtn.onclick = () => this.handleMigrateButton();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize event listeners for buttons
|
// Initialize event listeners for buttons
|
||||||
@@ -149,95 +143,6 @@ class ExampleImagesManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Method to handle migrate button click
|
|
||||||
async handleMigrateButton() {
|
|
||||||
if (this.isDownloading || this.isMigrating) {
|
|
||||||
if (this.isPaused) {
|
|
||||||
// If paused, resume
|
|
||||||
this.resumeDownload();
|
|
||||||
} else {
|
|
||||||
showToast('Migration or download already in progress', 'info');
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start migration
|
|
||||||
this.startMigrate();
|
|
||||||
}
|
|
||||||
|
|
||||||
async startMigrate() {
|
|
||||||
try {
|
|
||||||
const outputDir = document.getElementById('exampleImagesPath').value || '';
|
|
||||||
|
|
||||||
if (!outputDir) {
|
|
||||||
showToast('Please enter a download location first', 'warning');
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update path in backend settings before starting migration
|
|
||||||
try {
|
|
||||||
const pathUpdateResponse = await fetch('/api/settings', {
|
|
||||||
method: 'POST',
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json'
|
|
||||||
},
|
|
||||||
body: JSON.stringify({
|
|
||||||
example_images_path: outputDir
|
|
||||||
})
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!pathUpdateResponse.ok) {
|
|
||||||
throw new Error(`HTTP error! Status: ${pathUpdateResponse.status}`);
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
console.error('Failed to update example images path:', error);
|
|
||||||
}
|
|
||||||
|
|
||||||
const pattern = document.getElementById('exampleImagesMigratePattern').value || '{model}.example.{index}.{ext}';
|
|
||||||
const optimize = document.getElementById('optimizeExampleImages').checked;
|
|
||||||
|
|
||||||
const response = await fetch('/api/migrate-example-images', {
|
|
||||||
method: 'POST',
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json'
|
|
||||||
},
|
|
||||||
body: JSON.stringify({
|
|
||||||
output_dir: outputDir,
|
|
||||||
pattern: pattern,
|
|
||||||
optimize: optimize,
|
|
||||||
model_types: ['lora', 'checkpoint']
|
|
||||||
})
|
|
||||||
});
|
|
||||||
|
|
||||||
const data = await response.json();
|
|
||||||
|
|
||||||
if (data.success) {
|
|
||||||
this.isDownloading = true;
|
|
||||||
this.isMigrating = true;
|
|
||||||
this.isPaused = false;
|
|
||||||
this.hasShownCompletionToast = false; // Reset toast flag when starting new migration
|
|
||||||
this.startTime = new Date();
|
|
||||||
this.updateUI(data.status);
|
|
||||||
this.showProgressPanel();
|
|
||||||
this.startProgressUpdates();
|
|
||||||
// Update button text
|
|
||||||
const btnTextElement = document.getElementById('exampleDownloadBtnText');
|
|
||||||
if (btnTextElement) {
|
|
||||||
btnTextElement.textContent = "Resume";
|
|
||||||
}
|
|
||||||
showToast('Example images migration started', 'success');
|
|
||||||
|
|
||||||
// Close settings modal
|
|
||||||
modalManager.closeModal('settingsModal');
|
|
||||||
} else {
|
|
||||||
showToast(data.error || 'Failed to start migration', 'error');
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
console.error('Failed to start migration:', error);
|
|
||||||
showToast('Failed to start migration', 'error');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async checkDownloadStatus() {
|
async checkDownloadStatus() {
|
||||||
try {
|
try {
|
||||||
const response = await fetch('/api/example-images-status');
|
const response = await fetch('/api/example-images-status');
|
||||||
|
|||||||
@@ -234,6 +234,19 @@ export class ModalManager {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add exampleAccessModal registration
|
||||||
|
const exampleAccessModal = document.getElementById('exampleAccessModal');
|
||||||
|
if (exampleAccessModal) {
|
||||||
|
this.registerModal('exampleAccessModal', {
|
||||||
|
element: exampleAccessModal,
|
||||||
|
onClose: () => {
|
||||||
|
this.getModal('exampleAccessModal').element.style.display = 'none';
|
||||||
|
document.body.classList.remove('modal-open');
|
||||||
|
},
|
||||||
|
closeOnOutsideClick: true
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
document.addEventListener('keydown', this.boundHandleEscape);
|
document.addEventListener('keydown', this.boundHandleEscape);
|
||||||
this.initialized = true;
|
this.initialized = true;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -36,11 +36,6 @@ export class SettingsManager {
|
|||||||
if (state.global.settings.optimizeExampleImages === undefined) {
|
if (state.global.settings.optimizeExampleImages === undefined) {
|
||||||
state.global.settings.optimizeExampleImages = true;
|
state.global.settings.optimizeExampleImages = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set default for useCentralizedExamples if undefined
|
|
||||||
if (state.global.settings.useCentralizedExamples === undefined) {
|
|
||||||
state.global.settings.useCentralizedExamples = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert old boolean compactMode to new displayDensity string
|
// Convert old boolean compactMode to new displayDensity string
|
||||||
if (typeof state.global.settings.displayDensity === 'undefined') {
|
if (typeof state.global.settings.displayDensity === 'undefined') {
|
||||||
@@ -114,14 +109,6 @@ export class SettingsManager {
|
|||||||
optimizeExampleImagesCheckbox.checked = state.global.settings.optimizeExampleImages || false;
|
optimizeExampleImagesCheckbox.checked = state.global.settings.optimizeExampleImages || false;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set centralized examples setting
|
|
||||||
const useCentralizedExamplesCheckbox = document.getElementById('useCentralizedExamples');
|
|
||||||
if (useCentralizedExamplesCheckbox) {
|
|
||||||
useCentralizedExamplesCheckbox.checked = state.global.settings.useCentralizedExamples !== false;
|
|
||||||
// Update dependent controls
|
|
||||||
this.updateExamplesControlsState();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load default lora root
|
// Load default lora root
|
||||||
await this.loadLoraRoots();
|
await this.loadLoraRoots();
|
||||||
|
|
||||||
@@ -196,10 +183,6 @@ export class SettingsManager {
|
|||||||
state.global.settings.optimizeExampleImages = value;
|
state.global.settings.optimizeExampleImages = value;
|
||||||
} else if (settingKey === 'compact_mode') {
|
} else if (settingKey === 'compact_mode') {
|
||||||
state.global.settings.compactMode = value;
|
state.global.settings.compactMode = value;
|
||||||
} else if (settingKey === 'use_centralized_examples') {
|
|
||||||
state.global.settings.useCentralizedExamples = value;
|
|
||||||
// Update dependent controls state
|
|
||||||
this.updateExamplesControlsState();
|
|
||||||
} else {
|
} else {
|
||||||
// For any other settings that might be added in the future
|
// For any other settings that might be added in the future
|
||||||
state.global.settings[settingKey] = value;
|
state.global.settings[settingKey] = value;
|
||||||
@@ -523,42 +506,6 @@ export class SettingsManager {
|
|||||||
// Add the appropriate density class
|
// Add the appropriate density class
|
||||||
grid.classList.add(`${density}-density`);
|
grid.classList.add(`${density}-density`);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Apply centralized examples toggle state
|
|
||||||
this.updateExamplesControlsState();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add new method to update example control states
|
|
||||||
updateExamplesControlsState() {
|
|
||||||
const useCentralized = state.global.settings.useCentralizedExamples !== false;
|
|
||||||
|
|
||||||
// Find all controls that require centralized mode
|
|
||||||
const exampleSections = document.querySelectorAll('[data-requires-centralized="true"]');
|
|
||||||
exampleSections.forEach(section => {
|
|
||||||
// Enable/disable all inputs and buttons in the section
|
|
||||||
const controls = section.querySelectorAll('input, button, select');
|
|
||||||
controls.forEach(control => {
|
|
||||||
control.disabled = !useCentralized;
|
|
||||||
|
|
||||||
// Add/remove disabled class for styling
|
|
||||||
if (control.classList.contains('primary-btn') || control.classList.contains('secondary-btn')) {
|
|
||||||
if (!useCentralized) {
|
|
||||||
control.classList.add('disabled');
|
|
||||||
} else {
|
|
||||||
control.classList.remove('disabled');
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// Visually show the section as disabled
|
|
||||||
if (!useCentralized) {
|
|
||||||
section.style.opacity = '0.6';
|
|
||||||
section.style.pointerEvents = 'none';
|
|
||||||
} else {
|
|
||||||
section.style.opacity = '';
|
|
||||||
section.style.pointerEvents = '';
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -796,6 +796,30 @@ export class VirtualScroller {
|
|||||||
console.log('Virtual scroller enabled');
|
console.log('Virtual scroller enabled');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Helper function for deep merging objects
|
||||||
|
deepMerge(target, source) {
|
||||||
|
if (!source) return target;
|
||||||
|
|
||||||
|
const result = { ...target };
|
||||||
|
|
||||||
|
Object.keys(source).forEach(key => {
|
||||||
|
if (source[key] !== null && typeof source[key] === 'object' && !Array.isArray(source[key])) {
|
||||||
|
// If property exists in target and is an object, recursively merge
|
||||||
|
if (target[key] && typeof target[key] === 'object' && !Array.isArray(target[key])) {
|
||||||
|
result[key] = this.deepMerge(target[key], source[key]);
|
||||||
|
} else {
|
||||||
|
// Otherwise just assign the source value
|
||||||
|
result[key] = source[key];
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// For non-objects (including arrays), just assign the value
|
||||||
|
result[key] = source[key];
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
updateSingleItem(filePath, updatedItem) {
|
updateSingleItem(filePath, updatedItem) {
|
||||||
if (!filePath || !updatedItem) {
|
if (!filePath || !updatedItem) {
|
||||||
console.error('Invalid parameters for updateSingleItem');
|
console.error('Invalid parameters for updateSingleItem');
|
||||||
@@ -809,8 +833,8 @@ export class VirtualScroller {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the item data
|
// Update the item data using deep merge
|
||||||
this.items[index] = {...this.items[index], ...updatedItem};
|
this.items[index] = this.deepMerge(this.items[index], updatedItem);
|
||||||
|
|
||||||
// If the item is currently rendered, update its DOM representation
|
// If the item is currently rendered, update its DOM representation
|
||||||
if (this.renderedItems.has(index)) {
|
if (this.renderedItems.has(index)) {
|
||||||
@@ -822,6 +846,28 @@ export class VirtualScroller {
|
|||||||
|
|
||||||
// Create and render the updated element
|
// Create and render the updated element
|
||||||
const updatedElement = this.createItemElement(this.items[index], index);
|
const updatedElement = this.createItemElement(this.items[index], index);
|
||||||
|
|
||||||
|
// Add update indicator visual effects
|
||||||
|
updatedElement.classList.add('updated');
|
||||||
|
|
||||||
|
// Add temporary update tag
|
||||||
|
const updateIndicator = document.createElement('div');
|
||||||
|
updateIndicator.className = 'update-indicator';
|
||||||
|
updateIndicator.textContent = 'Updated';
|
||||||
|
updatedElement.querySelector('.card-preview').appendChild(updateIndicator);
|
||||||
|
|
||||||
|
// Automatically remove the updated class after animation completes
|
||||||
|
setTimeout(() => {
|
||||||
|
updatedElement.classList.remove('updated');
|
||||||
|
}, 1500);
|
||||||
|
|
||||||
|
// Automatically remove the indicator after animation completes
|
||||||
|
setTimeout(() => {
|
||||||
|
if (updateIndicator && updateIndicator.parentNode) {
|
||||||
|
updateIndicator.remove();
|
||||||
|
}
|
||||||
|
}, 2000);
|
||||||
|
|
||||||
this.renderedItems.set(index, updatedElement);
|
this.renderedItems.set(index, updatedElement);
|
||||||
this.gridElement.appendChild(updatedElement);
|
this.gridElement.appendChild(updatedElement);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -441,521 +441,4 @@ export async function openExampleImagesFolder(modelHash) {
|
|||||||
showToast('Failed to open example images folder', 'error');
|
showToast('Failed to open example images folder', 'error');
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
/**
 * Gets local URLs for example images with primary and fallback options.
 * @param {Object} img - Image object (its `url` supplies the remote extension)
 * @param {number} index - Image index (local files are 1-based: image_1, ...)
 * @param {string} modelHash - Model hash
 * @returns {Object} - Object with primary and fallback URLs
 */
export function getLocalExampleImageUrl(img, index, modelHash) {
    if (!modelHash) return { primary: null, fallback: null };

    // Derive the extension from the remote URL (query string stripped).
    const remoteExt = (img.url || '').split('?')[0].split('.').pop().toLowerCase();

    const basePath = `/example_images_static/${modelHash}/image_${index + 1}`;

    // FIX: videos must keep their own container format — webm was previously
    // routed through the image branch and offered a bogus .webp primary.
    // Both mp4 and webm are in this module's supported-video list.
    if (remoteExt === 'mp4' || remoteExt === 'webm') {
        return { primary: `${basePath}.${remoteExt}`, fallback: null };
    }

    // For images, prepare both possible formats.
    const webpUrl = `${basePath}.webp`;
    const originalExtUrl = remoteExt ? `${basePath}.${remoteExt}` : `${basePath}.jpg`;

    // Optimization (webp conversion) defaults to enabled.
    const optimizeImages = state.settings.optimizeExampleImages !== false;

    // Primary/fallback order depends on whether optimization is on.
    return {
        primary: optimizeImages ? webpUrl : originalExtUrl,
        fallback: optimizeImages ? originalExtUrl : webpUrl
    };
}
|
|
||||||
|
|
||||||
/**
 * Try to load a local image first, fall back to remote if local fails.
 * @param {HTMLImageElement} imgElement - The image element to update
 * @param {Object} urls - Object with local URLs {primary, fallback} and remote URL
 */
export function tryLocalImageOrFallbackToRemote(imgElement, urls) {
    const { primary: localUrl, fallback: fallbackUrl } = urls.local || {};
    const remoteUrl = urls.remote;

    // No local options at all: go straight to the remote URL.
    if (!localUrl) {
        imgElement.src = remoteUrl;
        return;
    }

    // Probe a candidate URL with an off-screen Image; call onOk when it loads
    // successfully and onFail otherwise.
    const probe = (url, onOk, onFail) => {
        const tester = new Image();
        tester.onload = onOk;
        tester.onerror = onFail;
        tester.src = url;
    };

    probe(
        localUrl,
        () => { imgElement.src = localUrl; },
        () => {
            if (!fallbackUrl) {
                // No fallback, use remote.
                imgElement.src = remoteUrl;
                return;
            }
            probe(
                fallbackUrl,
                () => { imgElement.src = fallbackUrl; },
                () => { imgElement.src = remoteUrl; }
            );
        }
    );
}
|
|
||||||
|
|
||||||
/**
 * Try to load a local video first, fall back to remote if local fails.
 * @param {HTMLVideoElement} videoElement - The video element to update
 * @param {Object} urls - Object with local URLs {primary} and remote URL
 */
export function tryLocalVideoOrFallbackToRemote(videoElement, urls) {
    const { primary: localUrl } = urls.local || {};
    const remoteUrl = urls.remote;

    // Apply a URL to both the element and its nested <source>, then reload.
    // (Extracted helper: this sequence was previously triplicated verbatim.)
    const applySource = (url) => {
        videoElement.src = url;
        const source = videoElement.querySelector('source');
        if (source) source.src = url;
        videoElement.load();
    };

    // No local path: use remote directly.
    if (!localUrl) {
        applySource(remoteUrl);
        return;
    }

    // HEAD-probe the local file to see if it exists; fall back to the remote
    // URL when it is missing or the request fails entirely.
    fetch(localUrl, { method: 'HEAD' })
        .then(response => applySource(response.ok ? localUrl : remoteUrl))
        .catch(() => applySource(remoteUrl));
}
|
|
||||||
|
|
||||||
/**
 * Initialize lazy loading for images and videos in a container.
 * Elements carrying the `lazy` class are hydrated (local-first with remote
 * fallback) once they intersect the viewport.
 * @param {HTMLElement} container - The container with lazy-loadable elements
 */
export function initLazyLoading(container) {
    const lazyElements = container.querySelectorAll('.lazy');

    // Resolve a single element's source from its data attributes and load it.
    const hydrate = (element) => {
        const urls = {
            local: {
                primary: element.dataset.localSrc || null,
                fallback: element.dataset.localFallbackSrc || null
            },
            remote: element.dataset.remoteSrc
        };

        // Dispatch by element type: videos and images use different loaders.
        if (element.tagName.toLowerCase() === 'video') {
            tryLocalVideoOrFallbackToRemote(element, urls);
        } else {
            tryLocalImageOrFallbackToRemote(element, urls);
        }

        element.classList.remove('lazy');
    };

    const observer = new IntersectionObserver((entries) => {
        for (const entry of entries) {
            if (!entry.isIntersecting) continue;
            hydrate(entry.target);
            observer.unobserve(entry.target);
        }
    });

    lazyElements.forEach(element => observer.observe(element));
}
|
|
||||||
|
|
||||||
/**
 * Get the actual rendered rectangle of a media element with object-fit: contain
 * @param {HTMLElement} mediaElement - The img or video element
 * @param {number} containerWidth - Width of the container
 * @param {number} containerHeight - Height of the container
 * @returns {Object} - Rect with left, top, right, bottom coordinates
 */
export function getRenderedMediaRect(mediaElement, containerWidth, containerHeight) {
    // Intrinsic size: images expose natural*, videos expose video*; fall back to layout size
    const srcWidth = mediaElement.naturalWidth || mediaElement.videoWidth || mediaElement.clientWidth;
    const srcHeight = mediaElement.naturalHeight || mediaElement.videoHeight || mediaElement.clientHeight;

    // Unknown intrinsic size: assume the media fills the whole container
    if (!srcWidth || !srcHeight) {
        return { left: 0, top: 0, right: containerWidth, bottom: containerHeight };
    }

    const containerRatio = containerWidth / containerHeight;
    const mediaRatio = srcWidth / srcHeight;

    let left = 0;
    let top = 0;
    let width;
    let height;

    // object-fit: contain — letterbox along the axis where the container is larger
    if (containerRatio > mediaRatio) {
        // Container is wider than the media: empty space on the sides
        height = containerHeight;
        width = height * mediaRatio;
        left = (containerWidth - width) / 2;
    } else {
        // Container is taller than the media: empty space top and bottom
        width = containerWidth;
        height = width / mediaRatio;
        top = (containerHeight - height) / 2;
    }

    return { left, top, right: left + width, bottom: top + height };
}
|
|
||||||
|
|
||||||
/**
 * Initialize metadata panel interaction handlers.
 *
 * For each `.media-wrapper` in the container, shows its `.image-metadata-panel`
 * while the pointer is over the rendered media content (accounting for
 * object-fit: contain letterboxing via getRenderedMediaRect) or over the panel
 * itself, wires up the copy-prompt buttons, and keeps panel scrolling from
 * bubbling up to the modal.
 *
 * @param {HTMLElement} container - Container element with media wrappers
 */
export function initMetadataPanelHandlers(container) {
    const mediaWrappers = container.querySelectorAll('.media-wrapper');

    mediaWrappers.forEach(wrapper => {
        // Get the metadata panel and media element (img or video)
        const metadataPanel = wrapper.querySelector('.image-metadata-panel');
        const mediaElement = wrapper.querySelector('img, video');

        if (!metadataPanel || !mediaElement) return;

        let isOverMetadataPanel = false;

        // True when the pointer (in viewport coordinates) is inside the
        // rendered media content; the media may be letterboxed inside the
        // wrapper by object-fit: contain.
        const isPointerOverMedia = (clientX, clientY) => {
            const rect = wrapper.getBoundingClientRect();
            const mediaRect = getRenderedMediaRect(mediaElement, rect.width, rect.height);
            const x = clientX - rect.left;
            const y = clientY - rect.top;
            return (
                x >= mediaRect.left &&
                x <= mediaRect.right &&
                y >= mediaRect.top &&
                y <= mediaRect.bottom
            );
        };

        // Track the mouse over the wrapper and toggle panel visibility
        wrapper.addEventListener('mousemove', (e) => {
            if (isPointerOverMedia(e.clientX, e.clientY) || isOverMetadataPanel) {
                metadataPanel.classList.add('visible');
            } else {
                metadataPanel.classList.remove('visible');
            }
        });

        wrapper.addEventListener('mouseleave', () => {
            if (!isOverMetadataPanel) {
                metadataPanel.classList.remove('visible');
            }
        });

        // Keep the panel open while the pointer is over it
        metadataPanel.addEventListener('mouseenter', () => {
            isOverMetadataPanel = true;
            metadataPanel.classList.add('visible');
        });

        // BUG FIX: this handler previously took no parameter and read the
        // deprecated `window.event` global, which is undefined in Firefox.
        // Accept the event object as a parameter instead.
        metadataPanel.addEventListener('mouseleave', (event) => {
            isOverMetadataPanel = false;
            // Only hide if the mouse is not over the media content
            if (!isPointerOverMedia(event.clientX, event.clientY)) {
                metadataPanel.classList.remove('visible');
            }
        });

        // Prevent clicks on the panel from bubbling to the wrapper/modal
        metadataPanel.addEventListener('click', (e) => {
            e.stopPropagation();
        });

        // Handle copy prompt buttons
        const copyBtns = metadataPanel.querySelectorAll('.copy-prompt-btn');
        copyBtns.forEach(copyBtn => {
            const promptIndex = copyBtn.dataset.promptIndex;
            const promptElement = wrapper.querySelector(`#prompt-${promptIndex}`);

            copyBtn.addEventListener('click', async (e) => {
                e.stopPropagation();

                if (!promptElement) return;

                try {
                    await copyToClipboard(promptElement.textContent, 'Prompt copied to clipboard');
                } catch (err) {
                    console.error('Copy failed:', err);
                    showToast('Copy failed', 'error');
                }
            });
        });

        // Stop panel scrolling from also scrolling the modal. The listener is
        // passive, so preventDefault is unavailable; stopPropagation suffices.
        metadataPanel.addEventListener('wheel', (e) => {
            const isAtTop = metadataPanel.scrollTop === 0;
            const isAtBottom = metadataPanel.scrollHeight - metadataPanel.scrollTop === metadataPanel.clientHeight;

            // Only intervene if the wheel would actually scroll the panel
            if ((e.deltaY < 0 && !isAtTop) || (e.deltaY > 0 && !isAtBottom)) {
                e.stopPropagation();
            }
        }, { passive: true });
    });
}
|
|
||||||
|
|
||||||
/**
 * Initialize NSFW content blur toggle handlers
 * @param {HTMLElement} container - Container element with media wrappers
 */
export function initNsfwBlurHandlers(container) {
    // Eye-toggle buttons flip the blur on and off
    container.querySelectorAll('.toggle-blur-btn').forEach(btn => {
        btn.addEventListener('click', (e) => {
            e.stopPropagation();
            const wrapper = btn.closest('.media-wrapper');
            const media = wrapper.querySelector('img, video');
            const blurred = media.classList.toggle('blurred');

            // Swap the icon to reflect the new state
            const icon = btn.querySelector('i');
            icon.className = blurred ? 'fas fa-eye' : 'fas fa-eye-slash';

            // Show the overlay only while the content is blurred
            const overlay = wrapper.querySelector('.nsfw-overlay');
            if (overlay) {
                overlay.style.display = blurred ? 'flex' : 'none';
            }
        });
    });

    // "Show" buttons inside the overlays always reveal the content
    container.querySelectorAll('.show-content-btn').forEach(btn => {
        btn.addEventListener('click', (e) => {
            e.stopPropagation();
            const wrapper = btn.closest('.media-wrapper');
            wrapper.querySelector('img, video').classList.remove('blurred');

            // Reset the toggle button icon to the "hidden blur" state
            const toggleBtn = wrapper.querySelector('.toggle-blur-btn');
            if (toggleBtn) {
                toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
            }

            // Hide the overlay
            const overlay = wrapper.querySelector('.nsfw-overlay');
            if (overlay) {
                overlay.style.display = 'none';
            }
        });
    });
}
|
|
||||||
|
|
||||||
/**
 * Toggle showcase expansion
 * @param {HTMLElement} element - The scroll indicator element
 */
export function toggleShowcase(element) {
    const carousel = element.nextElementSibling;
    const isCollapsed = carousel.classList.contains('collapsed');
    const indicator = element.querySelector('span');
    const icon = element.querySelector('i');

    carousel.classList.toggle('collapsed');

    if (isCollapsed) {
        // Expanding: lazy-load media and wire up the interactive handlers.
        // (Removed a dead `count` local here — the hide-hint text never used it.)
        indicator.textContent = `Scroll or click to hide examples`;
        icon.classList.replace('fa-chevron-down', 'fa-chevron-up');
        initLazyLoading(carousel);

        // Initialize NSFW content blur toggle handlers
        initNsfwBlurHandlers(carousel);

        // Initialize metadata panel interaction handlers
        initMetadataPanelHandlers(carousel);
    } else {
        // Collapsing: restore the hint text with the example count
        const count = carousel.querySelectorAll('.media-wrapper').length;
        indicator.textContent = `Scroll or click to show ${count} examples`;
        icon.classList.replace('fa-chevron-up', 'fa-chevron-down');

        // Animate the carousel closed, then clear the inline height after the
        // 300ms transition so the stylesheet takes over again
        const carouselContainer = carousel.querySelector('.carousel-container');
        if (carouselContainer) {
            carouselContainer.style.height = '0';
            setTimeout(() => {
                carouselContainer.style.height = '';
            }, 300);
        }
    }
}
|
|
||||||
|
|
||||||
/**
 * Set up showcase scroll functionality
 * @param {string} modalId - ID of the modal element
 */
export function setupShowcaseScroll(modalId) {
    // Expand a collapsed showcase when the user wheels downward near the
    // bottom of the modal content.
    document.addEventListener('wheel', (event) => {
        const modalContent = document.querySelector(`#${modalId} .modal-content`);
        if (!modalContent) return;

        const showcase = modalContent.querySelector('.showcase-section');
        if (!showcase) return;

        const carousel = showcase.querySelector('.carousel');
        const scrollIndicator = showcase.querySelector('.scroll-indicator');

        // Only act on downward scrolling while the carousel is collapsed
        if (!carousel?.classList.contains('collapsed') || event.deltaY <= 0) return;

        const remaining = modalContent.scrollHeight - modalContent.scrollTop - modalContent.clientHeight;
        if (remaining < 100) {
            toggleShowcase(scrollIndicator);
            event.preventDefault();
        }
    }, { passive: false });

    // Watch for the modal content being (re)inserted into the DOM so the
    // back-to-top button always gets wired up.
    const observer = new MutationObserver((mutations) => {
        for (const mutation of mutations) {
            if (mutation.type !== 'childList' || !mutation.addedNodes.length) continue;
            const modal = document.getElementById(modalId);
            const content = modal && modal.querySelector('.modal-content');
            if (content) {
                setupBackToTopButton(content);
            }
        }
    });

    // Start observing the document body for changes
    observer.observe(document.body, { childList: true, subtree: true });

    // Also handle the case where the modal is already open right now
    const existingContent = document.querySelector(`#${modalId} .modal-content`);
    if (existingContent) {
        setupBackToTopButton(existingContent);
    }
}
|
|
||||||
|
|
||||||
/**
 * Set up the back-to-top button for a modal's content area.
 *
 * Shows the `.back-to-top` button once the content is scrolled past 300px
 * and hides it near the top. Safe to call repeatedly on the same element:
 * the previously attached listener is removed before a new one is added.
 *
 * @param {HTMLElement} modalContent - Modal content element
 */
export function setupBackToTopButton(modalContent) {
    // BUG FIX: the old code set `onscroll = null`, which does NOT remove
    // listeners registered via addEventListener — repeated calls (this is
    // invoked from a MutationObserver) stacked duplicate scroll handlers.
    // Track the handler on the element and detach it before re-attaching.
    if (modalContent._backToTopScrollHandler) {
        modalContent.removeEventListener('scroll', modalContent._backToTopScrollHandler);
    }

    const onScroll = () => {
        const backToTopBtn = modalContent.querySelector('.back-to-top');
        if (backToTopBtn) {
            if (modalContent.scrollTop > 300) {
                backToTopBtn.classList.add('visible');
            } else {
                backToTopBtn.classList.remove('visible');
            }
        }
    };

    modalContent._backToTopScrollHandler = onScroll;
    modalContent.addEventListener('scroll', onScroll);

    // Trigger a scroll event so the button state matches the initial position
    modalContent.dispatchEvent(new Event('scroll'));
}
|
|
||||||
|
|
||||||
/**
 * Scroll to top of modal content
 * @param {HTMLElement} button - Back to top button element
 */
export function scrollToTop(button) {
    const modalContent = button.closest('.modal-content');
    if (!modalContent) return;

    // Smooth-scroll the modal content back to the top
    modalContent.scrollTo({ top: 0, behavior: 'smooth' });
}
|
|
||||||
|
|
||||||
/**
 * Get example image files for a specific model from the backend
 * @param {string} modelHash - The model's hash
 * @returns {Promise<Array>} Array of file objects with path and metadata;
 *                           an empty array on any error (never throws)
 */
export async function getExampleImageFiles(modelHash) {
    try {
        // Encode the hash so unusual characters cannot break the query string
        const response = await fetch(`/api/example-image-files?model_hash=${encodeURIComponent(modelHash)}`);

        // Surface HTTP errors explicitly instead of feeding an error page to .json()
        if (!response.ok) {
            console.error('Failed to get example image files: HTTP', response.status);
            return [];
        }

        const result = await response.json();

        if (result.success) {
            return result.files;
        }
        console.error('Failed to get example image files:', result.error);
        return [];
    } catch (error) {
        console.error('Error fetching example image files:', error);
        return [];
    }
}
|
}
|
||||||
@@ -229,7 +229,7 @@
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
<!-- Add Cache Management Section -->
|
<!-- Add Cache Management Section -->
|
||||||
<div class="settings-section">
|
<!-- <div class="settings-section">
|
||||||
<h3>Cache Management</h3>
|
<h3>Cache Management</h3>
|
||||||
|
|
||||||
<div class="setting-item">
|
<div class="setting-item">
|
||||||
@@ -249,33 +249,13 @@
|
|||||||
<span class="warning-text">May cause temporary performance impact during rebuild.</span>
|
<span class="warning-text">May cause temporary performance impact during rebuild.</span>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div> -->
|
||||||
|
|
||||||
<!-- Add Example Images Settings Section -->
|
<!-- Add Example Images Settings Section -->
|
||||||
<div class="settings-section">
|
<div class="settings-section">
|
||||||
<h3>Example Images</h3>
|
<h3>Example Images</h3>
|
||||||
|
|
||||||
<div class="setting-item">
|
<div class="setting-item">
|
||||||
<div class="setting-row">
|
|
||||||
<div class="setting-info">
|
|
||||||
<label for="useCentralizedExamples">Use Centralized Example Storage</label>
|
|
||||||
</div>
|
|
||||||
<div class="setting-control">
|
|
||||||
<label class="toggle-switch">
|
|
||||||
<input type="checkbox" id="useCentralizedExamples" checked
|
|
||||||
onchange="settingsManager.saveToggleSetting('useCentralizedExamples', 'use_centralized_examples')">
|
|
||||||
<span class="toggle-slider"></span>
|
|
||||||
</label>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div class="input-help">
|
|
||||||
When enabled (recommended), example images are stored in a central folder for better organization and performance.
|
|
||||||
When disabled, only example images stored alongside models (e.g., model-name.example.0.jpg) will be shown, but download
|
|
||||||
and management features will be unavailable.
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="setting-item" data-requires-centralized="true">
|
|
||||||
<div class="setting-row">
|
<div class="setting-row">
|
||||||
<div class="setting-info">
|
<div class="setting-info">
|
||||||
<label for="exampleImagesPath">Download Location <i class="fas fa-sync-alt restart-required-icon" title="Requires restart"></i></label>
|
<label for="exampleImagesPath">Download Location <i class="fas fa-sync-alt restart-required-icon" title="Requires restart"></i></label>
|
||||||
@@ -291,29 +271,8 @@
|
|||||||
Enter the folder path where example images from Civitai will be saved
|
Enter the folder path where example images from Civitai will be saved
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<!-- New migrate section -->
|
|
||||||
<div class="setting-item" data-requires-centralized="true">
|
|
||||||
<div class="setting-row">
|
|
||||||
<div class="setting-info">
|
|
||||||
<label for="exampleImagesMigratePattern">Migrate Existing Example Images</label>
|
|
||||||
</div>
|
|
||||||
<div class="setting-control migrate-control">
|
|
||||||
<input type="text" id="exampleImagesMigratePattern"
|
|
||||||
placeholder="{model}.example.{index}.{ext}"
|
|
||||||
value="{model}.example.{index}.{ext}" />
|
|
||||||
<button id="exampleImagesMigrateBtn" class="secondary-btn">
|
|
||||||
<i class="fas fa-file-import"></i> <span>Migrate</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div class="input-help">
|
|
||||||
Pattern to find existing example images. Use {model} for model filename, {index} for numbering, and {ext} for file extension.<br>
|
|
||||||
Example patterns: "{model}.example.{index}.{ext}", "{model}_{index}.{ext}", "{model}/{model}.example.{index}.{ext}"
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="setting-item" data-requires-centralized="true">
|
<div class="setting-item">
|
||||||
<div class="setting-row">
|
<div class="setting-row">
|
||||||
<div class="setting-info">
|
<div class="setting-info">
|
||||||
<label for="optimizeExampleImages">Optimize Downloaded Images</label>
|
<label for="optimizeExampleImages">Optimize Downloaded Images</label>
|
||||||
@@ -327,7 +286,7 @@
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="input-help">
|
<div class="input-help">
|
||||||
Optimize example images to reduce file size and improve loading speed
|
Optimize example images to reduce file size and improve loading speed (metadata will be preserved)
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -613,4 +572,32 @@
|
|||||||
<button class="confirm-btn" id="confirmRelinkBtn">Confirm Re-link</button>
|
<button class="confirm-btn" id="confirmRelinkBtn">Confirm Re-link</button>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Example Images Access Modal -->
|
||||||
|
<div id="exampleAccessModal" class="modal">
|
||||||
|
<div class="modal-content example-access-modal">
|
||||||
|
<button class="close" onclick="modalManager.closeModal('exampleAccessModal')">×</button>
|
||||||
|
<h2>Local Example Images</h2>
|
||||||
|
<p>No local example images found for this model. View options:</p>
|
||||||
|
|
||||||
|
<div class="example-access-options">
|
||||||
|
<button id="downloadExamplesBtn" class="example-option-btn">
|
||||||
|
<i class="fas fa-cloud-download-alt"></i>
|
||||||
|
<span class="option-title">Download from Civitai</span>
|
||||||
|
<span class="option-desc">Save remote examples locally for offline use and faster loading</span>
|
||||||
|
</button>
|
||||||
|
|
||||||
|
<button id="importExamplesBtn" class="example-option-btn">
|
||||||
|
<i class="fas fa-file-import"></i>
|
||||||
|
<span class="option-title">Import Your Own</span>
|
||||||
|
<span class="option-desc">Add your own custom examples for this model</span>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="modal-footer-note">
|
||||||
|
<i class="fas fa-info-circle"></i>
|
||||||
|
<span>Remote examples are still viewable in the model details even without local copies</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
Reference in New Issue
Block a user