Refactor example images handling by introducing migration logic, updating metadata structure, and enhancing image loading in the UI

This commit is contained in:
Will Miao
2025-06-18 17:14:49 +08:00
parent 022c6c157a
commit 3c047bee58
10 changed files with 412 additions and 118 deletions

View File

@@ -10,6 +10,7 @@ from .routes.misc_routes import MiscRoutes
from .routes.example_images_routes import ExampleImagesRoutes from .routes.example_images_routes import ExampleImagesRoutes
from .services.service_registry import ServiceRegistry from .services.service_registry import ServiceRegistry
from .services.settings_manager import settings from .services.settings_manager import settings
from .utils.example_images_migration import ExampleImagesMigration
import logging import logging
import sys import sys
import os import os
@@ -130,13 +131,13 @@ class LoraManager:
logging.getLogger('aiohttp.access').setLevel(logging.WARNING) logging.getLogger('aiohttp.access').setLevel(logging.WARNING)
# Initialize CivitaiClient first to ensure it's ready for other services # Initialize CivitaiClient first to ensure it's ready for other services
civitai_client = await ServiceRegistry.get_civitai_client() await ServiceRegistry.get_civitai_client()
# Register DownloadManager with ServiceRegistry # Register DownloadManager with ServiceRegistry
download_manager = await ServiceRegistry.get_download_manager() await ServiceRegistry.get_download_manager()
# Initialize WebSocket manager # Initialize WebSocket manager
ws_manager = await ServiceRegistry.get_websocket_manager() await ServiceRegistry.get_websocket_manager()
# Initialize scanners in background # Initialize scanners in background
lora_scanner = await ServiceRegistry.get_lora_scanner() lora_scanner = await ServiceRegistry.get_lora_scanner()
@@ -156,6 +157,8 @@ class LoraManager:
asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init') asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init')
asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init') asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')
await ExampleImagesMigration.check_and_run_migrations()
logger.info("LoRA Manager: All services initialized and background tasks scheduled") logger.info("LoRA Manager: All services initialized and background tasks scheduled")
except Exception as e: except Exception as e:

View File

@@ -70,8 +70,7 @@ class LoraRoutes:
# It's initializing if the cache object doesn't exist yet, # It's initializing if the cache object doesn't exist yet,
# OR if the scanner explicitly says it's initializing (background task running). # OR if the scanner explicitly says it's initializing (background task running).
is_initializing = ( is_initializing = (
self.scanner._cache is None or self.scanner._cache is None or self.scanner.is_initializing()
(hasattr(self.scanner, '_is_initializing') and self.scanner._is_initializing)
) )
if is_initializing: if is_initializing:

View File

@@ -749,6 +749,10 @@ class ModelScanner:
"""Scan all model directories and return metadata""" """Scan all model directories and return metadata"""
raise NotImplementedError("Subclasses must implement scan_all_models") raise NotImplementedError("Subclasses must implement scan_all_models")
def is_initializing(self) -> bool:
"""Check if the scanner is currently initializing"""
return self._is_initializing
def get_model_roots(self) -> List[str]: def get_model_roots(self) -> List[str]:
"""Get model root directories""" """Get model root directories"""
raise NotImplementedError("Subclasses must implement get_model_roots") raise NotImplementedError("Subclasses must implement get_model_roots")

View File

@@ -128,76 +128,6 @@ class ExampleImagesFileManager:
'is_video': file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'] 'is_video': file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
}) })
# Check if files use 1-based indexing (look for patterns like "image_1.jpg")
has_one_based = any(re.match(r'image_1\.\w+$', f['name']) for f in files)
has_zero_based = any(re.match(r'image_0\.\w+$', f['name']) for f in files)
# If there are 1-based indices and no 0-based indices, rename files
if has_one_based and not has_zero_based:
logger.info(f"Converting 1-based to 0-based indexing in {model_folder}")
# Sort files to ensure correct order
files.sort(key=lambda x: x['name'])
# First, create rename mapping to avoid conflicts
renames = []
for file in files:
match = re.match(r'image_(\d+)\.(\w+)$', file['name'])
if match:
index = int(match.group(1))
ext = match.group(2)
if index > 0: # Only rename if index is positive
new_name = f"image_{index-1}.{ext}"
renames.append((file['name'], new_name))
# Use temporary filenames to avoid conflicts
for old_name, new_name in renames:
old_path = os.path.join(model_folder, old_name)
temp_path = os.path.join(model_folder, f"temp_{old_name}")
try:
os.rename(old_path, temp_path)
except Exception as e:
logger.error(f"Failed to rename {old_path} to {temp_path}: {e}")
# Rename from temporary names to final names
for old_name, new_name in renames:
temp_path = os.path.join(model_folder, f"temp_{old_name}")
new_path = os.path.join(model_folder, new_name)
try:
os.rename(temp_path, new_path)
logger.debug(f"Renamed {old_name} to {new_name}")
# Update file list entry
for file in files:
if file['name'] == old_name:
file['name'] = new_name
file['path'] = f'/example_images_static/{model_hash}/{new_name}'
except Exception as e:
logger.error(f"Failed to rename {temp_path} to {new_path}: {e}")
# Refresh file list after renaming
files = []
for file in os.listdir(model_folder):
file_path = os.path.join(model_folder, file)
if os.path.isfile(file_path):
file_ext = os.path.splitext(file)[1].lower()
if (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']):
files.append({
'name': file,
'path': f'/example_images_static/{model_hash}/{file}',
'extension': file_ext,
'is_video': file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
})
# Sort files by index for consistent order
def extract_index(filename):
match = re.match(r'image_(\d+)\.\w+$', filename)
if match:
return int(match.group(1))
return float('inf') # Place non-matching files at the end
files.sort(key=lambda x: extract_index(x['name']))
return web.json_response({ return web.json_response({
'success': True, 'success': True,
'files': files 'files': files

View File

@@ -198,29 +198,32 @@ class MetadataUpdater:
newly_imported_paths: List of paths to newly imported files newly_imported_paths: List of paths to newly imported files
Returns: Returns:
list: Updated images array tuple: (regular_images, custom_images) - Both image arrays
""" """
try: try:
# Ensure civitai field exists in model_data # Ensure civitai field exists in model_data
if not model_data.get('civitai'): if not model_data.get('civitai'):
model_data['civitai'] = {} model_data['civitai'] = {}
# Ensure images array exists # Ensure customImages array exists
if not model_data['civitai'].get('images'): if not model_data['civitai'].get('customImages'):
model_data['civitai']['images'] = [] model_data['civitai']['customImages'] = []
# Get current images array # Get current customImages array
images = model_data['civitai']['images'] custom_images = model_data['civitai']['customImages']
# Add new image entry for each imported file # Add new image entry for each imported file
for path in newly_imported_paths: for path_tuple in newly_imported_paths:
path, short_id = path_tuple
# Determine if video or image # Determine if video or image
file_ext = os.path.splitext(path)[1].lower() file_ext = os.path.splitext(path)[1].lower()
is_video = file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'] is_video = file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
# Create image metadata entry # Create image metadata entry
image_entry = { image_entry = {
"url": "", # Empty URL as required "url": "", # Empty URL as requested
"id": short_id,
"nsfwLevel": 0, "nsfwLevel": 0,
"width": 720, # Default dimensions "width": 720, # Default dimensions
"height": 1280, "height": 1280,
@@ -240,8 +243,8 @@ class MetadataUpdater:
# If PIL fails or is unavailable, use default dimensions # If PIL fails or is unavailable, use default dimensions
pass pass
# Append to existing images array # Append to existing customImages array
images.append(image_entry) custom_images.append(image_entry)
# Save metadata to .metadata.json file # Save metadata to .metadata.json file
file_path = model_data.get('file_path') file_path = model_data.get('file_path')
@@ -261,8 +264,12 @@ class MetadataUpdater:
if file_path: if file_path:
await scanner.update_single_model_cache(file_path, file_path, model_data) await scanner.update_single_model_cache(file_path, file_path, model_data)
return images # Get regular images array (might be None)
regular_images = model_data['civitai'].get('images', [])
# Return both image arrays
return regular_images, custom_images
except Exception as e: except Exception as e:
logger.error(f"Failed to update metadata after import: {e}", exc_info=True) logger.error(f"Failed to update metadata after import: {e}", exc_info=True)
return [] return [], []

View File

@@ -0,0 +1,318 @@
import asyncio
import logging
import os
import re
import json
from ..services.settings_manager import settings
from ..services.service_registry import ServiceRegistry
from ..utils.metadata_manager import MetadataManager
from ..utils.example_images_processor import ExampleImagesProcessor
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
logger = logging.getLogger(__name__)
CURRENT_NAMING_VERSION = 2 # Increment this when naming conventions change
class ExampleImagesMigration:
    """Handles migrations for example images naming conventions.

    Migration state is versioned via the 'naming_version' key stored in the
    '.download_progress.json' file at the root of the example images path.
    Each migration step (_migrate_to_v1, _migrate_to_v2) is applied
    sequentially until CURRENT_NAMING_VERSION is reached.
    """

    @staticmethod
    async def check_and_run_migrations():
        """Check if migrations are needed and run them in background.

        Reads the stored naming version from the progress file; if it is
        behind CURRENT_NAMING_VERSION, schedules run_migrations() as a
        fire-and-forget asyncio task so startup is not blocked.
        """
        example_images_path = settings.get('example_images_path')
        if not example_images_path or not os.path.exists(example_images_path):
            logger.debug("No example images path configured or path doesn't exist, skipping migrations")
            return

        # Check current version from progress file
        current_version = 0
        progress_file = os.path.join(example_images_path, '.download_progress.json')
        if os.path.exists(progress_file):
            try:
                with open(progress_file, 'r', encoding='utf-8') as f:
                    progress_data = json.load(f)
                    # Missing key means the folder predates versioning (v0)
                    current_version = progress_data.get('naming_version', 0)
            except Exception as e:
                # Unreadable progress file falls back to v0, so all
                # migrations will be attempted (they are written to be
                # safe on already-migrated folders).
                logger.error(f"Failed to load progress file for migration check: {e}")

        # If current version is less than target version, start migration
        if current_version < CURRENT_NAMING_VERSION:
            logger.info(f"Starting example images naming migration from v{current_version} to v{CURRENT_NAMING_VERSION}")
            # Start migration in background task
            asyncio.create_task(
                ExampleImagesMigration.run_migrations(example_images_path, current_version, CURRENT_NAMING_VERSION)
            )

    @staticmethod
    async def run_migrations(example_images_path, from_version, to_version):
        """Run necessary migrations based on version difference.

        Args:
            example_images_path: Root directory containing per-model folders.
            from_version: Naming version currently recorded on disk.
            to_version: Target naming version (CURRENT_NAMING_VERSION).

        Note: the version stamp is written even if individual folders hit
        errors during migration; failures are logged, not retried.
        """
        try:
            # Get all model folders
            model_folders = []
            for item in os.listdir(example_images_path):
                item_path = os.path.join(example_images_path, item)
                if os.path.isdir(item_path) and len(item) == 64:  # SHA256 hash is 64 chars
                    model_folders.append(item_path)

            logger.info(f"Found {len(model_folders)} model folders to check for migration")

            # Apply migrations sequentially
            if from_version < 1 and to_version >= 1:
                await ExampleImagesMigration._migrate_to_v1(model_folders)

            if from_version < 2 and to_version >= 2:
                await ExampleImagesMigration._migrate_to_v2(model_folders)

            # Update version in progress file
            progress_file = os.path.join(example_images_path, '.download_progress.json')
            try:
                progress_data = {}
                if os.path.exists(progress_file):
                    with open(progress_file, 'r', encoding='utf-8') as f:
                        progress_data = json.load(f)

                progress_data['naming_version'] = to_version

                with open(progress_file, 'w', encoding='utf-8') as f:
                    json.dump(progress_data, f, indent=2)

                logger.info(f"Example images naming migration to v{to_version} completed")
            except Exception as e:
                logger.error(f"Failed to update version in progress file: {e}")
        except Exception as e:
            logger.error(f"Error during migration: {e}", exc_info=True)

    @staticmethod
    async def _migrate_to_v1(model_folders):
        """Migrate from 1-based to 0-based indexing.

        For each folder whose files start at image_1.* (and have no
        image_0.*), shifts every image_<n>.<ext> down by one. Renames go
        through temp_ intermediate names so that e.g. image_2 -> image_1
        cannot collide with the not-yet-moved image_1.
        """
        count = 0

        for folder in model_folders:
            has_one_based = False
            has_zero_based = False
            files_to_rename = []

            # Check naming pattern in this folder
            for file in os.listdir(folder):
                if re.match(r'image_1\.\w+$', file):
                    has_one_based = True
                if re.match(r'image_0\.\w+$', file):
                    has_zero_based = True

            # Only migrate folders with 1-based indexing and no 0-based
            if has_one_based and not has_zero_based:
                # Create rename mapping
                for file in os.listdir(folder):
                    match = re.match(r'image_(\d+)\.(\w+)$', file)
                    if match:
                        index = int(match.group(1))
                        ext = match.group(2)
                        if index > 0:  # Only rename if index is positive
                            files_to_rename.append((
                                file,
                                f"image_{index-1}.{ext}"
                            ))

                # Use temporary names to avoid conflicts
                for old_name, new_name in files_to_rename:
                    old_path = os.path.join(folder, old_name)
                    temp_path = os.path.join(folder, f"temp_{old_name}")
                    try:
                        os.rename(old_path, temp_path)
                    except Exception as e:
                        logger.error(f"Failed to rename {old_path} to {temp_path}: {e}")

                # Rename from temporary names to final names
                for old_name, new_name in files_to_rename:
                    temp_path = os.path.join(folder, f"temp_{old_name}")
                    new_path = os.path.join(folder, new_name)
                    try:
                        os.rename(temp_path, new_path)
                        logger.debug(f"Renamed {old_name} to {new_name} in {folder}")
                    except Exception as e:
                        logger.error(f"Failed to rename {temp_path} to {new_path}: {e}")

                count += 1

            # Give other tasks a chance to run
            if count % 10 == 0:
                await asyncio.sleep(0)

        logger.info(f"Migrated {count} folders from 1-based to 0-based indexing")

    @staticmethod
    async def _migrate_to_v2(model_folders):
        """
        Migrate to v2 naming scheme:
        - Move custom examples from images array to customImages array
        - Rename files from image_<index>.<ext> to custom_<short_id>.<ext>
        - Add id field to each custom image entry

        Custom examples are identified by an empty 'url' in the civitai
        images array (the convention used when files were imported locally).
        Requires the lora/checkpoint scanner caches; waits (up to ~120s per
        scanner) for initialization before starting.
        """
        count = 0
        updated_models = 0
        migration_errors = 0

        # Get scanner instances
        lora_scanner = await ServiceRegistry.get_lora_scanner()
        checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()

        # Wait until scanners are initialized
        scanners = [lora_scanner, checkpoint_scanner]
        for scanner in scanners:
            if scanner.is_initializing():
                logger.info("Waiting for scanners to complete initialization before starting migration...")
                initialized = False
                retry_count = 0
                while not initialized and retry_count < 120:  # Wait up to 120 seconds
                    await asyncio.sleep(1)
                    initialized = not scanner.is_initializing()
                    retry_count += 1
                if not initialized:
                    logger.warning("Scanner initialization timeout - proceeding with migration anyway")

        logger.info(f"Starting migration to v2 naming scheme for {len(model_folders)} model folders")

        for folder in model_folders:
            try:
                # Extract model hash from folder name
                model_hash = os.path.basename(folder)
                if not model_hash or len(model_hash) != 64:
                    continue

                # Find the model in scanner cache
                model_data = None
                scanner = None
                for scan_obj in scanners:
                    if scan_obj.has_hash(model_hash):
                        cache = await scan_obj.get_cached_data()
                        for item in cache.raw_data:
                            if item.get('sha256') == model_hash:
                                model_data = item
                                scanner = scan_obj
                                break
                    if model_data:
                        break

                if not model_data or not scanner:
                    logger.debug(f"Model with hash {model_hash} not found in cache, skipping migration")
                    continue

                # Clone model data to avoid modifying the cache directly
                # NOTE(review): .copy() is shallow — the nested 'civitai'
                # dict is still shared with the cache entry, so edits below
                # do reach the cached object before update_single_model_cache
                # is called. Confirm this is intended.
                model_metadata = model_data.copy()

                # Check if model has civitai metadata
                if not model_metadata.get('civitai'):
                    continue

                # Get images array
                images = model_metadata.get('civitai', {}).get('images', [])
                if not images:
                    continue

                # Initialize customImages array if it doesn't exist
                if not model_metadata['civitai'].get('customImages'):
                    model_metadata['civitai']['customImages'] = []

                # Find custom examples (entries with empty url)
                custom_indices = []
                for i, image in enumerate(images):
                    if image.get('url') == "":
                        custom_indices.append(i)

                if not custom_indices:
                    continue

                logger.debug(f"Found {len(custom_indices)} custom examples in {model_hash}")

                # Process each custom example
                for index in custom_indices:
                    try:
                        image_entry = images[index]

                        # Determine media type based on the entry type
                        media_type = 'videos' if image_entry.get('type') == 'video' else 'images'
                        extensions_to_try = SUPPORTED_MEDIA_EXTENSIONS[media_type]

                        # Find the image file by trying possible extensions
                        # (the v1 scheme stored files as image_<array index>.<ext>)
                        old_path = None
                        old_filename = None
                        found = False
                        for ext in extensions_to_try:
                            test_path = os.path.join(folder, f"image_{index}{ext}")
                            if os.path.exists(test_path):
                                old_path = test_path
                                old_filename = f"image_{index}{ext}"
                                found = True
                                break

                        if not found:
                            logger.warning(f"Could not find file for index {index} in {model_hash}, skipping")
                            continue

                        # Generate short ID for the custom example
                        short_id = ExampleImagesProcessor.generate_short_id()

                        # Get file extension
                        file_ext = os.path.splitext(old_path)[1]

                        # Create new filename
                        new_filename = f"custom_{short_id}{file_ext}"
                        new_path = os.path.join(folder, new_filename)

                        # Rename the file
                        try:
                            os.rename(old_path, new_path)
                            logger.debug(f"Renamed {old_filename} to {new_filename} in {folder}")
                        except Exception as e:
                            # Skip the metadata move if the file rename failed,
                            # so metadata and disk stay consistent.
                            logger.error(f"Failed to rename {old_path} to {new_path}: {e}")
                            continue

                        # Create a copy of the image entry with the id field
                        custom_entry = image_entry.copy()
                        custom_entry['id'] = short_id

                        # Add to customImages array
                        model_metadata['civitai']['customImages'].append(custom_entry)
                        count += 1
                    except Exception as e:
                        logger.error(f"Error migrating custom example at index {index} for {model_hash}: {e}")

                # Remove custom examples from the original images array
                model_metadata['civitai']['images'] = [
                    img for i, img in enumerate(images) if i not in custom_indices
                ]

                # Save the updated metadata
                file_path = model_data.get('file_path')
                if file_path:
                    try:
                        # Create a copy of model data without 'folder' field
                        model_copy = model_metadata.copy()
                        model_copy.pop('folder', None)

                        # Save metadata to file
                        await MetadataManager.save_metadata(file_path, model_copy)

                        # Update scanner cache
                        await scanner.update_single_model_cache(file_path, file_path, model_metadata)

                        updated_models += 1
                    except Exception as e:
                        logger.error(f"Failed to save metadata for {model_hash}: {e}")
                        migration_errors += 1

                # Give other tasks a chance to run
                if count % 10 == 0:
                    await asyncio.sleep(0)
            except Exception as e:
                logger.error(f"Error migrating folder {folder}: {e}")
                migration_errors += 1

        logger.info(f"Migration to v2 complete: migrated {count} custom examples across {updated_models} models with {migration_errors} errors")

View File

@@ -2,8 +2,9 @@ import logging
import os import os
import re import re
import tempfile import tempfile
import random
import string
from aiohttp import web from aiohttp import web
import asyncio
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -11,6 +12,12 @@ logger = logging.getLogger(__name__)
class ExampleImagesProcessor: class ExampleImagesProcessor:
"""Processes and manipulates example images""" """Processes and manipulates example images"""
@staticmethod
def generate_short_id(length=8):
"""Generate a short random alphanumeric identifier"""
chars = string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for _ in range(length))
@staticmethod @staticmethod
def get_civitai_optimized_url(image_url): def get_civitai_optimized_url(image_url):
"""Convert Civitai image URL to its optimized WebP version""" """Convert Civitai image URL to its optimized WebP version"""
@@ -265,11 +272,6 @@ class ExampleImagesProcessor:
'error': f"Model with hash {model_hash} not found in cache" 'error': f"Model with hash {model_hash} not found in cache"
}, status=404) }, status=404)
# Get the current number of images in the civitai.images array
civitai_data = model_data.get('civitai')
current_images = civitai_data.get('images', []) if civitai_data is not None else []
next_index = len(current_images)
# Create model folder # Create model folder
model_folder = os.path.join(example_images_path, model_hash) model_folder = os.path.join(example_images_path, model_hash)
os.makedirs(model_folder, exist_ok=True) os.makedirs(model_folder, exist_ok=True)
@@ -293,16 +295,17 @@ class ExampleImagesProcessor:
errors.append(f"Unsupported file type: {file_path}") errors.append(f"Unsupported file type: {file_path}")
continue continue
# Generate new filename using sequential index starting from current image length # Generate new filename using short ID instead of UUID
new_filename = f"image_{next_index}{file_ext}" short_id = ExampleImagesProcessor.generate_short_id()
next_index += 1 new_filename = f"custom_{short_id}{file_ext}"
dest_path = os.path.join(model_folder, new_filename) dest_path = os.path.join(model_folder, new_filename)
# Copy the file # Copy the file
import shutil import shutil
shutil.copy2(file_path, dest_path) shutil.copy2(file_path, dest_path)
newly_imported_paths.append(dest_path) # Store both the dest_path and the short_id
newly_imported_paths.append((dest_path, short_id))
# Add to imported files list # Add to imported files list
imported_files.append({ imported_files.append({
@@ -315,7 +318,7 @@ class ExampleImagesProcessor:
errors.append(f"Error importing {file_path}: {str(e)}") errors.append(f"Error importing {file_path}: {str(e)}")
# Update metadata with new example images # Update metadata with new example images
updated_images = await MetadataUpdater.update_metadata_after_import( regular_images, custom_images = await MetadataUpdater.update_metadata_after_import(
model_hash, model_hash,
model_data, model_data,
scanner, scanner,
@@ -328,7 +331,8 @@ class ExampleImagesProcessor:
(f' with {len(errors)} errors' if errors else ''), (f' with {len(errors)} errors' if errors else ''),
'files': imported_files, 'files': imported_files,
'errors': errors, 'errors': errors,
'updated_images': updated_images, 'regular_images': regular_images,
'custom_images': custom_images,
"model_file_path": model_data.get('file_path', ''), "model_file_path": model_data.get('file_path', ''),
}) })

View File

@@ -39,7 +39,23 @@ class ModelRouteUtils:
async def update_model_metadata(metadata_path: str, local_metadata: Dict, async def update_model_metadata(metadata_path: str, local_metadata: Dict,
civitai_metadata: Dict, client: CivitaiClient) -> None: civitai_metadata: Dict, client: CivitaiClient) -> None:
"""Update local metadata with CivitAI data""" """Update local metadata with CivitAI data"""
local_metadata['civitai'] = civitai_metadata # Save existing trainedWords and customImages if they exist
existing_civitai = local_metadata.get('civitai', {})
existing_trained_words = existing_civitai.get('trainedWords', [])
# Create a new civitai metadata by updating existing with new
merged_civitai = existing_civitai.copy()
merged_civitai.update(civitai_metadata)
# Special handling for trainedWords - ensure we don't lose any existing trained words
new_trained_words = civitai_metadata.get('trainedWords', [])
if existing_trained_words:
# Use a set to combine words without duplicates, then convert back to list
merged_trained_words = list(set(existing_trained_words + new_trained_words))
merged_civitai['trainedWords'] = merged_trained_words
# Update local metadata with merged civitai data
local_metadata['civitai'] = merged_civitai
local_metadata['from_civitai'] = True local_metadata['from_civitai'] = True
# Update model name if available # Update model name if available
@@ -219,7 +235,7 @@ class ModelRouteUtils:
fields = [ fields = [
"id", "modelId", "name", "createdAt", "updatedAt", "id", "modelId", "name", "createdAt", "updatedAt",
"publishedAt", "trainedWords", "baseModel", "description", "publishedAt", "trainedWords", "baseModel", "description",
"model", "images", "creator" "model", "images", "customImages", "creator"
] ]
return {k: data[k] for k in fields if k in data} return {k: data[k] for k in fields if k in data}

View File

@@ -65,15 +65,21 @@ export function renderShowcaseContent(images, exampleFiles = []) {
// Find matching file in our list of actual files // Find matching file in our list of actual files
let localFile = null; let localFile = null;
if (exampleFiles.length > 0) { if (exampleFiles.length > 0) {
// Try to find the corresponding file by index first if (img.id) {
localFile = exampleFiles.find(file => { // This is a custom image, find by custom_<id>
const match = file.name.match(/image_(\d+)\./); const customPrefix = `custom_${img.id}`;
return match && parseInt(match[1]) === index; localFile = exampleFiles.find(file => file.name.startsWith(customPrefix));
}); } else {
// This is a regular image from civitai, find by index
localFile = exampleFiles.find(file => {
const match = file.name.match(/image_(\d+)\./);
return match && parseInt(match[1]) === index;
});
// If not found by index, just use the same position in the array if available // If not found by index, just use the same position in the array if available
if (!localFile && index < exampleFiles.length) { if (!localFile && index < exampleFiles.length) {
localFile = exampleFiles[index]; localFile = exampleFiles[index];
}
} }
} }
@@ -301,8 +307,11 @@ async function handleImportFiles(files, modelHash, importContainer) {
const showcaseTab = document.getElementById('showcase-tab'); const showcaseTab = document.getElementById('showcase-tab');
if (showcaseTab) { if (showcaseTab) {
// Get the updated images from the result // Get the updated images from the result
const updatedImages = result.updated_images || []; const regularImages = result.regular_images || [];
showcaseTab.innerHTML = renderShowcaseContent(updatedImages, updatedFilesResult.files); const customImages = result.custom_images || [];
// Combine both arrays for rendering
const allImages = [...regularImages, ...customImages];
showcaseTab.innerHTML = renderShowcaseContent(allImages, updatedFilesResult.files);
// Re-initialize showcase functionality // Re-initialize showcase functionality
const carousel = showcaseTab.querySelector('.carousel'); const carousel = showcaseTab.querySelector('.carousel');
@@ -321,7 +330,8 @@ async function handleImportFiles(files, modelHash, importContainer) {
// Create an update object with only the necessary properties // Create an update object with only the necessary properties
const updateData = { const updateData = {
civitai: { civitai: {
images: updatedImages images: regularImages,
customImages: customImages
} }
}; };

View File

@@ -192,17 +192,20 @@ export function showLoraModal(lora) {
// Load recipes for this Lora // Load recipes for this Lora
loadRecipesForLora(lora.model_name, lora.sha256); loadRecipesForLora(lora.model_name, lora.sha256);
// Load example images asynchronously // Load example images asynchronously - merge regular and custom images
loadExampleImages(lora.civitai?.images, lora.sha256, lora.file_path); const regularImages = lora.civitai?.images || [];
const customImages = lora.civitai?.customImages || [];
// Combine images - regular images first, then custom images
const allImages = [...regularImages, ...customImages];
loadExampleImages(allImages, lora.sha256);
} }
/** /**
* Load example images asynchronously * Load example images asynchronously
* @param {Array} images - Array of image objects * @param {Array} images - Array of image objects (both regular and custom)
* @param {string} modelHash - Model hash for fetching local files * @param {string} modelHash - Model hash for fetching local files
* @param {string} filePath - File path for fetching local files
*/ */
async function loadExampleImages(images, modelHash, filePath) { async function loadExampleImages(images, modelHash) {
try { try {
const showcaseTab = document.getElementById('showcase-tab'); const showcaseTab = document.getElementById('showcase-tab');
if (!showcaseTab) return; if (!showcaseTab) return;