diff --git a/.github/workflows/frontend-tests.yml b/.github/workflows/frontend-tests.yml
new file mode 100644
index 00000000..362a8b67
--- /dev/null
+++ b/.github/workflows/frontend-tests.yml
@@ -0,0 +1,50 @@
+name: Frontend Tests
+
+on:
+  push:
+    branches:
+      - main
+      - master
+    paths:
+      - 'package.json'
+      - 'package-lock.json'
+      - 'vitest.config.js'
+      - 'tests/frontend/**'
+      - 'static/js/**'
+      - '.github/workflows/frontend-tests.yml'
+  pull_request:
+    paths:
+      - 'package.json'
+      - 'package-lock.json'
+      - 'vitest.config.js'
+      - 'tests/frontend/**'
+      - 'static/js/**'
+      - '.github/workflows/frontend-tests.yml'
+
+jobs:
+  vitest:
+    name: Run Vitest with coverage
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+
+      - name: Use Node.js 20
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+          cache: 'npm'
+
+      - name: Install dependencies
+        run: npm ci
+
+      - name: Run frontend tests with coverage
+        run: npm run test:coverage
+
+      - name: Upload coverage artifact
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: frontend-coverage
+          path: coverage/frontend
+          if-no-files-found: warn
diff --git a/.gitignore b/.gitignore
index 7b15029e..bae77cab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,5 @@ py/run_test.py
.vscode/
cache/
civitai/
+node_modules/
+coverage/
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 00000000..31a731cc
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,22 @@
+# Repository Guidelines
+
+## Project Structure & Module Organization
+ComfyUI LoRA Manager pairs a Python backend with browser-side widgets. Backend modules live in py/ with HTTP entry points in py/routes/, feature logic in py/services/, shared helpers in py/utils/, and custom nodes in py/nodes/. UI scripts extend ComfyUI from web/comfyui/, while deploy-ready assets remain in static/ and templates/. Localization files live in locales/, example workflows in example_workflows/, and interim tests such as test_i18n.py sit beside their source until a dedicated tests/ tree lands.
+
+## Build, Test, and Development Commands
+- pip install -r requirements.txt installs backend dependencies.
+- python standalone.py --port 8188 launches the standalone server for iterative development.
+- python -m pytest test_i18n.py runs the current regression suite; target new files explicitly, e.g. python -m pytest tests/test_recipes.py.
+- python scripts/sync_translation_keys.py synchronizes locale keys after UI string updates.
+
+## Coding Style & Naming Conventions
+Follow PEP 8 with four-space indentation and descriptive snake_case file and function names such as settings_manager.py. Classes stay PascalCase, constants in UPPER_SNAKE_CASE, and loggers retrieved via logging.getLogger(__name__). Prefer explicit type hints and docstrings on public APIs. JavaScript under web/comfyui/ uses ES modules with camelCase helpers and the _widget.js suffix for UI components.
+
+## Testing Guidelines
+Pytest powers backend tests. Name modules test_*.py and keep them near the code or in a future tests/ package. Mock ComfyUI dependencies through helpers in standalone.py, keep filesystem fixtures deterministic, and ensure translations are covered. Run python -m pytest before submitting changes.
+
+## Commit & Pull Request Guidelines
+Commits follow the conventional format, e.g. feat(settings): add default model path, and should stay focused on a single concern. Pull requests must outline the problem, summarize the solution, list manual verification steps (server run, targeted pytest), and link related issues. Include screenshots or GIFs for UI or locale updates and call out migration steps such as settings.json adjustments.
+
+## Configuration & Localization Tips
+Copy settings.json.example to settings.json and adapt model directories before running the standalone server. Store reference assets in civitai/ or docs/ to keep runtime directories deploy-ready. Whenever UI text changes, update every locales/<lang>.json file and rerun the translation sync script so ComfyUI surfaces localized strings.
diff --git a/README.md b/README.md
index 1627d434..7e932acc 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,11 @@ Enhance your Civitai browsing experience with our companion browser extension! S
## Release Notes
+### v0.9.3
+* **Metadata Archive Database Support** - Added the ability to download and utilize a metadata archive database, enabling access to metadata for models that have been deleted from CivitAI.
+* **App-Level Proxy Settings** - Introduced support for configuring a global proxy within the application, making it easier to use the manager behind network restrictions.
+* **Bug Fixes** - Various bug fixes for improved stability and reliability.
+
### v0.9.2
* **Bulk Auto-Organization Action** - Added a new bulk auto-organization feature. You can now select multiple models and automatically organize them according to your current path template settings for streamlined management.
* **Bug Fixes** - Addressed several bugs to improve stability and reliability.
diff --git a/__init__.py b/__init__.py
index cf317063..5223fc45 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,13 +1,32 @@
-from .py.lora_manager import LoraManager
-from .py.nodes.lora_loader import LoraManagerLoader, LoraManagerTextLoader
-from .py.nodes.trigger_word_toggle import TriggerWordToggle
-from .py.nodes.lora_stacker import LoraStacker
-from .py.nodes.save_image import SaveImage
-from .py.nodes.debug_metadata import DebugMetadata
-from .py.nodes.wanvideo_lora_select import WanVideoLoraSelect
-from .py.nodes.wanvideo_lora_select_from_text import WanVideoLoraSelectFromText
-# Import metadata collector to install hooks on startup
-from .py.metadata_collector import init as init_metadata_collector
+try:  # pragma: no cover - import fallback for pytest collection
+    from .py.lora_manager import LoraManager
+    from .py.nodes.lora_loader import LoraManagerLoader, LoraManagerTextLoader
+    from .py.nodes.trigger_word_toggle import TriggerWordToggle
+    from .py.nodes.lora_stacker import LoraStacker
+    from .py.nodes.save_image import SaveImage
+    from .py.nodes.debug_metadata import DebugMetadata
+    from .py.nodes.wanvideo_lora_select import WanVideoLoraSelect
+    from .py.nodes.wanvideo_lora_select_from_text import WanVideoLoraSelectFromText
+    from .py.metadata_collector import init as init_metadata_collector
+except ImportError:  # pragma: no cover - allows running under pytest without package install
+    import importlib
+    import pathlib
+    import sys
+
+    package_root = pathlib.Path(__file__).resolve().parent
+    if str(package_root) not in sys.path:
+        sys.path.append(str(package_root))
+
+    LoraManager = importlib.import_module("py.lora_manager").LoraManager
+    LoraManagerLoader = importlib.import_module("py.nodes.lora_loader").LoraManagerLoader
+    LoraManagerTextLoader = importlib.import_module("py.nodes.lora_loader").LoraManagerTextLoader
+    TriggerWordToggle = importlib.import_module("py.nodes.trigger_word_toggle").TriggerWordToggle
+    LoraStacker = importlib.import_module("py.nodes.lora_stacker").LoraStacker
+    SaveImage = importlib.import_module("py.nodes.save_image").SaveImage
+    DebugMetadata = importlib.import_module("py.nodes.debug_metadata").DebugMetadata
+    WanVideoLoraSelect = importlib.import_module("py.nodes.wanvideo_lora_select").WanVideoLoraSelect
+    WanVideoLoraSelectFromText = importlib.import_module("py.nodes.wanvideo_lora_select_from_text").WanVideoLoraSelectFromText
+    init_metadata_collector = importlib.import_module("py.metadata_collector").init
NODE_CLASS_MAPPINGS = {
LoraManagerLoader.NAME: LoraManagerLoader,
diff --git a/docs/EventManagementImplementation.md b/docs/EventManagementImplementation.md
deleted file mode 100644
index 6631fc16..00000000
--- a/docs/EventManagementImplementation.md
+++ /dev/null
@@ -1,182 +0,0 @@
-# Event Management Implementation Summary
-
-## What Has Been Implemented
-
-### 1. Enhanced EventManager Class
-- **Location**: `static/js/utils/EventManager.js`
-- **Features**:
- - Priority-based event handling
- - Conditional execution based on application state
- - Element filtering (target/exclude selectors)
- - Mouse button filtering
- - Automatic cleanup with cleanup functions
- - State tracking for app modes
- - Error handling for event handlers
-
-### 2. BulkManager Integration
-- **Location**: `static/js/managers/BulkManager.js`
-- **Migrated Events**:
- - Global keyboard shortcuts (Ctrl+A, Escape, B key)
- - Marquee selection events (mousedown, mousemove, mouseup, contextmenu)
- - State synchronization with EventManager
-- **Benefits**:
- - Centralized priority handling
- - Conditional execution based on modal state
- - Better coordination with other components
-
-### 3. UIHelpers Integration
-- **Location**: `static/js/utils/uiHelpers.js`
-- **Migrated Events**:
- - Mouse position tracking for node selector positioning
- - Node selector click events (outside clicks and selection)
- - State management for node selector
-- **Benefits**:
- - Reduced direct DOM listeners
- - Coordinated state tracking
- - Better cleanup
-
-### 4. ModelCard Integration
-- **Location**: `static/js/components/shared/ModelCard.js`
-- **Migrated Events**:
- - Model card click delegation
- - Action button handling (star, globe, copy, etc.)
- - Better return value handling for event propagation
-- **Benefits**:
- - Single event listener for all model cards
- - Priority-based execution
- - Better event flow control
-
-### 5. Documentation and Initialization
-- **EventManagerDocs.md**: Comprehensive documentation
-- **eventManagementInit.js**: Initialization and global handlers
-- **Features**:
- - Global escape key handling
- - Modal state synchronization
- - Error handling
- - Analytics integration points
- - Cleanup on page unload
-
-## Application States Tracked
-
-1. **bulkMode**: When bulk selection mode is active
-2. **marqueeActive**: When marquee selection is in progress
-3. **modalOpen**: When any modal dialog is open
-4. **nodeSelectorActive**: When node selector popup is visible
-
-## Priority Levels Used
-
-- **250+**: Critical system events (escape keys)
-- **200+**: High priority system events (modal close)
-- **100-199**: Application-level shortcuts (bulk operations)
-- **80-99**: UI interactions (marquee selection)
-- **60-79**: Component interactions (model cards)
-- **10-49**: Tracking and monitoring
-- **1-9**: Analytics and low-priority tasks
-
-## Event Flow Examples
-
-### Bulk Mode Toggle (B key)
-1. **Priority 100**: BulkManager keyboard handler catches 'b' key
-2. Toggles bulk mode state
-3. Updates EventManager state
-4. Updates UI accordingly
-5. Stops propagation (returns true)
-
-### Marquee Selection
-1. **Priority 80**: BulkManager mousedown handler (only in .models-container, excluding cards/buttons)
-2. Starts marquee selection
-3. **Priority 90**: BulkManager mousemove handler (only when marquee active)
-4. Updates selection rectangle
-5. **Priority 90**: BulkManager mouseup handler ends selection
-
-### Model Card Click
-1. **Priority 60**: ModelCard delegation handler checks for specific elements
-2. If action button: handles action and stops propagation
-3. If general card click: continues to other handlers
-4. Bulk selection may also handle the event if in bulk mode
-
-## Remaining Event Listeners (Not Yet Migrated)
-
-### High Priority for Migration
-1. **SearchManager keyboard events** - Global search shortcuts
-2. **ModalManager escape handling** - Already integrated with initialization
-3. **Scroll-based events** - Back to top, virtual scrolling
-4. **Resize events** - Panel positioning, responsive layouts
-
-### Medium Priority
-1. **Form input events** - Tag inputs, settings forms
-2. **Component-specific events** - Recipe modal, showcase view
-3. **Sidebar events** - Resize handling, toggle events
-
-### Low Priority (Can Remain As-Is)
-1. **VirtualScroller events** - Performance-critical, specialized
-2. **Component lifecycle events** - Modal open/close callbacks
-3. **One-time setup events** - Theme initialization, etc.
-
-## Benefits Achieved
-
-### Performance Improvements
-- **Reduced DOM listeners**: From ~15+ individual listeners to ~5 coordinated handlers
-- **Conditional execution**: Handlers only run when conditions are met
-- **Priority ordering**: Important events handled first
-- **Better memory management**: Automatic cleanup prevents leaks
-
-### Coordination Improvements
-- **State synchronization**: All components aware of app state
-- **Event flow control**: Proper propagation stopping
-- **Conflict resolution**: Priority system prevents conflicts
-- **Debugging**: Centralized event handling for easier debugging
-
-### Code Quality Improvements
-- **Consistent patterns**: All event handling follows same patterns
-- **Better separation of concerns**: Event logic separated from business logic
-- **Error handling**: Centralized error catching and reporting
-- **Documentation**: Clear patterns for future development
-
-## Next Steps (Recommendations)
-
-### 1. Migrate Search Events
-```javascript
-// In SearchManager.js
-eventManager.addHandler('keydown', 'search-shortcuts', (e) => {
- if ((e.ctrlKey || e.metaKey) && e.key === 'f') {
- this.focusSearchInput();
- return true;
- }
-}, { priority: 120 });
-```
-
-### 2. Integrate Resize Events
-```javascript
-// Create ResizeManager
-eventManager.addHandler('resize', 'layout-resize', debounce((e) => {
- this.updateLayoutDimensions();
-}, 250), { priority: 50 });
-```
-
-### 3. Add Debug Mode
-```javascript
-// In EventManager.js
-if (window.DEBUG_EVENTS) {
- console.log(`Event ${eventType} handled by ${source} (priority: ${priority})`);
-}
-```
-
-### 4. Create Event Analytics
-```javascript
-// Track event patterns for optimization
-eventManager.addHandler('*', 'analytics', (e) => {
- this.trackEventUsage(e.type, performance.now());
-}, { priority: 1 });
-```
-
-## Testing Recommendations
-
-1. **Verify bulk mode interactions** work correctly
-2. **Test marquee selection** in various scenarios
-3. **Check modal state synchronization**
-4. **Verify node selector** positioning and cleanup
-5. **Test keyboard shortcuts** don't conflict
-6. **Verify proper cleanup** when components are destroyed
-
-The centralized event management system provides a solid foundation for coordinated, efficient event handling across the application while maintaining good performance and code organization.
diff --git a/docs/EventManagerDocs.md b/docs/EventManagerDocs.md
deleted file mode 100644
index 2ccc8174..00000000
--- a/docs/EventManagerDocs.md
+++ /dev/null
@@ -1,301 +0,0 @@
-# Centralized Event Management System
-
-This document describes the centralized event management system that coordinates event handling across the ComfyUI LoRA Manager application.
-
-## Overview
-
-The `EventManager` class provides a centralized way to handle DOM events with priority-based execution, conditional execution based on application state, and proper cleanup mechanisms.
-
-## Features
-
-- **Priority-based execution**: Handlers with higher priority run first
-- **Conditional execution**: Handlers can be executed based on application state
-- **Element filtering**: Handlers can target specific elements or exclude others
-- **Automatic cleanup**: Cleanup functions are called when handlers are removed
-- **State tracking**: Tracks application states like bulk mode, modal open, etc.
-
-## Basic Usage
-
-### Importing
-
-```javascript
-import { eventManager } from './EventManager.js';
-```
-
-### Adding Event Handlers
-
-```javascript
-eventManager.addHandler('click', 'myComponent', (event) => {
- console.log('Button clicked!');
- return true; // Stop propagation to other handlers
-}, {
- priority: 100,
- targetSelector: '.my-button',
- skipWhenModalOpen: true
-});
-```
-
-### Removing Event Handlers
-
-```javascript
-// Remove specific handler
-eventManager.removeHandler('click', 'myComponent');
-
-// Remove all handlers for a component
-eventManager.removeAllHandlersForSource('myComponent');
-```
-
-### Updating Application State
-
-```javascript
-// Set state
-eventManager.setState('bulkMode', true);
-eventManager.setState('modalOpen', true);
-
-// Get state
-const isBulkMode = eventManager.getState('bulkMode');
-```
-
-## Available States
-
-- `bulkMode`: Whether bulk selection mode is active
-- `marqueeActive`: Whether marquee selection is in progress
-- `modalOpen`: Whether any modal is currently open
-- `nodeSelectorActive`: Whether the node selector popup is active
-
-## Handler Options
-
-### Priority
-Higher numbers = higher priority. Handlers run in descending priority order.
-
-```javascript
-{
- priority: 100 // High priority
-}
-```
-
-### Conditional Execution
-
-```javascript
-{
- onlyInBulkMode: true, // Only run when bulk mode is active
- onlyWhenMarqueeActive: true, // Only run when marquee selection is active
- skipWhenModalOpen: true, // Skip when any modal is open
- skipWhenNodeSelectorActive: true, // Skip when node selector is active
- onlyWhenNodeSelectorActive: true // Only run when node selector is active
-}
-```
-
-### Element Filtering
-
-```javascript
-{
- targetSelector: '.model-card', // Only handle events on matching elements
- excludeSelector: 'button, input', // Exclude events from these elements
- button: 0 // Only handle specific mouse button (0=left, 1=middle, 2=right)
-}
-```
-
-### Cleanup Functions
-
-```javascript
-{
- cleanup: () => {
- // Custom cleanup logic
- console.log('Handler cleaned up');
- }
-}
-```
-
-## Integration Examples
-
-### BulkManager Integration
-
-```javascript
-class BulkManager {
- registerEventHandlers() {
- // High priority keyboard shortcuts
- eventManager.addHandler('keydown', 'bulkManager-keyboard', (e) => {
- return this.handleGlobalKeyboard(e);
- }, {
- priority: 100,
- skipWhenModalOpen: true
- });
-
- // Marquee selection
- eventManager.addHandler('mousedown', 'bulkManager-marquee-start', (e) => {
- return this.handleMarqueeStart(e);
- }, {
- priority: 80,
- skipWhenModalOpen: true,
- targetSelector: '.models-container',
- excludeSelector: '.model-card, button, input',
- button: 0
- });
- }
-
- cleanup() {
- eventManager.removeAllHandlersForSource('bulkManager-keyboard');
- eventManager.removeAllHandlersForSource('bulkManager-marquee-start');
- }
-}
-```
-
-### Modal Integration
-
-```javascript
-class ModalManager {
- showModal(modalId) {
- // Update state when modal opens
- eventManager.setState('modalOpen', true);
- this.displayModal(modalId);
- }
-
- closeModal(modalId) {
- // Update state when modal closes
- eventManager.setState('modalOpen', false);
- this.hideModal(modalId);
- }
-}
-```
-
-### Component Event Delegation
-
-```javascript
-export function setupComponentEvents() {
- eventManager.addHandler('click', 'myComponent-actions', (event) => {
- const button = event.target.closest('.action-button');
- if (!button) return false;
-
- this.handleAction(button.dataset.action);
- return true; // Stop propagation
- }, {
- priority: 60,
- targetSelector: '.component-container'
- });
-}
-```
-
-## Best Practices
-
-### 1. Use Descriptive Source Names
-Use the format `componentName-purposeDescription`:
-```javascript
-// Good
-'bulkManager-marqueeSelection'
-'nodeSelector-clickOutside'
-'modelCard-delegation'
-
-// Avoid
-'bulk'
-'click'
-'handler1'
-```
-
-### 2. Set Appropriate Priorities
-- 200+: Critical system events (escape keys, critical modals)
-- 100-199: High priority application events (keyboard shortcuts)
-- 50-99: Normal UI interactions (buttons, cards)
-- 1-49: Low priority events (tracking, analytics)
-
-### 3. Use Conditional Execution
-Instead of checking state inside handlers, use options:
-```javascript
-// Good
-eventManager.addHandler('click', 'bulk-action', handler, {
- onlyInBulkMode: true
-});
-
-// Avoid
-eventManager.addHandler('click', 'bulk-action', (e) => {
- if (!state.bulkMode) return;
- // handler logic
-});
-```
-
-### 4. Clean Up Properly
-Always clean up handlers when components are destroyed:
-```javascript
-class MyComponent {
- constructor() {
- this.registerEvents();
- }
-
- destroy() {
- eventManager.removeAllHandlersForSource('myComponent');
- }
-}
-```
-
-### 5. Return Values Matter
-- Return `true` to stop event propagation to other handlers
-- Return `false` or `undefined` to continue with other handlers
-
-## Migration Guide
-
-### From Direct Event Listeners
-
-**Before:**
-```javascript
-document.addEventListener('click', (e) => {
- if (e.target.closest('.my-button')) {
- this.handleClick(e);
- }
-});
-```
-
-**After:**
-```javascript
-eventManager.addHandler('click', 'myComponent-button', (e) => {
- this.handleClick(e);
-}, {
- targetSelector: '.my-button'
-});
-```
-
-### From Event Delegation
-
-**Before:**
-```javascript
-container.addEventListener('click', (e) => {
- const card = e.target.closest('.model-card');
- if (!card) return;
-
- if (e.target.closest('.action-btn')) {
- this.handleAction(e);
- }
-});
-```
-
-**After:**
-```javascript
-eventManager.addHandler('click', 'container-actions', (e) => {
- const card = e.target.closest('.model-card');
- if (!card) return false;
-
- if (e.target.closest('.action-btn')) {
- this.handleAction(e);
- return true;
- }
-}, {
- targetSelector: '.container'
-});
-```
-
-## Performance Benefits
-
-1. **Reduced DOM listeners**: Single listener per event type instead of multiple
-2. **Conditional execution**: Handlers only run when conditions are met
-3. **Priority ordering**: Important handlers run first, avoiding unnecessary work
-4. **Automatic cleanup**: Prevents memory leaks from orphaned listeners
-5. **Centralized debugging**: All event handling flows through one system
-
-## Debugging
-
-Enable debug logging to trace event handling:
-```javascript
-// Add to EventManager.js for debugging
-console.log(`Handling ${eventType} event with ${handlers.length} handlers`);
-```
-
-The event manager provides a foundation for coordinated, efficient event handling across the entire application.
diff --git a/docs/LM-Extension-Wiki.md b/docs/LM-Extension-Wiki.md
new file mode 100644
index 00000000..ddaeac4e
--- /dev/null
+++ b/docs/LM-Extension-Wiki.md
@@ -0,0 +1,180 @@
+## Overview
+
+The **LoRA Manager Civitai Extension** is a Browser extension designed to work seamlessly with [LoRA Manager](https://github.com/willmiao/ComfyUI-Lora-Manager) to significantly enhance your browsing experience on [Civitai](https://civitai.com).
+It also supports browsing on [CivArchive](https://civarchive.com/) (formerly CivitaiArchive).
+
+With this extension, you can:
+
+✅ Instantly see which models are already present in your local library
+✅ Download new models with a single click
+✅ Manage downloads efficiently with queue and parallel download support
+✅ Keep your downloaded models automatically organized according to your custom settings
+
+
+
+
+---
+
+## Why Are All Features for Supporters Only?
+
+I love building tools for the Stable Diffusion and ComfyUI communities, and LoRA Manager is a passion project that I've poured countless hours into. When I created this companion extension, my hope was to offer its core features for free, as a thank-you to all of you.
+
+Unfortunately, I've reached a point where I need to be realistic. The level of support from the free model has been far lower than what's needed to justify the continuous development and maintenance for both projects. It was a difficult decision, but I've chosen to make the extension's features exclusive to supporters.
+
+This change is crucial for me to be able to continue dedicating my time to improving the free and open-source LoRA Manager, which I'm committed to keeping available for everyone.
+
+Your support does more than just unlock a few features—it allows me to keep innovating and ensures the core LoRA Manager project thrives. I'm incredibly grateful for your understanding and any support you can offer. ❤️
+
+(_For those who previously supported me on Ko-fi with a one-time donation, I'll be sending out license keys individually as a thank-you._)
+
+
+---
+
+## Installation
+
+### Supported Browsers & Installation Methods
+
+| Browser | Installation Method |
+|--------------------|-------------------------------------------------------------------------------------|
+| **Google Chrome** | [Chrome Web Store link](https://chromewebstore.google.com/detail/capigligggeijgmocnaflanlbghnamgm?utm_source=item-share-cb) |
+| **Microsoft Edge** | Install via Chrome Web Store (compatible) |
+| **Brave Browser** | Install via Chrome Web Store (compatible) |
+| **Opera** | Install via Chrome Web Store (compatible) |
+| **Firefox**        | Install via Mozilla Add-ons (AMO) — see the extension's Firefox listing |
+
+
+For non-Chrome browsers (e.g., Microsoft Edge), you can typically install extensions from the Chrome Web Store by following these steps: open the extension’s Chrome Web Store page, click 'Get extension', then click 'Allow' when prompted to enable installations from other stores, and finally click 'Add extension' to complete the installation.
+
+---
+
+## Privacy & Security
+
+I understand concerns around browser extensions and privacy, and I want to be fully transparent about how the **LM Civitai Extension** works:
+
+- **Reviewed and Verified**
+ This extension has been **manually reviewed and approved by the Chrome Web Store**. The Firefox version uses the **exact same code** (only the packaging format differs) and has passed **Mozilla’s Add-on review**.
+
+- **Minimal Network Access**
+ The only external server this extension connects to is:
+ **`https://willmiao.shop`** — used solely for **license validation**.
+
+ It does **not collect, transmit, or store any personal or usage data**.
+ No browsing history, no user IDs, no analytics, no hidden trackers.
+
+- **Local-Only Model Detection**
+ Model detection and LoRA Manager communication all happen **locally** within your browser, directly interacting with your local LoRA Manager backend.
+
+I value your trust and am committed to keeping your local setup private and secure. If you have any questions, feel free to reach out!
+
+---
+
+## How to Use
+
+After installing the extension, you'll automatically receive a **7-day trial** to explore all features.
+
+When the extension is correctly installed and your license is valid:
+
+- Open **Civitai**, and you'll see visual indicators added by the extension on model cards, showing:
+ - ✅ Models already present in your local library
+ - ⬇️ A download button for models not in your library
+
+Clicking the download button adds the corresponding model version to the download queue, waiting to be downloaded. You can set up to **5 models to download simultaneously**.
+
+### Visual Indicators Appear On:
+
+- **Home Page** — Featured models
+- **Models Page**
+- **Creator Profiles** — If the creator has set their models to be visible
+- **Recommended Resources** — On individual model pages
+
+### Version Buttons on Model Pages
+
+On a specific model page, visual indicators also appear on version buttons, showing which versions are already in your local library.
+
+When switching to a specific version by clicking a version button:
+
+- Clicking the download button will open a dropdown:
+ - Download via **LoRA Manager**
+ - Download via **Original Download** (browser download)
+
+You can check **Remember my choice** to set your preferred default. You can change this setting anytime in the extension's settings.
+
+
+
+### Resources on Image Pages (2025-08-05) — now shows in-library indicators for image resources. ‘Import image as recipe’ coming soon!
+
+
+
+---
+
+## Model Download Location & LoRA Manager Settings
+
+To use the **one-click download function**, you must first set:
+
+- Your **Default LoRAs Root**
+- Your **Default Checkpoints Root**
+
+These are set within LoRA Manager's settings.
+
+When everything is configured, downloaded model files will be placed in:
+
+`<default model root>/<path template>/`
+
+
+### Update: Default Path Customization (2025-07-21)
+
+A new setting to customize the default download path has been added in the nightly version. You can now personalize where models are saved when downloading via the LM Civitai Extension.
+
+
+
+The previous YAML path mapping file will be deprecated—settings will now be unified in settings.json to simplify configuration.
+
+---
+
+## Backend Port Configuration
+
+If your **ComfyUI** or **LoRA Manager** backend is running on a port **other than the default 8188**, you must configure the backend port in the extension's settings.
+
+After correctly setting and saving the port, you'll see in the extension's header area:
+- A **Healthy** status with the tooltip: `Connected to LoRA Manager on port xxxx`
+
+
+---
+
+## Advanced Usage
+
+### Connecting to a Remote LoRA Manager
+
+If your LoRA Manager is running on another computer, you can still connect from your browser using port forwarding.
+
+> **Why can't you set a remote IP directly?**
+>
+> For privacy and security, the extension only requests access to `http://127.0.0.1/*`. Supporting remote IPs would require much broader permissions, which may be rejected by browser stores and could raise user concerns.
+
+**Solution: Port Forwarding with `socat`**
+
+On your browser computer, run:
+
+`socat TCP-LISTEN:8188,bind=127.0.0.1,fork TCP:REMOTE.IP.ADDRESS.HERE:8188`
+
+- Replace `REMOTE.IP.ADDRESS.HERE` with the IP of the machine running LoRA Manager.
+- Adjust the port if needed.
+
+This lets the extension connect to `127.0.0.1:8188` as usual, with traffic forwarded to your remote server.
+
+_Thanks to user **Temikus** for sharing this solution!_
+
+---
+
+## Roadmap
+
+The extension will evolve alongside **LoRA Manager** improvements. Planned features include:
+
+- [x] Support for **additional model types** (e.g., embeddings)
+- [ ] One-click **Recipe Import**
+- [x] Display of in-library status for all resources in the **Resources Used** section of the image page
+- [x] One-click **Auto-organize Models**
+
+**Stay tuned — and thank you for your support!**
+
+---
+
diff --git a/docs/architecture/example_images_routes.md b/docs/architecture/example_images_routes.md
new file mode 100644
index 00000000..128530f6
--- /dev/null
+++ b/docs/architecture/example_images_routes.md
@@ -0,0 +1,93 @@
+# Example image route architecture
+
+The example image routing stack mirrors the layered model route stack described in
+[`docs/architecture/model_routes.md`](model_routes.md). HTTP wiring, controller setup,
+handler orchestration, and long-running workflows now live in clearly separated modules so
+we can extend download/import behaviour without touching the entire feature surface.
+
+```mermaid
+graph TD
+ subgraph HTTP
+ A[ExampleImagesRouteRegistrar] -->|binds| B[ExampleImagesRoutes controller]
+ end
+ subgraph Application
+ B --> C[ExampleImagesHandlerSet]
+ C --> D1[Handlers]
+ D1 --> E1[Use cases]
+ E1 --> F1[Download manager / processor / file manager]
+ end
+ subgraph Side Effects
+ F1 --> G1[Filesystem]
+ F1 --> G2[Model metadata]
+ F1 --> G3[WebSocket progress]
+ end
+```
+
+## Layer responsibilities
+
+| Layer | Module(s) | Responsibility |
+| --- | --- | --- |
+| Registrar | `py/routes/example_images_route_registrar.py` | Declarative catalogue of every example image endpoint plus helpers that bind them to an `aiohttp` router. Keeps HTTP concerns symmetrical with the model registrar. |
+| Controller | `py/routes/example_images_routes.py` | Lazily constructs `ExampleImagesHandlerSet`, injects defaults for the download manager, processor, and file manager, and exposes the registrar-ready mapping just like `BaseModelRoutes`. |
+| Handler set | `py/routes/handlers/example_images_handlers.py` | Groups HTTP adapters by concern (downloads, imports/deletes, filesystem access). Each handler translates domain errors into HTTP responses and defers to a use case or utility service. |
+| Use cases | `py/services/use_cases/example_images/*.py` | Encapsulate orchestration for downloads and imports. They validate input, translate concurrency/configuration errors, and keep handler logic declarative. |
+| Supporting services | `py/utils/example_images_download_manager.py`, `py/utils/example_images_processor.py`, `py/utils/example_images_file_manager.py` | Execute long-running work: pull assets from Civitai, persist uploads, clean metadata, expose filesystem actions with guardrails, and broadcast progress snapshots. |
+
+## Handler responsibilities & invariants
+
+`ExampleImagesHandlerSet` flattens the handler objects into the `{"handler_name": coroutine}`
+mapping consumed by the registrar. The table below outlines how each handler collaborates
+with the use cases and utilities.
+
+| Handler | Key endpoints | Collaborators | Contracts |
+| --- | --- | --- | --- |
+| `ExampleImagesDownloadHandler` | `/api/lm/download-example-images`, `/api/lm/example-images-status`, `/api/lm/pause-example-images`, `/api/lm/resume-example-images`, `/api/lm/force-download-example-images` | `DownloadExampleImagesUseCase`, `DownloadManager` | Delegates payload validation and concurrency checks to the use case; progress/status endpoints expose the same snapshot used for WebSocket broadcasts; pause/resume surface `DownloadNotRunningError` as HTTP 400 instead of 500. |
+| `ExampleImagesManagementHandler` | `/api/lm/import-example-images`, `/api/lm/delete-example-image` | `ImportExampleImagesUseCase`, `ExampleImagesProcessor` | Multipart uploads are streamed to disk via the use case; validation failures return HTTP 400 with no filesystem side effects; deletion funnels through the processor to prune metadata and cached images consistently. |
+| `ExampleImagesFileHandler` | `/api/lm/open-example-images-folder`, `/api/lm/example-image-files`, `/api/lm/has-example-images` | `ExampleImagesFileManager` | Centralises filesystem access, enforcing settings-based root paths and returning HTTP 400/404 for missing configuration or folders; responses always include `success`/`has_images` booleans for UI consumption. |
+
+## Use case boundaries
+
+| Use case | Entry point | Dependencies | Guarantees |
+| --- | --- | --- | --- |
+| `DownloadExampleImagesUseCase` | `execute(payload)` | `DownloadManager.start_download`, download configuration errors | Raises `DownloadExampleImagesInProgressError` when the manager reports an active job, rewraps configuration errors into `DownloadExampleImagesConfigurationError`, and lets `ExampleImagesDownloadError` bubble as 500s so handlers do not duplicate logging. |
+| `ImportExampleImagesUseCase` | `execute(request)` | `ExampleImagesProcessor.import_images`, temporary file helpers | Supports multipart or JSON payloads, normalises file paths into a single list, cleans up temp files even on failure, and maps validation issues to `ImportExampleImagesValidationError` for HTTP 400 responses. |
+
+## Maintaining critical invariants
+
+* **Shared progress snapshots** – The download handler returns the same snapshot built by
+  `DownloadManager`, guaranteeing parity between HTTP polling endpoints and WebSocket
+  progress events.
+* **Safe filesystem access** – All folder/file actions flow through
+  `ExampleImagesFileManager`, which validates the configured example image root and ensures
+  responses never leak absolute paths outside the allowed directory.
+* **Metadata hygiene** – Import/delete operations run through `ExampleImagesProcessor`,
+  which updates model metadata via `MetadataManager` and notifies the relevant scanners so
+  cache state stays in sync.
+
+## Migration notes
+
+The refactor brings the example image stack in line with the model/recipe stacks:
+
+1. `ExampleImagesRouteRegistrar` now owns the declarative route list. Downstream projects
+ should rely on `ExampleImagesRoutes.to_route_mapping()` instead of manually wiring
+ handler callables.
+2. `ExampleImagesRoutes` caches its `ExampleImagesHandlerSet` just like
+ `BaseModelRoutes`. If you previously instantiated handlers directly, inject custom
+ collaborators via the controller constructor (`download_manager`, `processor`,
+ `file_manager`) to keep test seams predictable.
+3. Tests that mocked `ExampleImagesRoutes.setup_routes` should switch to patching
+ `DownloadExampleImagesUseCase`/`ImportExampleImagesUseCase` at import time. The handlers
+ expect those abstractions to surface validation/concurrency errors, and bypassing them
+ will skip the HTTP-friendly error mapping.
+
+## Extending the stack
+
+1. Add the endpoint to `ROUTE_DEFINITIONS` with a unique `handler_name`.
+2. Expose the coroutine on an existing handler class (or create a new handler and extend
+ `ExampleImagesHandlerSet`).
+3. Wire additional services or factories inside `_build_handler_set` on
+ `ExampleImagesRoutes`, mirroring how the model stack introduces new use cases.
+
+`tests/routes/test_example_images_routes.py` exercises registrar binding, download pause
+flows, and import validations. Use it as a template when introducing new handler
+collaborators or error mappings.
diff --git a/docs/architecture/model_routes.md b/docs/architecture/model_routes.md
new file mode 100644
index 00000000..a9fbf967
--- /dev/null
+++ b/docs/architecture/model_routes.md
@@ -0,0 +1,100 @@
+# Base model route architecture
+
+The model routing stack now splits HTTP wiring, orchestration logic, and
+business rules into discrete layers. The goal is to make it obvious where a
+new collaborator should live and which contract it must honour. The diagram
+below captures the end-to-end flow for a typical request:
+
+```mermaid
+graph TD
+ subgraph HTTP
+ A[ModelRouteRegistrar] -->|binds| B[BaseModelRoutes handler proxy]
+ end
+ subgraph Application
+ B --> C[ModelHandlerSet]
+ C --> D1[Handlers]
+ D1 --> E1[Use cases]
+ E1 --> F1[Services / scanners]
+ end
+ subgraph Side Effects
+ F1 --> G1[Cache & metadata]
+ F1 --> G2[Filesystem]
+ F1 --> G3[WebSocket state]
+ end
+```
+
+Every box maps to a concrete module:
+
+| Layer | Module(s) | Responsibility |
+| --- | --- | --- |
+| Registrar | `py/routes/model_route_registrar.py` | Declarative list of routes shared by every model type and helper methods for binding them to an `aiohttp` application. |
+| Route controller | `py/routes/base_model_routes.py` | Constructs the handler graph, injects shared services, exposes proxies that surface `503 Service not ready` when the model service has not been attached. |
+| Handler set | `py/routes/handlers/model_handlers.py` | Thin HTTP adapters grouped by concern (page rendering, listings, mutations, queries, downloads, CivitAI integration, move operations, auto-organize). |
+| Use cases | `py/services/use_cases/*.py` | Encapsulate long-running flows (`DownloadModelUseCase`, `BulkMetadataRefreshUseCase`, `AutoOrganizeUseCase`). They normalise validation errors and concurrency constraints before returning control to the handlers. |
+| Services | `py/services/*.py` | Existing services and scanners that mutate caches, write metadata, move files, and broadcast WebSocket updates. |
+
+## Handler responsibilities & contracts
+
+`ModelHandlerSet` flattens the handler objects into the exact callables used by
+the registrar. The table below highlights the separation of concerns within
+the set and the invariants that must hold after each handler returns.
+
+| Handler | Key endpoints | Collaborators | Contracts |
+| --- | --- | --- | --- |
+| `ModelPageView` | `/{prefix}` | `SettingsManager`, `server_i18n`, Jinja environment, `service.scanner` | Template is rendered with `is_initializing` flag when caches are cold; i18n filter is registered exactly once per environment instance. |
+| `ModelListingHandler` | `/api/lm/{prefix}/list` | `service.get_paginated_data`, `service.format_response` | Listings respect pagination query parameters and cap `page_size` at 100; every item is formatted before response. |
+| `ModelManagementHandler` | Mutations (delete, exclude, metadata, preview, tags, rename, bulk delete, duplicate verification) | `ModelLifecycleService`, `MetadataSyncService`, `PreviewAssetService`, `TagUpdateService`, scanner cache/index | Cache state mirrors filesystem changes: deletes prune cache & hash index, preview replacements synchronise metadata and cache NSFW levels, metadata saves trigger cache resort when names change. |
+| `ModelQueryHandler` | Read-only queries (top tags, folders, duplicates, metadata, URLs) | Service query helpers & scanner cache | Outputs always wrapped in `{"success": True}` when no error; duplicate/filename grouping omits empty entries; invalid parameters (e.g. missing `model_root`) return HTTP 400. |
+| `ModelDownloadHandler` | `/api/lm/download-model`, `/download-model-get`, `/download-progress/{id}`, `/cancel-download-get` | `DownloadModelUseCase`, `DownloadCoordinator`, `WebSocketManager` | Payload validation errors become HTTP 400 without mutating download progress cache; early-access failures surface as HTTP 401; successful downloads cache progress snapshots that back both WebSocket broadcasts and polling endpoints. |
+| `ModelCivitaiHandler` | CivitAI metadata routes | `MetadataSyncService`, metadata provider factory, `BulkMetadataRefreshUseCase` | `fetch_all_civitai` streams progress via `WebSocketBroadcastCallback`; version lookups validate model type before returning; local availability fields derive from hash lookups without mutating cache state. |
+| `ModelMoveHandler` | `move_model`, `move_models_bulk` | `ModelMoveService` | Moves execute atomically per request; bulk operations aggregate success/failure per file set. |
+| `ModelAutoOrganizeHandler` | `/api/lm/{prefix}/auto-organize` (GET/POST), `/auto-organize-progress` | `AutoOrganizeUseCase`, `WebSocketProgressCallback`, `WebSocketManager` | Enforces single-flight execution using the shared lock; progress broadcasts remain available to polling clients until explicitly cleared; conflicts return HTTP 409 with a descriptive error. |
+
+## Use case boundaries
+
+Each use case exposes a narrow asynchronous API that hides the underlying
+services. Their error mapping is essential for predictable HTTP responses.
+
+| Use case | Entry point | Dependencies | Guarantees |
+| --- | --- | --- | --- |
+| `DownloadModelUseCase` | `execute(payload)` | `DownloadCoordinator.schedule_download` | Translates `ValueError` into `DownloadModelValidationError` for HTTP 400, recognises early-access errors (`"401"` in message) and surfaces them as `DownloadModelEarlyAccessError`, forwards success dictionaries untouched. |
+| `AutoOrganizeUseCase` | `execute(file_paths, progress_callback)` | `ModelFileService.auto_organize_models`, `WebSocketManager` lock | Guarded by `ws_manager` lock + status checks; raises `AutoOrganizeInProgressError` before invoking the file service when another run is already active. |
+| `BulkMetadataRefreshUseCase` | `execute_with_error_handling(progress_callback)` | `MetadataSyncService`, `SettingsManager`, `WebSocketBroadcastCallback` | Iterates through cached models, applies metadata sync, emits progress snapshots that handlers broadcast unchanged. |
+
+## Maintaining legacy contracts
+
+The refactor preserves the invariants called out in the previous architecture
+notes. The most critical ones are reiterated here to emphasise the
+collaboration points:
+
+1. **Cache mutations** – Delete, exclude, rename, and bulk delete operations are
+ channelled through `ModelManagementHandler`. The handler delegates to
+ `ModelLifecycleService` or `MetadataSyncService`, and the scanner cache is
+ mutated in-place before the handler returns. The accompanying tests assert
+ that `scanner._cache.raw_data` and `scanner._hash_index` stay in sync after
+ each mutation.
+2. **Preview updates** – `PreviewAssetService.replace_preview` writes the new
+ asset, `MetadataSyncService` persists the JSON metadata, and
+ `scanner.update_preview_in_cache` mirrors the change. The handler returns
+ the static URL produced by `config.get_preview_static_url`, keeping browser
+ clients in lockstep with disk state.
+3. **Download progress** – `DownloadCoordinator.schedule_download` generates the
+ download identifier, registers a WebSocket progress callback, and caches the
+ latest numeric progress via `WebSocketManager`. Both `download_model`
+ responses and `/download-progress/{id}` polling read from the same cache to
+ guarantee consistent progress reporting across transports.
+
+## Extending the stack
+
+To add a new shared route:
+
+1. Declare it in `COMMON_ROUTE_DEFINITIONS` using a unique handler name.
+2. Implement the corresponding coroutine on one of the handlers inside
+ `ModelHandlerSet` (or introduce a new handler class when the concern does not
+ fit existing ones).
+3. Inject additional dependencies in `BaseModelRoutes._create_handler_set` by
+ wiring services or use cases through the constructor parameters.
+
+Model-specific routes should continue to be registered inside the subclass
+implementation of `setup_specific_routes`, reusing the shared registrar where
+possible.
diff --git a/docs/architecture/recipe_routes.md b/docs/architecture/recipe_routes.md
new file mode 100644
index 00000000..0bdb7c90
--- /dev/null
+++ b/docs/architecture/recipe_routes.md
@@ -0,0 +1,89 @@
+# Recipe route architecture
+
+The recipe routing stack now mirrors the modular model route design. HTTP
+bindings, controller wiring, handler orchestration, and business rules live in
+separate layers so new behaviours can be added without re-threading the entire
+feature. The diagram below outlines the flow for a typical request:
+
+```mermaid
+graph TD
+ subgraph HTTP
+ A[RecipeRouteRegistrar] -->|binds| B[RecipeRoutes controller]
+ end
+ subgraph Application
+ B --> C[RecipeHandlerSet]
+ C --> D1[Handlers]
+ D1 --> E1[Use cases]
+ E1 --> F1[Services / scanners]
+ end
+ subgraph Side Effects
+ F1 --> G1[Cache & fingerprint index]
+ F1 --> G2[Metadata files]
+ F1 --> G3[Temporary shares]
+ end
+```
+
+## Layer responsibilities
+
+| Layer | Module(s) | Responsibility |
+| --- | --- | --- |
+| Registrar | `py/routes/recipe_route_registrar.py` | Declarative list of every recipe endpoint and helper methods that bind them to an `aiohttp` application. |
+| Controller | `py/routes/base_recipe_routes.py`, `py/routes/recipe_routes.py` | Lazily resolves scanners/clients from the service registry, wires shared templates/i18n, instantiates `RecipeHandlerSet`, and exposes a `{handler_name: coroutine}` mapping for the registrar. |
+| Handler set | `py/routes/handlers/recipe_handlers.py` | Thin HTTP adapters grouped by concern (page view, listings, queries, mutations, sharing). They normalise responses and translate service exceptions into HTTP status codes. |
+| Services & scanners | `py/services/recipes/*.py`, `py/services/recipe_scanner.py`, `py/services/service_registry.py` | Concrete business logic: metadata parsing, persistence, sharing, fingerprint/index maintenance, and cache refresh. |
+
+## Handler responsibilities & invariants
+
+`RecipeHandlerSet` flattens purpose-built handler objects into the callables the
+registrar binds. Each handler is responsible for a narrow concern and enforces a
+set of invariants before returning:
+
+| Handler | Key endpoints | Collaborators | Contracts |
+| --- | --- | --- | --- |
+| `RecipePageView` | `/loras/recipes` | `SettingsManager`, `server_i18n`, Jinja environment, recipe scanner getter | Template rendered with `is_initializing` flag when caches are still warming; i18n filter registered exactly once per environment instance. |
+| `RecipeListingHandler` | `/api/lm/recipes`, `/api/lm/recipe/{id}` | `recipe_scanner.get_paginated_data`, `recipe_scanner.get_recipe_by_id` | Listings respect pagination and search filters; every item receives a `file_url` fallback even when metadata is incomplete; missing recipes become HTTP 404. |
+| `RecipeQueryHandler` | Tag/base-model stats, syntax, LoRA lookups | Recipe scanner cache, `format_recipe_file_url` helper | Cache snapshots are reused without forcing refresh; duplicate lookups collapse groups by fingerprint; syntax lookups return helpful errors when LoRAs are absent. |
+| `RecipeManagementHandler` | Save, update, reconnect, bulk delete, widget ingest | `RecipePersistenceService`, `RecipeAnalysisService`, recipe scanner | Persistence results propagate HTTP status codes; fingerprint/index updates flow through the scanner before returning; validation errors surface as HTTP 400 without touching disk. |
+| `RecipeAnalysisHandler` | Uploaded/local/remote analysis | `RecipeAnalysisService`, `civitai_client`, recipe scanner | Unsupported content types map to HTTP 400; download errors (`RecipeDownloadError`) are not retried; every response includes a `loras` array for client compatibility. |
+| `RecipeSharingHandler` | Share + download | `RecipeSharingService`, recipe scanner | Share responses provide a stable download URL and filename; expired shares surface as HTTP 404; downloads stream via `web.FileResponse` with attachment headers. |
+
+## Use case boundaries
+
+The dedicated services encapsulate long-running work so handlers stay thin.
+
+| Use case | Entry point | Dependencies | Guarantees |
+| --- | --- | --- | --- |
+| `RecipeAnalysisService` | `analyze_uploaded_image`, `analyze_remote_image`, `analyze_local_image`, `analyze_widget_metadata` | `ExifUtils`, `RecipeParserFactory`, downloader factory, optional metadata collector/processor | Normalises missing/invalid payloads into `RecipeValidationError`; generates consistent fingerprint data to keep duplicate detection stable; temporary files are cleaned up after every analysis path. |
+| `RecipePersistenceService` | `save_recipe`, `delete_recipe`, `update_recipe`, `reconnect_lora`, `bulk_delete`, `save_recipe_from_widget` | `ExifUtils`, recipe scanner, card preview sizing constants | Writes images/JSON metadata atomically; updates scanner caches and hash indices before returning; recalculates fingerprints whenever LoRA assignments change. |
+| `RecipeSharingService` | `share_recipe`, `prepare_download` | `tempfile`, recipe scanner | Copies originals to TTL-managed temp files; metadata lookups re-use the scanner; expired shares trigger cleanup and `RecipeNotFoundError`. |
+
+## Maintaining critical invariants
+
+* **Cache updates** – Mutations (`save`, `delete`, `bulk_delete`, `update`) call
+ back into the recipe scanner to mutate the in-memory cache and fingerprint
+ index before returning a response. Tests assert that these methods are invoked
+ even when stubbing persistence.
+* **Fingerprint management** – `RecipePersistenceService` recomputes
+ fingerprints whenever LoRA metadata changes and duplicate lookups use those
+ fingerprints to group recipes. Handlers bubble the resulting IDs so clients
+ can merge duplicates without an extra fetch.
+* **Metadata synchronisation** – Saving or reconnecting a recipe updates the
+ JSON sidecar, refreshes embedded metadata via `ExifUtils`, and instructs the
+ scanner to resort its cache. Sharing relies on this metadata to generate
+ filenames and ensure downloads stay in sync with on-disk state.
+
+## Extending the stack
+
+1. Declare the new endpoint in `ROUTE_DEFINITIONS` with a unique handler name.
+2. Implement the coroutine on an existing handler or introduce a new handler
+ class inside `py/routes/handlers/recipe_handlers.py` when the concern does
+ not fit existing ones.
+3. Wire additional collaborators inside
+ `BaseRecipeRoutes._create_handler_set` (inject new services or factories) and
+ expose helper getters on the handler owner if the handler needs to share
+ utilities.
+
+Integration tests in `tests/routes/test_recipe_routes.py` exercise the listing,
+mutation, analysis-error, and sharing paths end-to-end, ensuring the controller
+and handler wiring remains valid as new capabilities are added.
+
diff --git a/docs/frontend-dom-fixtures.md b/docs/frontend-dom-fixtures.md
new file mode 100644
index 00000000..ee7b785c
--- /dev/null
+++ b/docs/frontend-dom-fixtures.md
@@ -0,0 +1,51 @@
+# Frontend DOM Fixture Strategy
+
+This guide outlines how to reproduce the markup emitted by the server-side Jinja templates while running Vitest in jsdom. The aim is to make it straightforward to write integration-style unit tests for managers and UI helpers without having to duplicate template fragments inline.
+
+## Loading Template Markup
+
+Vitest executes inside Node, so we can read the same HTML templates that ship with the extension:
+
+1. Use the helper utilities from `tests/frontend/utils/domFixtures.js` to read files under the `templates/` directory.
+2. Mount the returned markup into `document.body` (or any custom container) before importing the module under test so its query selectors resolve correctly.
+
+```js
+import { renderTemplate } from '../utils/domFixtures.js'; // adjust the relative path to your spec
+
+beforeEach(() => {
+ renderTemplate('loras.html', {
+ dataset: { page: 'loras' }
+ });
+});
+```
+
+The helper ensures the dataset is applied to the container, which mirrors how the server-rendered templates set `data-page` in production.
+
+## Working with Partial Components
+
+Many features are implemented as template partials located under `templates/components/`. When a test only needs a fragment (for example, the progress panel or context menu markup), load the component file directly:
+
+```js
+const container = renderTemplate('components/progress_panel.html');
+
+const progressPanel = container.querySelector('#progress-panel');
+```
+
+This pattern avoids hand-written fixture strings and keeps the tests aligned with the actual markup.
+
+## Resetting Between Tests
+
+The shared Vitest setup clears `document.body` and storage APIs before each test. If a suite adds additional DOM nodes outside of the body or needs to reset custom attributes mid-test, use `resetDom()` exported from `domFixtures.js`.
+
+```js
+import { resetDom } from '../utils/domFixtures.js';
+
+afterEach(() => {
+ resetDom();
+});
+```
+
+## Future Enhancements
+
+- Provide typed helpers for injecting mock script tags (e.g., replicating ComfyUI globals).
+- Compose higher-level fixtures that mimic specific pages (loras, checkpoints, recipes) once those managers receive dedicated suites.
diff --git a/docs/frontend-filtering-test-matrix.md b/docs/frontend-filtering-test-matrix.md
new file mode 100644
index 00000000..cd8f48d2
--- /dev/null
+++ b/docs/frontend-filtering-test-matrix.md
@@ -0,0 +1,44 @@
+# LoRA & Checkpoints Filtering/Sorting Test Matrix
+
+This matrix captures the scenarios that Phase 3 frontend tests should cover for the LoRA and Checkpoint managers. It focuses on how search, filter, sort, and duplicate badge toggles interact so future specs can share fixtures and expectations.
+
+## Scope
+
+- **Components**: `PageControls`, `FilterManager`, `SearchManager`, and `ModelDuplicatesManager` wiring invoked through `CheckpointsPageManager` and `LorasPageManager`.
+- **Templates**: `templates/loras.html` and `templates/checkpoints.html` along with shared filter panel and toolbar partials.
+- **APIs**: Requests issued through `baseModelApi.fetchModels` (via `resetAndReload`/`refreshModels`) and duplicates badge updates.
+
+## Shared Setup Considerations
+
+1. Render full page templates using `renderLorasPage` / `renderCheckpointsPage` helpers before importing modules so DOM queries resolve.
+2. Stub storage helpers (`getStorageItem`, `setStorageItem`, `getSessionItem`, `setSessionItem`) to observe persistence behavior without mutating real storage.
+3. Mock `sidebarManager` to capture refresh calls triggered after sort/filter actions.
+4. Provide fake API implementations exposing `resetAndReload`, `refreshModels`, `fetchFromCivitai`, `toggleBulkMode`, and `clearCustomFilter` so control events remain asynchronous but deterministic.
+5. Supply a minimal `ModelDuplicatesManager` mock exposing `toggleDuplicateMode`, `checkDuplicatesCount`, and `updateDuplicatesBadgeAfterRefresh` to validate duplicate badge wiring.
+
+## Scenario Matrix
+
+| ID | Feature | Scenario | LoRAs Expectations | Checkpoints Expectations | Notes |
+| --- | --- | --- | --- | --- | --- |
+| F-01 | Search filter | Typing a query updates `pageState.filters.search`, persists to session, and triggers `resetAndReload` on submit | Validate `SearchManager` writes query and reloads via API stub; confirm LoRA cards pass query downstream | Same as LoRAs | Cover `enter` press and clicking search icon |
+| F-02 | Tag filter | Selecting a tag chip adds it to filters, applies active styling, and reloads results | Tag stored under `filters.tags`; `FilterManager.applyFilters` persists and triggers `resetAndReload(true)` | Same; ensure base model tag set is scoped to checkpoints dataset | Include removal path |
+| F-03 | Base model filter | Toggling base model checkboxes updates `filters.baseModel`, persists, and reloads | Ensure only LoRA-supported models show; toggle multi-select | Ensure SDXL/Flux base models appear as expected | Capture UI state restored from storage on next init |
+| F-04 | Favorites-only | Clicking favorites toggle updates session flag and calls `resetAndReload(true)` | Button gains `.active` class and API called | Same | Verify duplicates badge refresh when active |
+| F-05 | Sort selection | Changing sort select saves preference (legacy + new format) and reloads | Confirm `PageControls.saveSortPreference` invoked with option and API called | Same with checkpoints-specific defaults | Cover `convertLegacySortFormat` branch |
+| F-06 | Filter persistence | Re-initializing manager loads stored filters/sort and updates DOM | Filters pre-populate chips/checkboxes; favorites state restored | Same | Requires simulating repeated construction |
+| F-07 | Combined filters | Applying search + tag + base model yields aggregated query params for fetch | Assert API receives merged filter payload | Same | Validate toast messaging for active filters |
+| F-08 | Clearing filters | Using "Clear filters" resets state, storage, and reloads list | `FilterManager.clearFilters` empties `filters`, removes active class, shows toast | Same | Ensure favorites-only toggle unaffected |
+| F-09 | Duplicate badge toggle | Pressing "Find duplicates" toggles duplicate mode and updates badge counts post-refresh | `ModelDuplicatesManager.toggleDuplicateMode` invoked and badge refresh called after API rebuild | Same plus checkpoint-specific duplicate badge dataset | Connects to future duplicate-specific specs |
+| F-10 | Bulk actions menu | Opening bulk dropdown keeps filters intact and closes on outside click | Validate dropdown class toggling and no unintended reload | Same | Guard against regression when dropdown interacts with filters |
+
+## Automation Coverage Status
+
+- ✅ F-01 Search filter, F-02 Tag filter, F-03 Base model filter, F-04 Favorites-only toggle, F-05 Sort selection, and F-09 Duplicate badge toggle are covered by `tests/frontend/components/pageControls.filtering.test.js` for both LoRA and checkpoint pages.
+- ⏳ F-06 Filter persistence, F-07 Combined filters, F-08 Clearing filters, and F-10 Bulk actions remain to be automated alongside upcoming bulk mode refinements.
+
+## Coverage Gaps & Follow-Ups
+
+- Write Vitest suites that exercise the matrix for both managers, sharing fixtures through page helpers to avoid duplication.
+- Capture API parameter assertions by inspecting `baseModelApi.fetchModels` mocks rather than relying solely on state mutations.
+- Add regression cases for legacy storage migrations (old filter keys) once fixtures exist for older payloads.
+- Extend duplicate badge coverage with scenarios where `checkDuplicatesCount` signals zero duplicates versus pending calculations.
diff --git a/docs/frontend-testing-roadmap.md b/docs/frontend-testing-roadmap.md
new file mode 100644
index 00000000..6ef5b514
--- /dev/null
+++ b/docs/frontend-testing-roadmap.md
@@ -0,0 +1,33 @@
+# Frontend Automation Testing Roadmap
+
+This roadmap tracks the planned rollout of automated testing for the ComfyUI LoRA Manager frontend. Each phase builds on the infrastructure introduced in this change set and records progress so future contributors can quickly identify the next tasks.
+
+## Phase Overview
+
+| Phase | Goal | Primary Focus | Status | Notes |
+| --- | --- | --- | --- | --- |
+| Phase 0 | Establish baseline tooling | Add Node test runner, jsdom environment, and seed smoke tests | ✅ Complete | Vitest + jsdom configured, example state tests committed |
+| Phase 1 | Cover state management logic | Unit test selectors, derived data helpers, and storage utilities under `static/js/state` and `static/js/utils` | ✅ Complete | Storage helpers and state selectors now exercised via deterministic suites |
+| Phase 2 | Test AppCore orchestration | Simulate page bootstrapping, infinite scroll hooks, and manager registration using JSDOM DOM fixtures | ✅ Complete | AppCore initialization + page feature suites now validate manager wiring, infinite scroll hooks, and onboarding gating |
+| Phase 3 | Validate page-specific managers | Add focused suites for `loras`, `checkpoints`, `embeddings`, and `recipes` managers covering filtering, sorting, and bulk actions | ✅ Complete | LoRA/checkpoint suites expanded; embeddings + recipes managers now covered with initialization, filtering, and duplicate workflows |
+| Phase 4 | Interaction-level regression tests | Exercise template fragments, modals, and menus to ensure UI wiring remains intact | ✅ Complete | Vitest DOM suites cover NSFW selector, recipe modal editing, and global context menus |
+| Phase 5 | Continuous integration & coverage | Integrate frontend tests into CI workflow and track coverage metrics | ✅ Complete | CI workflow runs Vitest and aggregates V8 coverage into `coverage/frontend` via a dedicated script |
+
+## Next Steps Checklist
+
+- [x] Expand unit tests for `storageHelpers` covering migrations and namespace behavior.
+- [x] Document DOM fixture strategy for reproducing template structures in tests.
+- [x] Prototype AppCore initialization test that verifies manager bootstrapping with stubbed dependencies.
+- [x] Add AppCore page feature suite exercising context menu creation and infinite scroll registration via DOM fixtures.
+- [x] Extend AppCore orchestration tests to cover manager wiring, bulk menu setup, and onboarding gating scenarios.
+- [x] Add interaction regression suites for context menus and recipe modals to complete Phase 4.
+- [x] Evaluate integrating coverage reporting once test surface grows (> 20 specs).
+- [x] Create shared fixtures for the loras and checkpoints pages once dedicated manager suites are added.
+- [x] Draft focused test matrix for loras/checkpoints manager filtering and sorting paths ahead of Phase 3.
+- [x] Implement LoRAs manager filtering/sorting specs for scenarios F-01–F-05 & F-09; queue remaining edge cases after duplicate/bulk flows stabilize.
+- [x] Implement checkpoints manager filtering/sorting specs for scenarios F-01–F-05 & F-09; cover remaining paths alongside bulk action work.
+- [x] Implement checkpoints page manager smoke tests covering initialization and duplicate badge wiring.
+- [x] Outline focused checkpoints scenarios (filtering, sorting, duplicate badge toggles) to feed into the shared test matrix.
+- [ ] Add duplicate badge regression coverage for zero/pending states after API refreshes.
+
+Maintaining this roadmap alongside code changes will make it easier to append new automated test tasks and update their progress.
diff --git a/locales/de.json b/locales/de.json
index 41443a48..984f9130 100644
--- a/locales/de.json
+++ b/locales/de.json
@@ -123,6 +123,20 @@
"noRemoteImagesAvailable": "Keine Remote-Beispielbilder für dieses Modell auf Civitai verfügbar"
}
},
+ "globalContextMenu": {
+ "downloadExampleImages": {
+ "label": "Beispielbilder herunterladen",
+ "missingPath": "Bitte legen Sie einen Speicherort fest, bevor Sie Beispielbilder herunterladen.",
+ "unavailable": "Beispielbild-Downloads sind noch nicht verfügbar. Versuchen Sie es erneut, nachdem die Seite vollständig geladen ist."
+ },
+ "cleanupExampleImages": {
+ "label": "Beispielbild-Ordner bereinigen",
+ "success": "{count} Ordner wurden in den Papierkorb verschoben",
+ "none": "Keine Beispielbild-Ordner mussten bereinigt werden",
+ "partial": "Bereinigung abgeschlossen, {failures} Ordner übersprungen",
+ "error": "Fehler beim Bereinigen der Beispielbild-Ordner: {message}"
+ }
+ },
"header": {
"appTitle": "LoRA Manager",
"navigation": {
@@ -182,7 +196,8 @@
"downloadPathTemplates": "Download-Pfad-Vorlagen",
"exampleImages": "Beispielbilder",
"misc": "Verschiedenes",
- "metadataArchive": "Metadaten-Archiv-Datenbank"
+ "metadataArchive": "Metadaten-Archiv-Datenbank",
+ "proxySettings": "Proxy-Einstellungen"
},
"contentFiltering": {
"blurNsfwContent": "NSFW-Inhalte unscharf stellen",
@@ -240,6 +255,7 @@
"baseModelFirstTag": "Basis-Modell + Erster Tag",
"baseModelAuthor": "Basis-Modell + Autor",
"authorFirstTag": "Autor + Erster Tag",
+ "baseModelAuthorFirstTag": "Basis-Modell + Autor + Erster Tag",
"customTemplate": "Benutzerdefinierte Vorlage"
},
"customTemplatePlaceholder": "Benutzerdefinierte Vorlage eingeben (z.B. {base_model}/{author}/{first_tag})",
@@ -301,6 +317,24 @@
"connecting": "Verbindung zum Download-Server wird hergestellt...",
"completed": "Abgeschlossen",
"downloadComplete": "Download erfolgreich abgeschlossen"
+ },
+ "proxySettings": {
+ "enableProxy": "App-Proxy aktivieren",
+ "enableProxyHelp": "Aktivieren Sie benutzerdefinierte Proxy-Einstellungen für diese Anwendung. Überschreibt die System-Proxy-Einstellungen.",
+ "proxyType": "Proxy-Typ",
+ "proxyTypeHelp": "Wählen Sie den Typ des Proxy-Servers (HTTP, HTTPS, SOCKS4, SOCKS5)",
+ "proxyHost": "Proxy-Host",
+ "proxyHostPlaceholder": "proxy.beispiel.de",
+ "proxyHostHelp": "Der Hostname oder die IP-Adresse Ihres Proxy-Servers",
+ "proxyPort": "Proxy-Port",
+ "proxyPortPlaceholder": "8080",
+ "proxyPortHelp": "Die Portnummer Ihres Proxy-Servers",
+ "proxyUsername": "Benutzername (optional)",
+ "proxyUsernamePlaceholder": "benutzername",
+ "proxyUsernameHelp": "Benutzername für die Proxy-Authentifizierung (falls erforderlich)",
+ "proxyPassword": "Passwort (optional)",
+ "proxyPasswordPlaceholder": "passwort",
+ "proxyPasswordHelp": "Passwort für die Proxy-Authentifizierung (falls erforderlich)"
}
},
"loras": {
@@ -708,6 +742,7 @@
"strengthMin": "Stärke Min",
"strengthMax": "Stärke Max",
"strength": "Stärke",
+ "clipStrength": "Clip-Stärke",
"clipSkip": "Clip Skip",
"valuePlaceholder": "Wert",
"add": "Hinzufügen"
@@ -1148,6 +1183,7 @@
},
"exampleImages": {
"pathUpdated": "Beispielbilder-Pfad erfolgreich aktualisiert",
+ "pathUpdateFailed": "Fehler beim Aktualisieren des Beispielbilder-Pfads: {message}",
"downloadInProgress": "Download bereits in Bearbeitung",
"enterLocationFirst": "Bitte geben Sie zuerst einen Download-Speicherort ein",
"downloadStarted": "Beispielbilder-Download gestartet",
diff --git a/locales/en.json b/locales/en.json
index c1243784..a68a60fd 100644
--- a/locales/en.json
+++ b/locales/en.json
@@ -123,6 +123,20 @@
"noRemoteImagesAvailable": "No remote example images available for this model on Civitai"
}
},
+ "globalContextMenu": {
+ "downloadExampleImages": {
+ "label": "Download example images",
+ "missingPath": "Set a download location before downloading example images.",
+ "unavailable": "Example image downloads aren't available yet. Try again after the page finishes loading."
+ },
+ "cleanupExampleImages": {
+ "label": "Clean up example image folders",
+ "success": "Moved {count} folder(s) to the deleted folder",
+ "none": "No example image folders needed cleanup",
+ "partial": "Cleanup completed with {failures} folder(s) skipped",
+ "error": "Failed to clean example image folders: {message}"
+ }
+ },
"header": {
"appTitle": "LoRA Manager",
"navigation": {
@@ -182,7 +196,8 @@
"downloadPathTemplates": "Download Path Templates",
"exampleImages": "Example Images",
"misc": "Misc.",
- "metadataArchive": "Metadata Archive Database"
+ "metadataArchive": "Metadata Archive Database",
+ "proxySettings": "Proxy Settings"
},
"contentFiltering": {
"blurNsfwContent": "Blur NSFW Content",
@@ -240,6 +255,7 @@
"baseModelFirstTag": "Base Model + First Tag",
"baseModelAuthor": "Base Model + Author",
"authorFirstTag": "Author + First Tag",
+ "baseModelAuthorFirstTag": "Base Model + Author + First Tag",
"customTemplate": "Custom Template"
},
"customTemplatePlaceholder": "Enter custom template (e.g., {base_model}/{author}/{first_tag})",
@@ -301,6 +317,24 @@
"connecting": "Connecting to download server...",
"completed": "Completed",
"downloadComplete": "Download completed successfully"
+ },
+ "proxySettings": {
+ "enableProxy": "Enable App-level Proxy",
+ "enableProxyHelp": "Enable custom proxy settings for this application, overriding system proxy settings",
+ "proxyType": "Proxy Type",
+ "proxyTypeHelp": "Select the type of proxy server (HTTP, HTTPS, SOCKS4, SOCKS5)",
+ "proxyHost": "Proxy Host",
+ "proxyHostPlaceholder": "proxy.example.com",
+ "proxyHostHelp": "The hostname or IP address of your proxy server",
+ "proxyPort": "Proxy Port",
+ "proxyPortPlaceholder": "8080",
+ "proxyPortHelp": "The port number of your proxy server",
+ "proxyUsername": "Username (Optional)",
+ "proxyUsernamePlaceholder": "username",
+ "proxyUsernameHelp": "Username for proxy authentication (if required)",
+ "proxyPassword": "Password (Optional)",
+ "proxyPasswordPlaceholder": "password",
+ "proxyPasswordHelp": "Password for proxy authentication (if required)"
}
},
"loras": {
@@ -708,6 +742,7 @@
"strengthMin": "Strength Min",
"strengthMax": "Strength Max",
"strength": "Strength",
+ "clipStrength": "Clip Strength",
"clipSkip": "Clip Skip",
"valuePlaceholder": "Value",
"add": "Add"
@@ -1148,6 +1183,7 @@
},
"exampleImages": {
"pathUpdated": "Example images path updated successfully",
+ "pathUpdateFailed": "Failed to update example images path: {message}",
"downloadInProgress": "Download already in progress",
"enterLocationFirst": "Please enter a download location first",
"downloadStarted": "Example images download started",
diff --git a/locales/es.json b/locales/es.json
index fa378d63..a226c6e3 100644
--- a/locales/es.json
+++ b/locales/es.json
@@ -123,6 +123,20 @@
"noRemoteImagesAvailable": "No hay imágenes de ejemplo remotas disponibles para este modelo en Civitai"
}
},
+ "globalContextMenu": {
+ "downloadExampleImages": {
+ "label": "Descargar imágenes de ejemplo",
+ "missingPath": "Establece una ubicación de descarga antes de descargar imágenes de ejemplo.",
+ "unavailable": "Las descargas de imágenes de ejemplo aún no están disponibles. Intenta de nuevo después de que la página termine de cargar."
+ },
+ "cleanupExampleImages": {
+ "label": "Limpiar carpetas de imágenes de ejemplo",
+ "success": "Se movieron {count} carpeta(s) a la carpeta de eliminados",
+ "none": "No hay carpetas de imágenes de ejemplo que necesiten limpieza",
+ "partial": "Limpieza completada con {failures} carpeta(s) omitidas",
+ "error": "No se pudieron limpiar las carpetas de imágenes de ejemplo: {message}"
+ }
+ },
"header": {
"appTitle": "LoRA Manager",
"navigation": {
@@ -182,7 +196,8 @@
"downloadPathTemplates": "Plantillas de rutas de descarga",
"exampleImages": "Imágenes de ejemplo",
"misc": "Varios",
- "metadataArchive": "Base de datos de archivo de metadatos"
+ "metadataArchive": "Base de datos de archivo de metadatos",
+ "proxySettings": "Configuración de proxy"
},
"contentFiltering": {
"blurNsfwContent": "Difuminar contenido NSFW",
@@ -240,6 +255,7 @@
"baseModelFirstTag": "Modelo base + primera etiqueta",
"baseModelAuthor": "Modelo base + autor",
"authorFirstTag": "Autor + primera etiqueta",
+ "baseModelAuthorFirstTag": "Modelo base + autor + primera etiqueta",
"customTemplate": "Plantilla personalizada"
},
"customTemplatePlaceholder": "Introduce plantilla personalizada (ej., {base_model}/{author}/{first_tag})",
@@ -301,6 +317,24 @@
"connecting": "Conectando al servidor de descarga...",
"completed": "Completado",
"downloadComplete": "Descarga completada exitosamente"
+ },
+ "proxySettings": {
+ "enableProxy": "Habilitar proxy a nivel de aplicación",
+ "enableProxyHelp": "Habilita la configuración de proxy personalizada para esta aplicación, sobrescribiendo la configuración de proxy del sistema",
+ "proxyType": "Tipo de proxy",
+ "proxyTypeHelp": "Selecciona el tipo de servidor proxy (HTTP, HTTPS, SOCKS4, SOCKS5)",
+ "proxyHost": "Host del proxy",
+ "proxyHostPlaceholder": "proxy.ejemplo.com",
+ "proxyHostHelp": "El nombre de host o dirección IP de tu servidor proxy",
+ "proxyPort": "Puerto del proxy",
+ "proxyPortPlaceholder": "8080",
+ "proxyPortHelp": "El número de puerto de tu servidor proxy",
+ "proxyUsername": "Usuario (opcional)",
+ "proxyUsernamePlaceholder": "usuario",
+ "proxyUsernameHelp": "Usuario para autenticación de proxy (si es necesario)",
+ "proxyPassword": "Contraseña (opcional)",
+ "proxyPasswordPlaceholder": "contraseña",
+ "proxyPasswordHelp": "Contraseña para autenticación de proxy (si es necesario)"
}
},
"loras": {
@@ -708,6 +742,7 @@
"strengthMin": "Fuerza mínima",
"strengthMax": "Fuerza máxima",
"strength": "Fuerza",
+ "clipStrength": "Fuerza de Clip",
"clipSkip": "Clip Skip",
"valuePlaceholder": "Valor",
"add": "Añadir"
@@ -1148,6 +1183,7 @@
},
"exampleImages": {
"pathUpdated": "Ruta de imágenes de ejemplo actualizada exitosamente",
+ "pathUpdateFailed": "Error al actualizar la ruta de imágenes de ejemplo: {message}",
"downloadInProgress": "Descarga ya en progreso",
"enterLocationFirst": "Por favor introduce primero una ubicación de descarga",
"downloadStarted": "Descarga de imágenes de ejemplo iniciada",
diff --git a/locales/fr.json b/locales/fr.json
index 5131296c..6fcf042a 100644
--- a/locales/fr.json
+++ b/locales/fr.json
@@ -123,6 +123,20 @@
"noRemoteImagesAvailable": "Aucune image d'exemple distante disponible pour ce modèle sur Civitai"
}
},
+ "globalContextMenu": {
+ "downloadExampleImages": {
+ "label": "Télécharger les images d'exemple",
+ "missingPath": "Définissez un emplacement de téléchargement avant de télécharger les images d'exemple.",
+ "unavailable": "Le téléchargement des images d'exemple n'est pas encore disponible. Réessayez après le chargement complet de la page."
+ },
+ "cleanupExampleImages": {
+ "label": "Nettoyer les dossiers d'images d'exemple",
+ "success": "{count} dossier(s) déplacé(s) vers le dossier supprimé",
+ "none": "Aucun dossier d'images d'exemple à nettoyer",
+ "partial": "Nettoyage terminé avec {failures} dossier(s) ignoré(s)",
+ "error": "Échec du nettoyage des dossiers d'images d'exemple : {message}"
+ }
+ },
"header": {
"appTitle": "LoRA Manager",
"navigation": {
@@ -182,7 +196,8 @@
"downloadPathTemplates": "Modèles de chemin de téléchargement",
"exampleImages": "Images d'exemple",
"misc": "Divers",
- "metadataArchive": "Base de données d'archive des métadonnées"
+ "metadataArchive": "Base de données d'archive des métadonnées",
+ "proxySettings": "Paramètres du proxy"
},
"contentFiltering": {
"blurNsfwContent": "Flouter le contenu NSFW",
@@ -240,6 +255,7 @@
"baseModelFirstTag": "Modèle de base + Premier tag",
"baseModelAuthor": "Modèle de base + Auteur",
"authorFirstTag": "Auteur + Premier tag",
+ "baseModelAuthorFirstTag": "Modèle de base + Auteur + Premier tag",
"customTemplate": "Modèle personnalisé"
},
"customTemplatePlaceholder": "Entrez un modèle personnalisé (ex: {base_model}/{author}/{first_tag})",
@@ -301,6 +317,24 @@
"connecting": "Connexion au serveur de téléchargement...",
"completed": "Terminé",
"downloadComplete": "Téléchargement terminé avec succès"
+ },
+ "proxySettings": {
+ "enableProxy": "Activer le proxy au niveau de l'application",
+ "enableProxyHelp": "Activer les paramètres de proxy personnalisés pour cette application, remplaçant les paramètres de proxy système",
+ "proxyType": "Type de proxy",
+ "proxyTypeHelp": "Sélectionnez le type de serveur proxy (HTTP, HTTPS, SOCKS4, SOCKS5)",
+ "proxyHost": "Hôte du proxy",
+ "proxyHostPlaceholder": "proxy.exemple.com",
+ "proxyHostHelp": "Le nom d'hôte ou l'adresse IP de votre serveur proxy",
+ "proxyPort": "Port du proxy",
+ "proxyPortPlaceholder": "8080",
+ "proxyPortHelp": "Le numéro de port de votre serveur proxy",
+ "proxyUsername": "Nom d'utilisateur (optionnel)",
+ "proxyUsernamePlaceholder": "nom_utilisateur",
+ "proxyUsernameHelp": "Nom d'utilisateur pour l'authentification proxy (si nécessaire)",
+ "proxyPassword": "Mot de passe (optionnel)",
+ "proxyPasswordPlaceholder": "mot_de_passe",
+ "proxyPasswordHelp": "Mot de passe pour l'authentification proxy (si nécessaire)"
}
},
"loras": {
@@ -708,6 +742,7 @@
"strengthMin": "Force Min",
"strengthMax": "Force Max",
"strength": "Force",
+ "clipStrength": "Force Clip",
"clipSkip": "Clip Skip",
"valuePlaceholder": "Valeur",
"add": "Ajouter"
@@ -1148,6 +1183,7 @@
},
"exampleImages": {
"pathUpdated": "Chemin des images d'exemple mis à jour avec succès",
+ "pathUpdateFailed": "Échec de la mise à jour du chemin des images d'exemple : {message}",
"downloadInProgress": "Téléchargement déjà en cours",
"enterLocationFirst": "Veuillez d'abord entrer un emplacement de téléchargement",
"downloadStarted": "Téléchargement des images d'exemple démarré",
diff --git a/locales/ja.json b/locales/ja.json
index 17529561..cc367cf2 100644
--- a/locales/ja.json
+++ b/locales/ja.json
@@ -123,6 +123,20 @@
"noRemoteImagesAvailable": "このモデルのCivitaiでのリモート例画像は利用できません"
}
},
+ "globalContextMenu": {
+ "downloadExampleImages": {
+ "label": "例画像をダウンロード",
+ "missingPath": "例画像をダウンロードする前にダウンロード場所を設定してください。",
+ "unavailable": "例画像のダウンロードはまだ利用できません。ページの読み込みが完了してから再度お試しください。"
+ },
+ "cleanupExampleImages": {
+ "label": "例画像フォルダをクリーンアップ",
+ "success": "{count} 個のフォルダを削除フォルダに移動しました",
+ "none": "クリーンアップが必要な例画像フォルダはありません",
+ "partial": "クリーンアップが完了しましたが、{failures} 個のフォルダはスキップされました",
+ "error": "例画像フォルダのクリーンアップに失敗しました:{message}"
+ }
+ },
"header": {
"appTitle": "LoRA Manager",
"navigation": {
@@ -182,7 +196,8 @@
"downloadPathTemplates": "ダウンロードパステンプレート",
"exampleImages": "例画像",
"misc": "その他",
- "metadataArchive": "メタデータアーカイブデータベース"
+ "metadataArchive": "メタデータアーカイブデータベース",
+ "proxySettings": "プロキシ設定"
},
"contentFiltering": {
"blurNsfwContent": "NSFWコンテンツをぼかす",
@@ -240,6 +255,7 @@
"baseModelFirstTag": "ベースモデル + 最初のタグ",
"baseModelAuthor": "ベースモデル + 作成者",
"authorFirstTag": "作成者 + 最初のタグ",
+ "baseModelAuthorFirstTag": "ベースモデル + 作成者 + 最初のタグ",
"customTemplate": "カスタムテンプレート"
},
"customTemplatePlaceholder": "カスタムテンプレートを入力(例:{base_model}/{author}/{first_tag})",
@@ -301,6 +317,24 @@
"connecting": "ダウンロードサーバーに接続中...",
"completed": "完了",
"downloadComplete": "ダウンロードが正常に完了しました"
+ },
+ "proxySettings": {
+ "enableProxy": "アプリレベルのプロキシを有効化",
+ "enableProxyHelp": "このアプリケーション専用のカスタムプロキシ設定を有効にします(システムのプロキシ設定を上書きします)",
+ "proxyType": "プロキシタイプ",
+ "proxyTypeHelp": "プロキシサーバーの種類を選択(HTTP、HTTPS、SOCKS4、SOCKS5)",
+ "proxyHost": "プロキシホスト",
+ "proxyHostPlaceholder": "proxy.example.com",
+ "proxyHostHelp": "プロキシサーバーのホスト名またはIPアドレス",
+ "proxyPort": "プロキシポート",
+ "proxyPortPlaceholder": "8080",
+ "proxyPortHelp": "プロキシサーバーのポート番号",
+ "proxyUsername": "ユーザー名(任意)",
+ "proxyUsernamePlaceholder": "ユーザー名",
+ "proxyUsernameHelp": "プロキシ認証用のユーザー名(必要な場合)",
+ "proxyPassword": "パスワード(任意)",
+ "proxyPasswordPlaceholder": "パスワード",
+ "proxyPasswordHelp": "プロキシ認証用のパスワード(必要な場合)"
}
},
"loras": {
@@ -708,6 +742,7 @@
"strengthMin": "強度最小",
"strengthMax": "強度最大",
"strength": "強度",
+ "clipStrength": "クリップ強度",
"clipSkip": "Clip Skip",
"valuePlaceholder": "値",
"add": "追加"
@@ -1148,6 +1183,7 @@
},
"exampleImages": {
"pathUpdated": "例画像パスが正常に更新されました",
+ "pathUpdateFailed": "例画像パスの更新に失敗しました:{message}",
"downloadInProgress": "ダウンロードは既に進行中です",
"enterLocationFirst": "最初にダウンロード場所を入力してください",
"downloadStarted": "例画像のダウンロードが開始されました",
diff --git a/locales/ko.json b/locales/ko.json
index 18b58951..f238f433 100644
--- a/locales/ko.json
+++ b/locales/ko.json
@@ -123,6 +123,20 @@
"noRemoteImagesAvailable": "Civitai에서 이 모델의 원격 예시 이미지를 사용할 수 없습니다"
}
},
+ "globalContextMenu": {
+ "downloadExampleImages": {
+ "label": "예시 이미지 다운로드",
+ "missingPath": "예시 이미지를 다운로드하기 전에 다운로드 위치를 설정하세요.",
+ "unavailable": "예시 이미지 다운로드는 아직 사용할 수 없습니다. 페이지 로딩이 완료된 후 다시 시도하세요."
+ },
+ "cleanupExampleImages": {
+ "label": "예시 이미지 폴더 정리",
+ "success": "{count}개의 폴더가 삭제 폴더로 이동되었습니다",
+ "none": "정리가 필요한 예시 이미지 폴더가 없습니다",
+ "partial": "정리가 완료되었으나 {failures}개의 폴더가 건너뛰어졌습니다",
+ "error": "예시 이미지 폴더 정리에 실패했습니다: {message}"
+ }
+ },
"header": {
"appTitle": "LoRA Manager",
"navigation": {
@@ -182,7 +196,8 @@
"downloadPathTemplates": "다운로드 경로 템플릿",
"exampleImages": "예시 이미지",
"misc": "기타",
- "metadataArchive": "메타데이터 아카이브 데이터베이스"
+ "metadataArchive": "메타데이터 아카이브 데이터베이스",
+ "proxySettings": "프록시 설정"
},
"contentFiltering": {
"blurNsfwContent": "NSFW 콘텐츠 블러 처리",
@@ -240,6 +255,7 @@
"baseModelFirstTag": "베이스 모델 + 첫 번째 태그",
"baseModelAuthor": "베이스 모델 + 제작자",
"authorFirstTag": "제작자 + 첫 번째 태그",
+ "baseModelAuthorFirstTag": "베이스 모델 + 제작자 + 첫 번째 태그",
"customTemplate": "사용자 정의 템플릿"
},
"customTemplatePlaceholder": "사용자 정의 템플릿 입력 (예: {base_model}/{author}/{first_tag})",
@@ -301,6 +317,24 @@
"connecting": "다운로드 서버에 연결 중...",
"completed": "완료됨",
"downloadComplete": "다운로드가 성공적으로 완료되었습니다"
+ },
+ "proxySettings": {
+ "enableProxy": "앱 수준 프록시 활성화",
+ "enableProxyHelp": "이 애플리케이션에 대한 사용자 지정 프록시 설정을 활성화하여 시스템 프록시 설정을 무시합니다",
+ "proxyType": "프록시 유형",
+ "proxyTypeHelp": "프록시 서버 유형을 선택하세요 (HTTP, HTTPS, SOCKS4, SOCKS5)",
+ "proxyHost": "프록시 호스트",
+ "proxyHostPlaceholder": "proxy.example.com",
+ "proxyHostHelp": "프록시 서버의 호스트명 또는 IP 주소",
+ "proxyPort": "프록시 포트",
+ "proxyPortPlaceholder": "8080",
+ "proxyPortHelp": "프록시 서버의 포트 번호",
+ "proxyUsername": "사용자 이름 (선택사항)",
+ "proxyUsernamePlaceholder": "username",
+ "proxyUsernameHelp": "프록시 인증에 필요한 사용자 이름 (필요한 경우)",
+ "proxyPassword": "비밀번호 (선택사항)",
+ "proxyPasswordPlaceholder": "password",
+ "proxyPasswordHelp": "프록시 인증에 필요한 비밀번호 (필요한 경우)"
}
},
"loras": {
@@ -708,6 +742,7 @@
"strengthMin": "최소 강도",
"strengthMax": "최대 강도",
"strength": "강도",
+ "clipStrength": "클립 강도",
"clipSkip": "클립 스킵",
"valuePlaceholder": "값",
"add": "추가"
@@ -1148,6 +1183,7 @@
},
"exampleImages": {
"pathUpdated": "예시 이미지 경로가 성공적으로 업데이트되었습니다",
+ "pathUpdateFailed": "예시 이미지 경로 업데이트 실패: {message}",
"downloadInProgress": "이미 다운로드가 진행 중입니다",
"enterLocationFirst": "먼저 다운로드 위치를 입력해주세요",
"downloadStarted": "예시 이미지 다운로드가 시작되었습니다",
diff --git a/locales/ru.json b/locales/ru.json
index 68c357ad..71e33bc5 100644
--- a/locales/ru.json
+++ b/locales/ru.json
@@ -123,6 +123,20 @@
"noRemoteImagesAvailable": "Нет удаленных примеров изображений для этой модели на Civitai"
}
},
+ "globalContextMenu": {
+ "downloadExampleImages": {
+ "label": "Загрузить примеры изображений",
+ "missingPath": "Укажите место загрузки перед загрузкой примеров изображений.",
+ "unavailable": "Загрузка примеров изображений пока недоступна. Попробуйте снова после полной загрузки страницы."
+ },
+ "cleanupExampleImages": {
+ "label": "Очистить папки с примерами изображений",
+ "success": "Перемещено {count} папок в папку удалённых",
+ "none": "Нет папок с примерами изображений, требующих очистки",
+ "partial": "Очистка завершена, пропущено {failures} папок",
+ "error": "Не удалось очистить папки с примерами изображений: {message}"
+ }
+ },
"header": {
"appTitle": "LoRA Manager",
"navigation": {
@@ -182,7 +196,8 @@
"downloadPathTemplates": "Шаблоны путей загрузки",
"exampleImages": "Примеры изображений",
"misc": "Разное",
- "metadataArchive": "Архив метаданных"
+ "metadataArchive": "Архив метаданных",
+ "proxySettings": "Настройки прокси"
},
"contentFiltering": {
"blurNsfwContent": "Размывать NSFW контент",
@@ -240,6 +255,7 @@
"baseModelFirstTag": "Базовая модель + Первый тег",
"baseModelAuthor": "Базовая модель + Автор",
"authorFirstTag": "Автор + Первый тег",
+ "baseModelAuthorFirstTag": "Базовая модель + Автор + Первый тег",
"customTemplate": "Пользовательский шаблон"
},
"customTemplatePlaceholder": "Введите пользовательский шаблон (например, {base_model}/{author}/{first_tag})",
@@ -301,6 +317,24 @@
"connecting": "Подключение к серверу загрузки...",
"completed": "Завершено",
"downloadComplete": "Загрузка успешно завершена"
+ },
+ "proxySettings": {
+ "enableProxy": "Включить прокси на уровне приложения",
+ "enableProxyHelp": "Включить пользовательские настройки прокси для этого приложения, переопределяя системные настройки прокси",
+ "proxyType": "Тип прокси",
+ "proxyTypeHelp": "Выберите тип прокси-сервера (HTTP, HTTPS, SOCKS4, SOCKS5)",
+ "proxyHost": "Хост прокси",
+ "proxyHostPlaceholder": "proxy.example.com",
+ "proxyHostHelp": "Имя хоста или IP-адрес вашего прокси-сервера",
+ "proxyPort": "Порт прокси",
+ "proxyPortPlaceholder": "8080",
+ "proxyPortHelp": "Номер порта вашего прокси-сервера",
+ "proxyUsername": "Имя пользователя (необязательно)",
+ "proxyUsernamePlaceholder": "имя пользователя",
+ "proxyUsernameHelp": "Имя пользователя для аутентификации на прокси (если требуется)",
+ "proxyPassword": "Пароль (необязательно)",
+ "proxyPasswordPlaceholder": "пароль",
+ "proxyPasswordHelp": "Пароль для аутентификации на прокси (если требуется)"
}
},
"loras": {
@@ -708,6 +742,7 @@
"strengthMin": "Мин. сила",
"strengthMax": "Макс. сила",
"strength": "Сила",
+ "clipStrength": "Сила клипа",
"clipSkip": "Clip Skip",
"valuePlaceholder": "Значение",
"add": "Добавить"
@@ -1148,6 +1183,7 @@
},
"exampleImages": {
"pathUpdated": "Путь к примерам изображений успешно обновлен",
+ "pathUpdateFailed": "Не удалось обновить путь к примерам изображений: {message}",
"downloadInProgress": "Загрузка уже в процессе",
"enterLocationFirst": "Пожалуйста, сначала введите место загрузки",
"downloadStarted": "Загрузка примеров изображений начата",
diff --git a/locales/zh-CN.json b/locales/zh-CN.json
index 8c1dc44e..f2a1d097 100644
--- a/locales/zh-CN.json
+++ b/locales/zh-CN.json
@@ -21,8 +21,8 @@
"disabled": "已禁用"
},
"language": {
- "select": "语言",
- "select_help": "选择你喜欢的界面语言",
+ "select": "Language",
+ "select_help": "Choose your preferred language for the interface",
"english": "English",
"chinese_simplified": "中文(简体)",
"chinese_traditional": "中文(繁体)",
@@ -32,7 +32,13 @@
"korean": "韩语",
"french": "法语",
"spanish": "西班牙语",
- "Hebrew": "עברית"
+ "Hebrew": "עברית",
+ "russian": "Русский",
+ "german": "Deutsch",
+ "japanese": "日本語",
+ "korean": "한국어",
+ "french": "Français",
+ "spanish": "Español"
},
"fileSize": {
"zero": "0 字节",
@@ -123,6 +129,20 @@
"noRemoteImagesAvailable": "此模型在 Civitai 上没有远程示例图片"
}
},
+ "globalContextMenu": {
+ "downloadExampleImages": {
+ "label": "下载示例图片",
+ "missingPath": "请先设置下载位置后再下载示例图片。",
+ "unavailable": "示例图片下载当前不可用。请在页面加载完成后重试。"
+ },
+ "cleanupExampleImages": {
+ "label": "清理示例图片文件夹",
+ "success": "已将 {count} 个文件夹移动到已删除文件夹",
+ "none": "没有需要清理的示例图片文件夹",
+ "partial": "清理完成,有 {failures} 个文件夹跳过",
+ "error": "清理示例图片文件夹失败:{message}"
+ }
+ },
"header": {
"appTitle": "LoRA 管理器",
"navigation": {
@@ -182,7 +202,8 @@
"downloadPathTemplates": "下载路径模板",
"exampleImages": "示例图片",
"misc": "其他",
- "metadataArchive": "元数据归档数据库"
+ "metadataArchive": "元数据归档数据库",
+ "proxySettings": "代理设置"
},
"contentFiltering": {
"blurNsfwContent": "模糊 NSFW 内容",
@@ -240,6 +261,7 @@
"baseModelFirstTag": "基础模型 + 首标签",
"baseModelAuthor": "基础模型 + 作者",
"authorFirstTag": "作者 + 首标签",
+ "baseModelAuthorFirstTag": "基础模型 + 作者 + 首标签",
"customTemplate": "自定义模板"
},
"customTemplatePlaceholder": "输入自定义模板(如:{base_model}/{author}/{first_tag})",
@@ -301,6 +323,24 @@
"connecting": "正在连接下载服务器...",
"completed": "已完成",
"downloadComplete": "下载成功完成"
+ },
+ "proxySettings": {
+ "enableProxy": "启用应用级代理",
+ "enableProxyHelp": "为此应用启用自定义代理设置,覆盖系统代理设置",
+ "proxyType": "代理类型",
+ "proxyTypeHelp": "选择代理服务器类型 (HTTP, HTTPS, SOCKS4, SOCKS5)",
+ "proxyHost": "代理主机",
+ "proxyHostPlaceholder": "proxy.example.com",
+ "proxyHostHelp": "代理服务器的主机名或IP地址",
+ "proxyPort": "代理端口",
+ "proxyPortPlaceholder": "8080",
+ "proxyPortHelp": "代理服务器的端口号",
+ "proxyUsername": "用户名 (可选)",
+ "proxyUsernamePlaceholder": "用户名",
+ "proxyUsernameHelp": "代理认证的用户名 (如果需要)",
+ "proxyPassword": "密码 (可选)",
+ "proxyPasswordPlaceholder": "密码",
+ "proxyPasswordHelp": "代理认证的密码 (如果需要)"
}
},
"loras": {
@@ -708,6 +748,7 @@
"strengthMin": "最小强度",
"strengthMax": "最大强度",
"strength": "强度",
+ "clipStrength": "Clip 强度",
"clipSkip": "Clip Skip",
"valuePlaceholder": "数值",
"add": "添加"
@@ -1148,6 +1189,7 @@
},
"exampleImages": {
"pathUpdated": "示例图片路径更新成功",
+ "pathUpdateFailed": "更新示例图片路径失败:{message}",
"downloadInProgress": "下载已在进行中",
"enterLocationFirst": "请先输入下载位置",
"downloadStarted": "示例图片下载已开始",
diff --git a/locales/zh-TW.json b/locales/zh-TW.json
index f9d02b45..b2a1792c 100644
--- a/locales/zh-TW.json
+++ b/locales/zh-TW.json
@@ -123,6 +123,20 @@
"noRemoteImagesAvailable": "此模型在 Civitai 上無遠端範例圖片"
}
},
+ "globalContextMenu": {
+ "downloadExampleImages": {
+ "label": "下載範例圖片",
+ "missingPath": "請先設定下載位置再下載範例圖片。",
+ "unavailable": "範例圖片下載目前尚不可用。請在頁面載入完成後再試一次。"
+ },
+ "cleanupExampleImages": {
+ "label": "清理範例圖片資料夾",
+ "success": "已將 {count} 個資料夾移至已刪除資料夾",
+ "none": "沒有需要清理的範例圖片資料夾",
+ "partial": "清理完成,有 {failures} 個資料夾略過",
+ "error": "清理範例圖片資料夾失敗:{message}"
+ }
+ },
"header": {
"appTitle": "LoRA 管理器",
"navigation": {
@@ -182,7 +196,8 @@
"downloadPathTemplates": "下載路徑範本",
"exampleImages": "範例圖片",
"misc": "其他",
- "metadataArchive": "中繼資料封存資料庫"
+ "metadataArchive": "中繼資料封存資料庫",
+ "proxySettings": "代理設定"
},
"contentFiltering": {
"blurNsfwContent": "模糊 NSFW 內容",
@@ -240,6 +255,7 @@
"baseModelFirstTag": "基礎模型 + 第一標籤",
"baseModelAuthor": "基礎模型 + 作者",
"authorFirstTag": "作者 + 第一標籤",
+ "baseModelAuthorFirstTag": "基礎模型 + 作者 + 第一標籤",
"customTemplate": "自訂範本"
},
"customTemplatePlaceholder": "輸入自訂範本(例如:{base_model}/{author}/{first_tag})",
@@ -301,6 +317,24 @@
"connecting": "正在連接下載伺服器...",
"completed": "已完成",
"downloadComplete": "下載成功完成"
+ },
+ "proxySettings": {
+ "enableProxy": "啟用應用程式代理",
+ "enableProxyHelp": "啟用此應用程式的自訂代理設定,將覆蓋系統代理設定",
+ "proxyType": "代理類型",
+ "proxyTypeHelp": "選擇代理伺服器類型(HTTP、HTTPS、SOCKS4、SOCKS5)",
+ "proxyHost": "代理主機",
+ "proxyHostPlaceholder": "proxy.example.com",
+ "proxyHostHelp": "您的代理伺服器主機名稱或 IP 位址",
+ "proxyPort": "代理埠號",
+ "proxyPortPlaceholder": "8080",
+ "proxyPortHelp": "您的代理伺服器埠號",
+ "proxyUsername": "使用者名稱(選填)",
+ "proxyUsernamePlaceholder": "username",
+ "proxyUsernameHelp": "代理驗證所需的使用者名稱(如有需要)",
+ "proxyPassword": "密碼(選填)",
+ "proxyPasswordPlaceholder": "password",
+ "proxyPasswordHelp": "代理驗證所需的密碼(如有需要)"
}
},
"loras": {
@@ -708,6 +742,7 @@
"strengthMin": "最小強度",
"strengthMax": "最大強度",
"strength": "強度",
+ "clipStrength": "Clip 強度",
"clipSkip": "Clip Skip",
"valuePlaceholder": "數值",
"add": "新增"
@@ -1148,6 +1183,7 @@
},
"exampleImages": {
"pathUpdated": "範例圖片路徑已更新",
+ "pathUpdateFailed": "更新範例圖片路徑失敗:{message}",
"downloadInProgress": "下載已在進行中",
"enterLocationFirst": "請先輸入下載位置",
"downloadStarted": "範例圖片下載已開始",
diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 00000000..875f7d6a
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,2572 @@
+{
+ "name": "comfyui-lora-manager-frontend",
+ "version": "0.1.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "comfyui-lora-manager-frontend",
+ "version": "0.1.0",
+ "devDependencies": {
+ "jsdom": "^24.0.0",
+ "vitest": "^1.6.0"
+ }
+ },
+ "node_modules/@asamuzakjp/css-color": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz",
+ "integrity": "sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@csstools/css-calc": "^2.1.3",
+ "@csstools/css-color-parser": "^3.0.9",
+ "@csstools/css-parser-algorithms": "^3.0.4",
+ "@csstools/css-tokenizer": "^3.0.3",
+ "lru-cache": "^10.4.3"
+ }
+ },
+ "node_modules/@csstools/color-helpers": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz",
+ "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/csstools"
+ },
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/csstools"
+ }
+ ],
+ "license": "MIT-0",
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@csstools/css-calc": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz",
+ "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/csstools"
+ },
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/csstools"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@csstools/css-parser-algorithms": "^3.0.5",
+ "@csstools/css-tokenizer": "^3.0.4"
+ }
+ },
+ "node_modules/@csstools/css-color-parser": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz",
+ "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/csstools"
+ },
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/csstools"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "@csstools/color-helpers": "^5.1.0",
+ "@csstools/css-calc": "^2.1.4"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@csstools/css-parser-algorithms": "^3.0.5",
+ "@csstools/css-tokenizer": "^3.0.4"
+ }
+ },
+ "node_modules/@csstools/css-parser-algorithms": {
+ "version": "3.0.5",
+ "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz",
+ "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/csstools"
+ },
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/csstools"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "@csstools/css-tokenizer": "^3.0.4"
+ }
+ },
+ "node_modules/@csstools/css-tokenizer": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz",
+ "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/csstools"
+ },
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/csstools"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz",
+ "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz",
+ "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz",
+ "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz",
+ "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz",
+ "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz",
+ "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz",
+ "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz",
+ "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz",
+ "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz",
+ "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz",
+ "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz",
+ "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz",
+ "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz",
+ "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz",
+ "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz",
+ "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz",
+ "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz",
+ "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz",
+ "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz",
+ "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz",
+ "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.52.2.tgz",
+ "integrity": "sha512-o3pcKzJgSGt4d74lSZ+OCnHwkKBeAbFDmbEm5gg70eA8VkyCuC/zV9TwBnmw6VjDlRdF4Pshfb+WE9E6XY1PoQ==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.52.2.tgz",
+ "integrity": "sha512-cqFSWO5tX2vhC9hJTK8WAiPIm4Q8q/cU8j2HQA0L3E1uXvBYbOZMhE2oFL8n2pKB5sOCHY6bBuHaRwG7TkfJyw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.52.2.tgz",
+ "integrity": "sha512-vngduywkkv8Fkh3wIZf5nFPXzWsNsVu1kvtLETWxTFf/5opZmflgVSeLgdHR56RQh71xhPhWoOkEBvbehwTlVA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.52.2.tgz",
+ "integrity": "sha512-h11KikYrUCYTrDj6h939hhMNlqU2fo/X4NB0OZcys3fya49o1hmFaczAiJWVAFgrM1NCP6RrO7lQKeVYSKBPSQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-arm64": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.52.2.tgz",
+ "integrity": "sha512-/eg4CI61ZUkLXxMHyVlmlGrSQZ34xqWlZNW43IAU4RmdzWEx0mQJ2mN/Cx4IHLVZFL6UBGAh+/GXhgvGb+nVxw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-freebsd-x64": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.52.2.tgz",
+ "integrity": "sha512-QOWgFH5X9+p+S1NAfOqc0z8qEpJIoUHf7OWjNUGOeW18Mx22lAUOiA9b6r2/vpzLdfxi/f+VWsYjUOMCcYh0Ng==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "freebsd"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.52.2.tgz",
+ "integrity": "sha512-kDWSPafToDd8LcBYd1t5jw7bD5Ojcu12S3uT372e5HKPzQt532vW+rGFFOaiR0opxePyUkHrwz8iWYEyH1IIQA==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.52.2.tgz",
+ "integrity": "sha512-gKm7Mk9wCv6/rkzwCiUC4KnevYhlf8ztBrDRT9g/u//1fZLapSRc+eDZj2Eu2wpJ+0RzUKgtNijnVIB4ZxyL+w==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.52.2.tgz",
+ "integrity": "sha512-66lA8vnj5mB/rtDNwPgrrKUOtCLVQypkyDa2gMfOefXK6rcZAxKLO9Fy3GkW8VkPnENv9hBkNOFfGLf6rNKGUg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.52.2.tgz",
+ "integrity": "sha512-s+OPucLNdJHvuZHuIz2WwncJ+SfWHFEmlC5nKMUgAelUeBUnlB4wt7rXWiyG4Zn07uY2Dd+SGyVa9oyLkVGOjA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-loong64-gnu": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.52.2.tgz",
+ "integrity": "sha512-8wTRM3+gVMDLLDdaT6tKmOE3lJyRy9NpJUS/ZRWmLCmOPIJhVyXwjBo+XbrrwtV33Em1/eCTd5TuGJm4+DmYjw==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-ppc64-gnu": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.52.2.tgz",
+ "integrity": "sha512-6yqEfgJ1anIeuP2P/zhtfBlDpXUb80t8DpbYwXQ3bQd95JMvUaqiX+fKqYqUwZXqdJDd8xdilNtsHM2N0cFm6A==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.52.2.tgz",
+ "integrity": "sha512-sshYUiYVSEI2B6dp4jMncwxbrUqRdNApF2c3bhtLAU0qA8Lrri0p0NauOsTWh3yCCCDyBOjESHMExonp7Nzc0w==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-musl": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.52.2.tgz",
+ "integrity": "sha512-duBLgd+3pqC4MMwBrKkFxaZerUxZcYApQVC5SdbF5/e/589GwVvlRUnyqMFbM8iUSb1BaoX/3fRL7hB9m2Pj8Q==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.52.2.tgz",
+ "integrity": "sha512-tzhYJJidDUVGMgVyE+PmxENPHlvvqm1KILjjZhB8/xHYqAGeizh3GBGf9u6WdJpZrz1aCpIIHG0LgJgH9rVjHQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.52.2.tgz",
+ "integrity": "sha512-opH8GSUuVcCSSyHHcl5hELrmnk4waZoVpgn/4FDao9iyE4WpQhyWJ5ryl5M3ocp4qkRuHfyXnGqg8M9oKCEKRA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.52.2.tgz",
+ "integrity": "sha512-LSeBHnGli1pPKVJ79ZVJgeZWWZXkEe/5o8kcn23M8eMKCUANejchJbF/JqzM4RRjOJfNRhKJk8FuqL1GKjF5oQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-openharmony-arm64": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.52.2.tgz",
+ "integrity": "sha512-uPj7MQ6/s+/GOpolavm6BPo+6CbhbKYyZHUDvZ/SmJM7pfDBgdGisFX3bY/CBDMg2ZO4utfhlApkSfZ92yXw7Q==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "openharmony"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.52.2.tgz",
+ "integrity": "sha512-Z9MUCrSgIaUeeHAiNkm3cQyst2UhzjPraR3gYYfOjAuZI7tcFRTOD+4cHLPoS/3qinchth+V56vtqz1Tv+6KPA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.52.2.tgz",
+ "integrity": "sha512-+GnYBmpjldD3XQd+HMejo+0gJGwYIOfFeoBQv32xF/RUIvccUz20/V6Otdv+57NE70D5pa8W/jVGDoGq0oON4A==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-gnu": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.52.2.tgz",
+ "integrity": "sha512-ApXFKluSB6kDQkAqZOKXBjiaqdF1BlKi+/eqnYe9Ee7U2K3pUDKsIyr8EYm/QDHTJIM+4X+lI0gJc3TTRhd+dA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.52.2.tgz",
+ "integrity": "sha512-ARz+Bs8kY6FtitYM96PqPEVvPXqEZmPZsSkXvyX19YzDqkCaIlhCieLLMI5hxO9SRZ2XtCtm8wxhy0iJ2jxNfw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz",
+ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@vitest/expect": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.6.1.tgz",
+ "integrity": "sha512-jXL+9+ZNIJKruofqXuuTClf44eSpcHlgj3CiuNihUF3Ioujtmc0zIa3UJOW5RjDK1YLBJZnWBlPuqhYycLioog==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/spy": "1.6.1",
+ "@vitest/utils": "1.6.1",
+ "chai": "^4.3.10"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/runner": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.6.1.tgz",
+ "integrity": "sha512-3nSnYXkVkf3mXFfE7vVyPmi3Sazhb/2cfZGGs0JRzFsPFvAMBEcrweV1V1GsrstdXeKCTXlJbvnQwGWgEIHmOA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/utils": "1.6.1",
+ "p-limit": "^5.0.0",
+ "pathe": "^1.1.1"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/snapshot": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.6.1.tgz",
+ "integrity": "sha512-WvidQuWAzU2p95u8GAKlRMqMyN1yOJkGHnx3M1PL9Raf7AQ1kwLKg04ADlCa3+OXUZE7BceOhVZiuWAbzCKcUQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "magic-string": "^0.30.5",
+ "pathe": "^1.1.1",
+ "pretty-format": "^29.7.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/spy": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.6.1.tgz",
+ "integrity": "sha512-MGcMmpGkZebsMZhbQKkAf9CX5zGvjkBTqf8Zx3ApYWXr3wG+QvEu2eXWfnIIWYSJExIp4V9FCKDEeygzkYrXMw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "tinyspy": "^2.2.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/@vitest/utils": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.6.1.tgz",
+ "integrity": "sha512-jOrrUvXM4Av9ZWiG1EajNto0u96kWAhJ1LmPmJhXXQx/32MecEKd10pOLYgS2BQx1TgkGhloPU1ArDW2vvaY6g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "diff-sequences": "^29.6.3",
+ "estree-walker": "^3.0.3",
+ "loupe": "^2.3.7",
+ "pretty-format": "^29.7.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/acorn": {
+ "version": "8.15.0",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
+ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/acorn-walk": {
+ "version": "8.3.4",
+ "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz",
+ "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "acorn": "^8.11.0"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/agent-base": {
+ "version": "7.1.4",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
+ "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
+ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/assertion-error": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz",
+ "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cac": {
+ "version": "6.7.14",
+ "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz",
+ "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/chai": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-4.5.0.tgz",
+ "integrity": "sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "assertion-error": "^1.1.0",
+ "check-error": "^1.0.3",
+ "deep-eql": "^4.1.3",
+ "get-func-name": "^2.0.2",
+ "loupe": "^2.3.6",
+ "pathval": "^1.1.1",
+ "type-detect": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/check-error": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.3.tgz",
+ "integrity": "sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "get-func-name": "^2.0.2"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/confbox": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.8.tgz",
+ "integrity": "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/cssstyle": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.6.0.tgz",
+ "integrity": "sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@asamuzakjp/css-color": "^3.2.0",
+ "rrweb-cssom": "^0.8.0"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/cssstyle/node_modules/rrweb-cssom": {
+ "version": "0.8.0",
+ "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz",
+ "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/data-urls": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz",
+ "integrity": "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-mimetype": "^4.0.0",
+ "whatwg-url": "^14.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decimal.js": {
+ "version": "10.6.0",
+ "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz",
+ "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/deep-eql": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.4.tgz",
+ "integrity": "sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "type-detect": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/diff-sequences": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz",
+ "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/entities": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz",
+ "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "engines": {
+ "node": ">=0.12"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/esbuild": {
+ "version": "0.21.5",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz",
+ "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.21.5",
+ "@esbuild/android-arm": "0.21.5",
+ "@esbuild/android-arm64": "0.21.5",
+ "@esbuild/android-x64": "0.21.5",
+ "@esbuild/darwin-arm64": "0.21.5",
+ "@esbuild/darwin-x64": "0.21.5",
+ "@esbuild/freebsd-arm64": "0.21.5",
+ "@esbuild/freebsd-x64": "0.21.5",
+ "@esbuild/linux-arm": "0.21.5",
+ "@esbuild/linux-arm64": "0.21.5",
+ "@esbuild/linux-ia32": "0.21.5",
+ "@esbuild/linux-loong64": "0.21.5",
+ "@esbuild/linux-mips64el": "0.21.5",
+ "@esbuild/linux-ppc64": "0.21.5",
+ "@esbuild/linux-riscv64": "0.21.5",
+ "@esbuild/linux-s390x": "0.21.5",
+ "@esbuild/linux-x64": "0.21.5",
+ "@esbuild/netbsd-x64": "0.21.5",
+ "@esbuild/openbsd-x64": "0.21.5",
+ "@esbuild/sunos-x64": "0.21.5",
+ "@esbuild/win32-arm64": "0.21.5",
+ "@esbuild/win32-ia32": "0.21.5",
+ "@esbuild/win32-x64": "0.21.5"
+ }
+ },
+ "node_modules/estree-walker": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
+ "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.0"
+ }
+ },
+ "node_modules/execa": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz",
+ "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^8.0.1",
+ "human-signals": "^5.0.0",
+ "is-stream": "^3.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^5.1.0",
+ "onetime": "^6.0.0",
+ "signal-exit": "^4.1.0",
+ "strip-final-newline": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=16.17"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/execa?sponsor=1"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
+ "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-func-name": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz",
+ "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/get-stream": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz",
+ "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/html-encoding-sniffer": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz",
+ "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-encoding": "^3.1.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/http-proxy-agent": {
+ "version": "7.0.2",
+ "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz",
+ "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "agent-base": "^7.1.0",
+ "debug": "^4.3.4"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/https-proxy-agent": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
+ "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "agent-base": "^7.1.2",
+ "debug": "4"
+ },
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/human-signals": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz",
+ "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=16.17.0"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-potential-custom-element-name": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz",
+ "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/is-stream": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
+ "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/js-tokens": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz",
+ "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jsdom": {
+ "version": "24.1.3",
+ "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-24.1.3.tgz",
+ "integrity": "sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cssstyle": "^4.0.1",
+ "data-urls": "^5.0.0",
+ "decimal.js": "^10.4.3",
+ "form-data": "^4.0.0",
+ "html-encoding-sniffer": "^4.0.0",
+ "http-proxy-agent": "^7.0.2",
+ "https-proxy-agent": "^7.0.5",
+ "is-potential-custom-element-name": "^1.0.1",
+ "nwsapi": "^2.2.12",
+ "parse5": "^7.1.2",
+ "rrweb-cssom": "^0.7.1",
+ "saxes": "^6.0.0",
+ "symbol-tree": "^3.2.4",
+ "tough-cookie": "^4.1.4",
+ "w3c-xmlserializer": "^5.0.0",
+ "webidl-conversions": "^7.0.0",
+ "whatwg-encoding": "^3.1.1",
+ "whatwg-mimetype": "^4.0.0",
+ "whatwg-url": "^14.0.0",
+ "ws": "^8.18.0",
+ "xml-name-validator": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "peerDependencies": {
+ "canvas": "^2.11.2"
+ },
+ "peerDependenciesMeta": {
+ "canvas": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/local-pkg": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.1.tgz",
+ "integrity": "sha512-9rrA30MRRP3gBD3HTGnC6cDFpaE1kVDWxWgqWJUN0RvDNAo+Nz/9GxB+nHOH0ifbVFy0hSA1V6vFDvnx54lTEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "mlly": "^1.7.3",
+ "pkg-types": "^1.2.1"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ }
+ },
+ "node_modules/loupe": {
+ "version": "2.3.7",
+ "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.7.tgz",
+ "integrity": "sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "get-func-name": "^2.0.1"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "10.4.3",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz",
+ "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/magic-string": {
+ "version": "0.30.19",
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.19.tgz",
+ "integrity": "sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.5"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mimic-fn": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
+ "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/mlly": {
+ "version": "1.8.0",
+ "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.8.0.tgz",
+ "integrity": "sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "acorn": "^8.15.0",
+ "pathe": "^2.0.3",
+ "pkg-types": "^1.3.1",
+ "ufo": "^1.6.1"
+ }
+ },
+ "node_modules/mlly/node_modules/pathe": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
+ "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.11",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+ "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/npm-run-path": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz",
+ "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "path-key": "^4.0.0"
+ },
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/npm-run-path/node_modules/path-key": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
+ "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/nwsapi": {
+ "version": "2.2.22",
+ "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.22.tgz",
+ "integrity": "sha512-ujSMe1OWVn55euT1ihwCI1ZcAaAU3nxUiDwfDQldc51ZXaB9m2AyOn6/jh1BLe2t/G8xd6uKG1UBF2aZJeg2SQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/onetime": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
+ "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "mimic-fn": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-limit": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz",
+ "integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "yocto-queue": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/parse5": {
+ "version": "7.3.0",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz",
+ "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "entities": "^6.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/inikulin/parse5?sponsor=1"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pathe": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz",
+ "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/pathval": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz",
+ "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/pkg-types": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.3.1.tgz",
+ "integrity": "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "confbox": "^0.1.8",
+ "mlly": "^1.7.4",
+ "pathe": "^2.0.1"
+ }
+ },
+ "node_modules/pkg-types/node_modules/pathe": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
+ "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/postcss": {
+ "version": "8.5.6",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz",
+ "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "nanoid": "^3.3.11",
+ "picocolors": "^1.1.1",
+ "source-map-js": "^1.2.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/pretty-format": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz",
+ "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "ansi-styles": "^5.0.0",
+ "react-is": "^18.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/psl": {
+ "version": "1.15.0",
+ "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz",
+ "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "punycode": "^2.3.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/lupomontero"
+ }
+ },
+ "node_modules/punycode": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
+ "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/querystringify": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz",
+ "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/react-is": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz",
+ "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/rollup": {
+ "version": "4.52.2",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.52.2.tgz",
+ "integrity": "sha512-I25/2QgoROE1vYV+NQ1En9T9UFB9Cmfm2CJ83zZOlaDpvz29wGQSZXWKw7MiNXau7wYgB/T9fVIdIuEQ+KbiiA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "1.0.8"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.52.2",
+ "@rollup/rollup-android-arm64": "4.52.2",
+ "@rollup/rollup-darwin-arm64": "4.52.2",
+ "@rollup/rollup-darwin-x64": "4.52.2",
+ "@rollup/rollup-freebsd-arm64": "4.52.2",
+ "@rollup/rollup-freebsd-x64": "4.52.2",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.52.2",
+ "@rollup/rollup-linux-arm-musleabihf": "4.52.2",
+ "@rollup/rollup-linux-arm64-gnu": "4.52.2",
+ "@rollup/rollup-linux-arm64-musl": "4.52.2",
+ "@rollup/rollup-linux-loong64-gnu": "4.52.2",
+ "@rollup/rollup-linux-ppc64-gnu": "4.52.2",
+ "@rollup/rollup-linux-riscv64-gnu": "4.52.2",
+ "@rollup/rollup-linux-riscv64-musl": "4.52.2",
+ "@rollup/rollup-linux-s390x-gnu": "4.52.2",
+ "@rollup/rollup-linux-x64-gnu": "4.52.2",
+ "@rollup/rollup-linux-x64-musl": "4.52.2",
+ "@rollup/rollup-openharmony-arm64": "4.52.2",
+ "@rollup/rollup-win32-arm64-msvc": "4.52.2",
+ "@rollup/rollup-win32-ia32-msvc": "4.52.2",
+ "@rollup/rollup-win32-x64-gnu": "4.52.2",
+ "@rollup/rollup-win32-x64-msvc": "4.52.2",
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/rrweb-cssom": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.7.1.tgz",
+ "integrity": "sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/saxes": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz",
+ "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "xmlchars": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=v12.22.7"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/siginfo": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz",
+ "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/signal-exit": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
+ "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/stackback": {
+ "version": "0.0.2",
+ "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz",
+ "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/std-env": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.9.0.tgz",
+ "integrity": "sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/strip-final-newline": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
+ "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/strip-literal": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-2.1.1.tgz",
+ "integrity": "sha512-631UJ6O00eNGfMiWG78ck80dfBab8X6IVFB51jZK5Icd7XAs60Z5y7QdSd/wGIklnWvRbUNloVzhOKKmutxQ6Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "js-tokens": "^9.0.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/antfu"
+ }
+ },
+ "node_modules/symbol-tree": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz",
+ "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/tinybench": {
+ "version": "2.9.0",
+ "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz",
+ "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/tinypool": {
+ "version": "0.8.4",
+ "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.8.4.tgz",
+ "integrity": "sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/tinyspy": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.2.1.tgz",
+ "integrity": "sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "node_modules/tough-cookie": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz",
+ "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "psl": "^1.1.33",
+ "punycode": "^2.1.1",
+ "universalify": "^0.2.0",
+ "url-parse": "^1.5.3"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.1.1.tgz",
+ "integrity": "sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "punycode": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/type-detect": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.1.0.tgz",
+ "integrity": "sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/ufo": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.1.tgz",
+ "integrity": "sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/universalify": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz",
+ "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
+ "node_modules/url-parse": {
+ "version": "1.5.10",
+ "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz",
+ "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "querystringify": "^2.1.1",
+ "requires-port": "^1.0.0"
+ }
+ },
+ "node_modules/vite": {
+ "version": "5.4.20",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.20.tgz",
+ "integrity": "sha512-j3lYzGC3P+B5Yfy/pfKNgVEg4+UtcIJcVRt2cDjIOmhLourAqPqf8P7acgxeiSgUB7E3p2P8/3gNIgDLpwzs4g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "esbuild": "^0.21.3",
+ "postcss": "^8.4.43",
+ "rollup": "^4.20.0"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^18.0.0 || >=20.0.0",
+ "less": "*",
+ "lightningcss": "^1.21.0",
+ "sass": "*",
+ "sass-embedded": "*",
+ "stylus": "*",
+ "sugarss": "*",
+ "terser": "^5.4.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "sass-embedded": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vite-node": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.6.1.tgz",
+ "integrity": "sha512-YAXkfvGtuTzwWbDSACdJSg4A4DZiAqckWe90Zapc/sEX3XvHcw1NdurM/6od8J207tSDqNbSsgdCacBgvJKFuA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cac": "^6.7.14",
+ "debug": "^4.3.4",
+ "pathe": "^1.1.1",
+ "picocolors": "^1.0.0",
+ "vite": "^5.0.0"
+ },
+ "bin": {
+ "vite-node": "vite-node.mjs"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ }
+ },
+ "node_modules/vitest": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.6.1.tgz",
+ "integrity": "sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@vitest/expect": "1.6.1",
+ "@vitest/runner": "1.6.1",
+ "@vitest/snapshot": "1.6.1",
+ "@vitest/spy": "1.6.1",
+ "@vitest/utils": "1.6.1",
+ "acorn-walk": "^8.3.2",
+ "chai": "^4.3.10",
+ "debug": "^4.3.4",
+ "execa": "^8.0.1",
+ "local-pkg": "^0.5.0",
+ "magic-string": "^0.30.5",
+ "pathe": "^1.1.1",
+ "picocolors": "^1.0.0",
+ "std-env": "^3.5.0",
+ "strip-literal": "^2.0.0",
+ "tinybench": "^2.5.1",
+ "tinypool": "^0.8.3",
+ "vite": "^5.0.0",
+ "vite-node": "1.6.1",
+ "why-is-node-running": "^2.2.2"
+ },
+ "bin": {
+ "vitest": "vitest.mjs"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/vitest"
+ },
+ "peerDependencies": {
+ "@edge-runtime/vm": "*",
+ "@types/node": "^18.0.0 || >=20.0.0",
+ "@vitest/browser": "1.6.1",
+ "@vitest/ui": "1.6.1",
+ "happy-dom": "*",
+ "jsdom": "*"
+ },
+ "peerDependenciesMeta": {
+ "@edge-runtime/vm": {
+ "optional": true
+ },
+ "@types/node": {
+ "optional": true
+ },
+ "@vitest/browser": {
+ "optional": true
+ },
+ "@vitest/ui": {
+ "optional": true
+ },
+ "happy-dom": {
+ "optional": true
+ },
+ "jsdom": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/w3c-xmlserializer": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz",
+ "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "xml-name-validator": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz",
+ "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/whatwg-encoding": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz",
+ "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "iconv-lite": "0.6.3"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/whatwg-mimetype": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz",
+ "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/whatwg-url": {
+ "version": "14.2.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.2.0.tgz",
+ "integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "tr46": "^5.1.0",
+ "webidl-conversions": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/why-is-node-running": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz",
+ "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "siginfo": "^2.0.0",
+ "stackback": "0.0.2"
+ },
+ "bin": {
+ "why-is-node-running": "cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ws": {
+ "version": "8.18.3",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz",
+ "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/xml-name-validator": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz",
+ "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/xmlchars": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz",
+ "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/yocto-queue": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.2.1.tgz",
+ "integrity": "sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12.20"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ }
+ }
+}
diff --git a/package.json b/package.json
new file mode 100644
index 00000000..1a5d64e8
--- /dev/null
+++ b/package.json
@@ -0,0 +1,15 @@
+{
+ "name": "comfyui-lora-manager-frontend",
+ "version": "0.1.0",
+ "private": true,
+ "type": "module",
+ "scripts": {
+ "test": "vitest run",
+ "test:watch": "vitest",
+ "test:coverage": "node scripts/run_frontend_coverage.js"
+ },
+ "devDependencies": {
+ "jsdom": "^24.0.0",
+ "vitest": "^1.6.0"
+ }
+}
diff --git a/py/__init__.py b/py/__init__.py
index e69de29b..54e9a3f4 100644
--- a/py/__init__.py
+++ b/py/__init__.py
@@ -0,0 +1,12 @@
+"""Project namespace package."""
+
+# pytest's internal compatibility layer still imports ``py.path.local`` from the
+# historical ``py`` dependency. Because this project reuses the ``py`` package
+# name, we expose a minimal shim so ``py.path.local`` resolves to ``pathlib.Path``
+# during test runs without pulling in the external dependency.
+from pathlib import Path
+from types import SimpleNamespace
+
+path = SimpleNamespace(local=Path)
+
+__all__ = ["path"]
diff --git a/py/config.py b/py/config.py
index 1bdfe2bb..a15d0141 100644
--- a/py/config.py
+++ b/py/config.py
@@ -3,12 +3,11 @@ import platform
import folder_paths # type: ignore
from typing import List
import logging
-import sys
import json
import urllib.parse
-# Check if running in standalone mode
-standalone_mode = 'nodes' not in sys.modules
+# Use an environment variable to control standalone mode
+standalone_mode = os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "0"
logger = logging.getLogger(__name__)
diff --git a/py/lora_manager.py b/py/lora_manager.py
index f6598bca..98f1a00e 100644
--- a/py/lora_manager.py
+++ b/py/lora_manager.py
@@ -16,6 +16,7 @@ from .services.service_registry import ServiceRegistry
from .services.settings_manager import settings
from .utils.example_images_migration import ExampleImagesMigration
from .services.websocket_manager import ws_manager
+from .services.example_images_cleanup_service import ExampleImagesCleanupService
logger = logging.getLogger(__name__)
@@ -166,7 +167,7 @@ class LoraManager:
RecipeRoutes.setup_routes(app)
UpdateRoutes.setup_routes(app)
MiscRoutes.setup_routes(app)
- ExampleImagesRoutes.setup_routes(app)
+ ExampleImagesRoutes.setup_routes(app, ws_manager=ws_manager)
# Setup WebSocket routes that are shared across all model types
app.router.add_get('/ws/fetch-progress', ws_manager.handle_connection)
@@ -221,7 +222,7 @@ class LoraManager:
name='post_init_tasks'
)
- logger.info("LoRA Manager: All services initialized and background tasks scheduled")
+ logger.debug("LoRA Manager: All services initialized and background tasks scheduled")
except Exception as e:
logger.error(f"LoRA Manager: Error initializing services: {e}", exc_info=True)
@@ -240,7 +241,6 @@ class LoraManager:
# Run post-initialization tasks
post_tasks = [
asyncio.create_task(cls._cleanup_backup_files(), name='cleanup_bak_files'),
- asyncio.create_task(cls._cleanup_example_images_folders(), name='cleanup_example_images'),
# Add more post-initialization tasks here as needed
# asyncio.create_task(cls._another_post_task(), name='another_task'),
]
@@ -352,120 +352,37 @@ class LoraManager:
@classmethod
async def _cleanup_example_images_folders(cls):
- """Clean up invalid or empty folders in example images directory"""
+ """Invoke the example images cleanup service for manual execution."""
try:
- example_images_path = settings.get('example_images_path')
- if not example_images_path or not os.path.exists(example_images_path):
- logger.debug("Example images path not configured or doesn't exist, skipping cleanup")
- return
-
- logger.debug(f"Starting cleanup of example images folders in: {example_images_path}")
-
- # Get all scanner instances to check hash validity
- lora_scanner = await ServiceRegistry.get_lora_scanner()
- checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
- embedding_scanner = await ServiceRegistry.get_embedding_scanner()
-
- total_folders_checked = 0
- empty_folders_removed = 0
- invalid_hash_folders_removed = 0
-
- # Scan the example images directory
- try:
- with os.scandir(example_images_path) as it:
- for entry in it:
- if not entry.is_dir(follow_symlinks=False):
- continue
-
- folder_name = entry.name
- folder_path = entry.path
- total_folders_checked += 1
-
- try:
- # Check if folder is empty
- is_empty = cls._is_folder_empty(folder_path)
- if is_empty:
- logger.debug(f"Removing empty example images folder: {folder_name}")
- await cls._remove_folder_safely(folder_path)
- empty_folders_removed += 1
- continue
-
- # Check if folder name is a valid SHA256 hash (64 hex characters)
- if len(folder_name) != 64 or not all(c in '0123456789abcdefABCDEF' for c in folder_name):
- logger.debug(f"Removing invalid hash folder: {folder_name}")
- await cls._remove_folder_safely(folder_path)
- invalid_hash_folders_removed += 1
- continue
-
- # Check if hash exists in any of the scanners
- hash_exists = (
- lora_scanner.has_hash(folder_name) or
- checkpoint_scanner.has_hash(folder_name) or
- embedding_scanner.has_hash(folder_name)
- )
-
- if not hash_exists:
- logger.debug(f"Removing example images folder for deleted model: {folder_name}")
- await cls._remove_folder_safely(folder_path)
- invalid_hash_folders_removed += 1
- continue
-
- logger.debug(f"Keeping valid example images folder: {folder_name}")
-
+ service = ExampleImagesCleanupService()
+ result = await service.cleanup_example_image_folders()
- except Exception as e:
- logger.error(f"Error processing example images folder {folder_name}: {e}")
-
- # Yield control periodically
- await asyncio.sleep(0.01)
-
- except Exception as e:
- logger.error(f"Error scanning example images directory: {e}")
- return
-
- # Log final cleanup report
- total_removed = empty_folders_removed + invalid_hash_folders_removed
- if total_removed > 0:
- logger.info(f"Example images cleanup completed: checked {total_folders_checked} folders, "
- f"removed {empty_folders_removed} empty folders and {invalid_hash_folders_removed} "
- f"folders for deleted/invalid models (total: {total_removed} removed)")
+ if result.get('success'):
+ logger.debug(
+ "Manual example images cleanup completed: moved=%s",
+ result.get('moved_total'),
+ )
+ elif result.get('partial_success'):
+ logger.warning(
+ "Manual example images cleanup partially succeeded: moved=%s failures=%s",
+ result.get('moved_total'),
+ result.get('move_failures'),
+ )
else:
- logger.info(f"Example images cleanup completed: checked {total_folders_checked} folders, "
- f"no cleanup needed")
-
- except Exception as e:
+ logger.debug(
+ "Manual example images cleanup skipped or failed: %s",
+ result.get('error', 'no changes'),
+ )
+
+ return result
+
+ except Exception as e: # pragma: no cover - defensive guard
logger.error(f"Error during example images cleanup: {e}", exc_info=True)
-
- @classmethod
- def _is_folder_empty(cls, folder_path: str) -> bool:
- """Check if a folder is empty
-
- Args:
- folder_path: Path to the folder to check
-
- Returns:
- bool: True if folder is empty, False otherwise
- """
- try:
- with os.scandir(folder_path) as it:
- return not any(it)
- except Exception as e:
- logger.debug(f"Error checking if folder is empty {folder_path}: {e}")
- return False
-
- @classmethod
- async def _remove_folder_safely(cls, folder_path: str):
- """Safely remove a folder and all its contents
-
- Args:
- folder_path: Path to the folder to remove
- """
- try:
- import shutil
- loop = asyncio.get_event_loop()
- await loop.run_in_executor(None, shutil.rmtree, folder_path)
- except Exception as e:
- logger.warning(f"Failed to remove folder {folder_path}: {e}")
+ return {
+ 'success': False,
+ 'error': str(e),
+ 'error_code': 'unexpected_error',
+ }
@classmethod
async def _cleanup(cls, app):
@@ -473,11 +390,5 @@ class LoraManager:
try:
logger.info("LoRA Manager: Cleaning up services")
- # Close CivitaiClient gracefully
- civitai_client = await ServiceRegistry.get_service("civitai_client")
- if civitai_client:
- await civitai_client.close()
- logger.info("Closed CivitaiClient connection")
-
except Exception as e:
logger.error(f"Error during cleanup: {e}", exc_info=True)
diff --git a/py/metadata_collector/__init__.py b/py/metadata_collector/__init__.py
index 29f377e9..d71bf49f 100644
--- a/py/metadata_collector/__init__.py
+++ b/py/metadata_collector/__init__.py
@@ -1,9 +1,7 @@
import os
-import importlib
-import sys
# Check if running in standalone mode
-standalone_mode = 'nodes' not in sys.modules
+standalone_mode = os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "0"
if not standalone_mode:
from .metadata_hook import MetadataHook
diff --git a/py/metadata_collector/metadata_processor.py b/py/metadata_collector/metadata_processor.py
index 437677d7..3532ceba 100644
--- a/py/metadata_collector/metadata_processor.py
+++ b/py/metadata_collector/metadata_processor.py
@@ -1,9 +1,9 @@
import json
-import sys
+import os
from .constants import IMAGES
# Check if running in standalone mode
-standalone_mode = 'nodes' not in sys.modules
+standalone_mode = os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "0"
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IS_SAMPLER
diff --git a/py/middleware/__init__.py b/py/middleware/__init__.py
new file mode 100644
index 00000000..2d7c7c3a
--- /dev/null
+++ b/py/middleware/__init__.py
@@ -0,0 +1 @@
+"""Server middleware modules"""
diff --git a/py/middleware/cache_middleware.py b/py/middleware/cache_middleware.py
new file mode 100644
index 00000000..4df22b30
--- /dev/null
+++ b/py/middleware/cache_middleware.py
@@ -0,0 +1,53 @@
+"""Cache control middleware for ComfyUI server"""
+
+from aiohttp import web
+from typing import Callable, Awaitable
+
+# Time in seconds
+ONE_HOUR: int = 3600
+ONE_DAY: int = 86400
+IMG_EXTENSIONS = (
+ ".jpg",
+ ".jpeg",
+ ".png",
+ ".ppm",
+ ".bmp",
+ ".pgm",
+ ".tif",
+ ".tiff",
+ ".webp",
+ ".mp4"
+)
+
+
+@web.middleware
+async def cache_control(
+ request: web.Request, handler: Callable[[web.Request], Awaitable[web.Response]]
+) -> web.Response:
+ """Cache control middleware that sets appropriate cache headers based on file type and response status"""
+ response: web.Response = await handler(request)
+
+ if (
+ request.path.endswith(".js")
+ or request.path.endswith(".css")
+ or request.path.endswith("index.json")
+ ):
+ response.headers.setdefault("Cache-Control", "no-cache")
+ return response
+
+ # Early return for non-image files - no cache headers needed
+ if not request.path.lower().endswith(IMG_EXTENSIONS):
+ return response
+
+ # Handle image files
+ if response.status == 404:
+ response.headers.setdefault("Cache-Control", f"public, max-age={ONE_HOUR}")
+ elif response.status in (200, 201, 202, 203, 204, 205, 206, 301, 308):
+ # Success responses and permanent redirects - cache for 1 day
+ response.headers.setdefault("Cache-Control", f"public, max-age={ONE_DAY}")
+ elif response.status in (302, 303, 307):
+ # Temporary redirects - no cache
+ response.headers.setdefault("Cache-Control", "no-cache")
+ # Note: 304 Not Modified falls through - no cache headers set
+
+ return response
diff --git a/py/nodes/lora_loader.py b/py/nodes/lora_loader.py
index 7ff8c219..d431c263 100644
--- a/py/nodes/lora_loader.py
+++ b/py/nodes/lora_loader.py
@@ -115,7 +115,7 @@ class LoraManagerLoader:
formatted_loras = []
for item in loaded_loras:
parts = item.split(":")
- lora_name = parts[0].strip()
+ lora_name = parts[0]
strength_parts = parts[1].strip().split(",")
if len(strength_parts) > 1:
@@ -165,7 +165,7 @@ class LoraManagerTextLoader:
loras = []
for match in matches:
- lora_name = match[0].strip()
+ lora_name = match[0]
model_strength = float(match[1])
clip_strength = float(match[2]) if match[2] else model_strength
diff --git a/py/recipes/base.py b/py/recipes/base.py
index 78bb933f..3897c683 100644
--- a/py/recipes/base.py
+++ b/py/recipes/base.py
@@ -55,7 +55,7 @@ class RecipeMetadataParser(ABC):
# Unpack the tuple to get the actual data
civitai_info, error_msg = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)
- if not civitai_info or civitai_info.get("error") == "Model not found":
+ if not civitai_info or error_msg == "Model not found":
# Model not found or deleted
lora_entry['isDeleted'] = True
lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png'
diff --git a/py/recipes/parsers/civitai_image.py b/py/recipes/parsers/civitai_image.py
index 8e96c99b..31234ab9 100644
--- a/py/recipes/parsers/civitai_image.py
+++ b/py/recipes/parsers/civitai_image.py
@@ -91,7 +91,7 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
result["base_model"] = metadata["baseModel"]
elif "Model hash" in metadata and metadata_provider:
model_hash = metadata["Model hash"]
- model_info = await metadata_provider.get_model_by_hash(model_hash)
+ model_info, error = await metadata_provider.get_model_by_hash(model_hash)
if model_info:
result["base_model"] = model_info.get("baseModel", "")
elif "Model" in metadata and isinstance(metadata.get("resources"), list):
@@ -100,7 +100,7 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
if resource.get("type") == "model" and resource.get("name") == metadata.get("Model"):
# This is likely the checkpoint model
if metadata_provider and resource.get("hash"):
- model_info = await metadata_provider.get_model_by_hash(resource.get("hash"))
+ model_info, error = await metadata_provider.get_model_by_hash(resource.get("hash"))
if model_info:
result["base_model"] = model_info.get("baseModel", "")
@@ -201,11 +201,7 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
if version_id and metadata_provider:
try:
# Use get_model_version_info instead of get_model_version
- civitai_info, error = await metadata_provider.get_model_version_info(version_id)
-
- if error:
- logger.warning(f"Error getting model version info: {error}")
- continue
+ civitai_info = await metadata_provider.get_model_version_info(version_id)
populated_entry = await self.populate_lora_from_civitai(
lora_entry,
@@ -267,26 +263,23 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
if version_id and metadata_provider:
try:
# Use get_model_version_info with the version ID
- civitai_info, error = await metadata_provider.get_model_version_info(version_id)
+ civitai_info = await metadata_provider.get_model_version_info(version_id)
- if error:
- logger.warning(f"Error getting model version info: {error}")
- else:
- populated_entry = await self.populate_lora_from_civitai(
- lora_entry,
- civitai_info,
- recipe_scanner,
- base_model_counts
- )
+ populated_entry = await self.populate_lora_from_civitai(
+ lora_entry,
+ civitai_info,
+ recipe_scanner,
+ base_model_counts
+ )
+
+ if populated_entry is None:
+ continue # Skip invalid LoRA types
- if populated_entry is None:
- continue # Skip invalid LoRA types
-
- lora_entry = populated_entry
-
- # Track this LoRA for deduplication
- if version_id:
- added_loras[version_id] = len(result["loras"])
+ lora_entry = populated_entry
+
+ # Track this LoRA for deduplication
+ if version_id:
+ added_loras[version_id] = len(result["loras"])
except Exception as e:
logger.error(f"Error fetching Civitai info for model ID {version_id}: {e}")
diff --git a/py/routes/base_model_routes.py b/py/routes/base_model_routes.py
index be0f2695..84b9f43f 100644
--- a/py/routes/base_model_routes.py
+++ b/py/routes/base_model_routes.py
@@ -1,968 +1,275 @@
-from abc import ABC, abstractmethod
-import asyncio
-import os
-import json
+from __future__ import annotations
+
import logging
-from aiohttp import web
-from typing import Dict
+from abc import ABC, abstractmethod
+from typing import Callable, Dict, Mapping
import jinja2
+from aiohttp import web
-from ..utils.routes_common import ModelRouteUtils
-from ..services.websocket_manager import ws_manager
-from ..services.settings_manager import settings
-from ..services.server_i18n import server_i18n
-from ..services.model_file_service import ModelFileService, ModelMoveService
-from ..services.websocket_progress_callback import WebSocketProgressCallback
from ..config import config
+from ..services.download_coordinator import DownloadCoordinator
+from ..services.downloader import get_downloader
+from ..services.metadata_service import get_default_metadata_provider, get_metadata_provider
+from ..services.metadata_sync_service import MetadataSyncService
+from ..services.model_file_service import ModelFileService, ModelMoveService
+from ..services.model_lifecycle_service import ModelLifecycleService
+from ..services.preview_asset_service import PreviewAssetService
+from ..services.server_i18n import server_i18n as default_server_i18n
+from ..services.service_registry import ServiceRegistry
+from ..services.settings_manager import settings as default_settings
+from ..services.tag_update_service import TagUpdateService
+from ..services.websocket_manager import ws_manager as default_ws_manager
+from ..services.use_cases import (
+ AutoOrganizeUseCase,
+ BulkMetadataRefreshUseCase,
+ DownloadModelUseCase,
+)
+from ..services.websocket_progress_callback import (
+ WebSocketBroadcastCallback,
+ WebSocketProgressCallback,
+)
+from ..utils.exif_utils import ExifUtils
+from ..utils.metadata_manager import MetadataManager
+from .model_route_registrar import COMMON_ROUTE_DEFINITIONS, ModelRouteRegistrar
+from .handlers.model_handlers import (
+ ModelAutoOrganizeHandler,
+ ModelCivitaiHandler,
+ ModelDownloadHandler,
+ ModelHandlerSet,
+ ModelListingHandler,
+ ModelManagementHandler,
+ ModelMoveHandler,
+ ModelPageView,
+ ModelQueryHandler,
+)
logger = logging.getLogger(__name__)
+
class BaseModelRoutes(ABC):
- """Base route controller for all model types"""
-
- def __init__(self, service):
- """Initialize the route controller
-
- Args:
- service: Model service instance (LoraService, CheckpointService, etc.)
- """
- self.service = service
- self.model_type = service.model_type
+ """Base route controller for all model types."""
+
+ template_name: str | None = None
+
+ def __init__(
+ self,
+ service=None,
+ *,
+ settings_service=default_settings,
+ ws_manager=default_ws_manager,
+ server_i18n=default_server_i18n,
+ metadata_provider_factory=get_default_metadata_provider,
+ ) -> None:
+ self.service = None
+ self.model_type = ""
+ self._settings = settings_service
+ self._ws_manager = ws_manager
+ self._server_i18n = server_i18n
+ self._metadata_provider_factory = metadata_provider_factory
+
self.template_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(config.templates_path),
- autoescape=True
+ autoescape=True,
)
-
- # Initialize file services with dependency injection
+
+ self.model_file_service: ModelFileService | None = None
+ self.model_move_service: ModelMoveService | None = None
+ self.model_lifecycle_service: ModelLifecycleService | None = None
+ self.websocket_progress_callback = WebSocketProgressCallback()
+ self.metadata_progress_callback = WebSocketBroadcastCallback()
+
+ self._handler_set: ModelHandlerSet | None = None
+ self._handler_mapping: Dict[str, Callable[[web.Request], web.StreamResponse]] | None = None
+
+ self._preview_service = PreviewAssetService(
+ metadata_manager=MetadataManager,
+ downloader_factory=get_downloader,
+ exif_utils=ExifUtils,
+ )
+ self._metadata_sync_service = MetadataSyncService(
+ metadata_manager=MetadataManager,
+ preview_service=self._preview_service,
+ settings=settings_service,
+ default_metadata_provider_factory=metadata_provider_factory,
+ metadata_provider_selector=get_metadata_provider,
+ )
+ self._tag_update_service = TagUpdateService(metadata_manager=MetadataManager)
+ self._download_coordinator = DownloadCoordinator(
+ ws_manager=self._ws_manager,
+ download_manager_factory=ServiceRegistry.get_download_manager,
+ )
+
+ if service is not None:
+ self.attach_service(service)
+
+ def attach_service(self, service) -> None:
+ """Attach a model service and rebuild handler dependencies."""
+ self.service = service
+ self.model_type = service.model_type
self.model_file_service = ModelFileService(service.scanner, service.model_type)
self.model_move_service = ModelMoveService(service.scanner)
- self.websocket_progress_callback = WebSocketProgressCallback()
-
- def setup_routes(self, app: web.Application, prefix: str):
- """Setup common routes for the model type
-
- Args:
- app: aiohttp application
- prefix: URL prefix (e.g., 'loras', 'checkpoints')
- """
- # Common model management routes
- app.router.add_get(f'/api/{prefix}/list', self.get_models)
- app.router.add_post(f'/api/{prefix}/delete', self.delete_model)
- app.router.add_post(f'/api/{prefix}/exclude', self.exclude_model)
- app.router.add_post(f'/api/{prefix}/fetch-civitai', self.fetch_civitai)
- app.router.add_post(f'/api/{prefix}/fetch-all-civitai', self.fetch_all_civitai)
- app.router.add_post(f'/api/{prefix}/relink-civitai', self.relink_civitai)
- app.router.add_post(f'/api/{prefix}/replace-preview', self.replace_preview)
- app.router.add_post(f'/api/{prefix}/save-metadata', self.save_metadata)
- app.router.add_post(f'/api/{prefix}/add-tags', self.add_tags)
- app.router.add_post(f'/api/{prefix}/rename', self.rename_model)
- app.router.add_post(f'/api/{prefix}/bulk-delete', self.bulk_delete_models)
- app.router.add_post(f'/api/{prefix}/verify-duplicates', self.verify_duplicates)
- app.router.add_post(f'/api/{prefix}/move_model', self.move_model)
- app.router.add_post(f'/api/{prefix}/move_models_bulk', self.move_models_bulk)
- app.router.add_get(f'/api/{prefix}/auto-organize', self.auto_organize_models)
- app.router.add_post(f'/api/{prefix}/auto-organize', self.auto_organize_models)
- app.router.add_get(f'/api/{prefix}/auto-organize-progress', self.get_auto_organize_progress)
-
- # Common query routes
- app.router.add_get(f'/api/{prefix}/top-tags', self.get_top_tags)
- app.router.add_get(f'/api/{prefix}/base-models', self.get_base_models)
- app.router.add_get(f'/api/{prefix}/scan', self.scan_models)
- app.router.add_get(f'/api/{prefix}/roots', self.get_model_roots)
- app.router.add_get(f'/api/{prefix}/folders', self.get_folders)
- app.router.add_get(f'/api/{prefix}/folder-tree', self.get_folder_tree)
- app.router.add_get(f'/api/{prefix}/unified-folder-tree', self.get_unified_folder_tree)
- app.router.add_get(f'/api/{prefix}/find-duplicates', self.find_duplicate_models)
- app.router.add_get(f'/api/{prefix}/find-filename-conflicts', self.find_filename_conflicts)
- app.router.add_get(f'/api/{prefix}/get-notes', self.get_model_notes)
- app.router.add_get(f'/api/{prefix}/preview-url', self.get_model_preview_url)
- app.router.add_get(f'/api/{prefix}/civitai-url', self.get_model_civitai_url)
- app.router.add_get(f'/api/{prefix}/metadata', self.get_model_metadata)
- app.router.add_get(f'/api/{prefix}/model-description', self.get_model_description)
-
- # Autocomplete route
- app.router.add_get(f'/api/{prefix}/relative-paths', self.get_relative_paths)
+ self.model_lifecycle_service = ModelLifecycleService(
+ scanner=service.scanner,
+ metadata_manager=MetadataManager,
+ metadata_loader=self._metadata_sync_service.load_local_metadata,
+ recipe_scanner_factory=ServiceRegistry.get_recipe_scanner,
+ )
+ self._handler_set = None
+ self._handler_mapping = None
+
+ def _ensure_handler_mapping(self) -> Mapping[str, Callable[[web.Request], web.StreamResponse]]:
+ if self._handler_mapping is None:
+ handler_set = self._create_handler_set()
+ self._handler_set = handler_set
+ self._handler_mapping = handler_set.to_route_mapping()
+ return self._handler_mapping
+
+ def _create_handler_set(self) -> ModelHandlerSet:
+ service = self._ensure_service()
+ page_view = ModelPageView(
+ template_env=self.template_env,
+ template_name=self.template_name or "",
+ service=service,
+ settings_service=self._settings,
+ server_i18n=self._server_i18n,
+ logger=logger,
+ )
+ listing = ModelListingHandler(
+ service=service,
+ parse_specific_params=self._parse_specific_params,
+ logger=logger,
+ )
+ management = ModelManagementHandler(
+ service=service,
+ logger=logger,
+ metadata_sync=self._metadata_sync_service,
+ preview_service=self._preview_service,
+ tag_update_service=self._tag_update_service,
+ lifecycle_service=self._ensure_lifecycle_service(),
+ )
+ query = ModelQueryHandler(service=service, logger=logger)
+ download_use_case = DownloadModelUseCase(download_coordinator=self._download_coordinator)
+ download = ModelDownloadHandler(
+ ws_manager=self._ws_manager,
+ logger=logger,
+ download_use_case=download_use_case,
+ download_coordinator=self._download_coordinator,
+ )
+ metadata_refresh_use_case = BulkMetadataRefreshUseCase(
+ service=service,
+ metadata_sync=self._metadata_sync_service,
+ settings_service=self._settings,
+ logger=logger,
+ )
+ civitai = ModelCivitaiHandler(
+ service=service,
+ settings_service=self._settings,
+ ws_manager=self._ws_manager,
+ logger=logger,
+ metadata_provider_factory=self._metadata_provider_factory,
+ validate_model_type=self._validate_civitai_model_type,
+ expected_model_types=self._get_expected_model_types,
+ find_model_file=self._find_model_file,
+ metadata_sync=self._metadata_sync_service,
+ metadata_refresh_use_case=metadata_refresh_use_case,
+ metadata_progress_callback=self.metadata_progress_callback,
+ )
+ move = ModelMoveHandler(move_service=self._ensure_move_service(), logger=logger)
+ auto_organize_use_case = AutoOrganizeUseCase(
+ file_service=self._ensure_file_service(),
+ lock_provider=self._ws_manager,
+ )
+ auto_organize = ModelAutoOrganizeHandler(
+ use_case=auto_organize_use_case,
+ progress_callback=self.websocket_progress_callback,
+ ws_manager=self._ws_manager,
+ logger=logger,
+ )
+ return ModelHandlerSet(
+ page_view=page_view,
+ listing=listing,
+ management=management,
+ query=query,
+ download=download,
+ civitai=civitai,
+ move=move,
+ auto_organize=auto_organize,
+ )
+
+ @property
+ def route_handlers(self) -> Mapping[str, Callable[[web.Request], web.StreamResponse]]:
+ return self._ensure_handler_mapping()
+
+ def setup_routes(self, app: web.Application, prefix: str) -> None:
+ registrar = ModelRouteRegistrar(app)
+ handler_lookup = {
+ definition.handler_name: self._make_handler_proxy(definition.handler_name)
+ for definition in COMMON_ROUTE_DEFINITIONS
+ }
+ registrar.register_common_routes(prefix, handler_lookup)
+ self.setup_specific_routes(registrar, prefix)
- # Common Download management
- app.router.add_post(f'/api/download-model', self.download_model)
- app.router.add_get(f'/api/download-model-get', self.download_model_get)
- app.router.add_get(f'/api/cancel-download-get', self.cancel_download_get)
- app.router.add_get(f'/api/download-progress/{{download_id}}', self.get_download_progress)
-
- # app.router.add_get(f'/api/civitai/versions/{{model_id}}', self.get_civitai_versions)
-
- # Add generic page route
- app.router.add_get(f'/{prefix}', self.handle_models_page)
-
- # Setup model-specific routes
- self.setup_specific_routes(app, prefix)
-
@abstractmethod
- def setup_specific_routes(self, app: web.Application, prefix: str):
- """Setup model-specific routes - to be implemented by subclasses"""
- pass
-
- async def handle_models_page(self, request: web.Request) -> web.Response:
- """
- Generic handler for model pages (e.g., /loras, /checkpoints).
- Subclasses should set self.template_env and template_name.
- """
- try:
- # Check if the scanner is initializing
- is_initializing = (
- self.service.scanner._cache is None or
- (hasattr(self.service.scanner, 'is_initializing') and callable(self.service.scanner.is_initializing) and self.service.scanner.is_initializing()) or
- (hasattr(self.service.scanner, '_is_initializing') and self.service.scanner._is_initializing)
- )
+ def setup_specific_routes(self, registrar: ModelRouteRegistrar, prefix: str) -> None:
+ """Setup model-specific routes."""
+ raise NotImplementedError
- template_name = getattr(self, "template_name", None)
- if not self.template_env or not template_name:
- return web.Response(text="Template environment or template name not set", status=500)
-
- # Get user's language setting
- user_language = settings.get('language', 'en')
-
- # Set server-side i18n locale
- server_i18n.set_locale(user_language)
-
- # Add i18n filter to the template environment if not already added
- if not hasattr(self.template_env, '_i18n_filter_added'):
- self.template_env.filters['t'] = server_i18n.create_template_filter()
- self.template_env._i18n_filter_added = True
-
- # Prepare template context
- template_context = {
- 'is_initializing': is_initializing,
- 'settings': settings,
- 'request': request,
- 'folders': [],
- 't': server_i18n.get_translation,
- }
-
- if not is_initializing:
- try:
- cache = await self.service.scanner.get_cached_data(force_refresh=False)
- template_context['folders'] = getattr(cache, "folders", [])
- except Exception as cache_error:
- logger.error(f"Error loading cache data: {cache_error}")
- template_context['is_initializing'] = True
-
- rendered = self.template_env.get_template(template_name).render(**template_context)
-
- return web.Response(
- text=rendered,
- content_type='text/html'
- )
- except Exception as e:
- logger.error(f"Error handling models page: {e}", exc_info=True)
- return web.Response(
- text="Error loading models page",
- status=500
- )
-
- async def get_models(self, request: web.Request) -> web.Response:
- """Get paginated model data"""
- try:
- # Parse common query parameters
- params = self._parse_common_params(request)
-
- # Get data from service
- result = await self.service.get_paginated_data(**params)
-
- # Format response items
- formatted_result = {
- 'items': [await self.service.format_response(item) for item in result['items']],
- 'total': result['total'],
- 'page': result['page'],
- 'page_size': result['page_size'],
- 'total_pages': result['total_pages']
- }
-
- return web.json_response(formatted_result)
-
- except Exception as e:
- logger.error(f"Error in get_{self.model_type}s: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- def _parse_common_params(self, request: web.Request) -> Dict:
- """Parse common query parameters"""
- # Parse basic pagination and sorting
- page = int(request.query.get('page', '1'))
- page_size = min(int(request.query.get('page_size', '20')), 100)
- sort_by = request.query.get('sort_by', 'name')
- folder = request.query.get('folder', None)
- search = request.query.get('search', None)
- fuzzy_search = request.query.get('fuzzy_search', 'false').lower() == 'true'
-
- # Parse filter arrays
- base_models = request.query.getall('base_model', [])
- tags = request.query.getall('tag', [])
- favorites_only = request.query.get('favorites_only', 'false').lower() == 'true'
-
- # Parse search options
- search_options = {
- 'filename': request.query.get('search_filename', 'true').lower() == 'true',
- 'modelname': request.query.get('search_modelname', 'true').lower() == 'true',
- 'tags': request.query.get('search_tags', 'false').lower() == 'true',
- 'creator': request.query.get('search_creator', 'false').lower() == 'true',
- 'recursive': request.query.get('recursive', 'true').lower() == 'true',
- }
-
- # Parse hash filters if provided
- hash_filters = {}
- if 'hash' in request.query:
- hash_filters['single_hash'] = request.query['hash']
- elif 'hashes' in request.query:
- try:
- hash_list = json.loads(request.query['hashes'])
- if isinstance(hash_list, list):
- hash_filters['multiple_hashes'] = hash_list
- except (json.JSONDecodeError, TypeError):
- pass
-
- return {
- 'page': page,
- 'page_size': page_size,
- 'sort_by': sort_by,
- 'folder': folder,
- 'search': search,
- 'fuzzy_search': fuzzy_search,
- 'base_models': base_models,
- 'tags': tags,
- 'search_options': search_options,
- 'hash_filters': hash_filters,
- 'favorites_only': favorites_only,
- # Add model-specific parameters
- **self._parse_specific_params(request)
- }
-
def _parse_specific_params(self, request: web.Request) -> Dict:
- """Parse model-specific parameters - to be overridden by subclasses"""
+ """Parse model-specific parameters - to be overridden by subclasses."""
return {}
-
- # Common route handlers
- async def delete_model(self, request: web.Request) -> web.Response:
- """Handle model deletion request"""
- return await ModelRouteUtils.handle_delete_model(request, self.service.scanner)
-
- async def exclude_model(self, request: web.Request) -> web.Response:
- """Handle model exclusion request"""
- return await ModelRouteUtils.handle_exclude_model(request, self.service.scanner)
-
- async def fetch_civitai(self, request: web.Request) -> web.Response:
- """Handle CivitAI metadata fetch request"""
- response = await ModelRouteUtils.handle_fetch_civitai(request, self.service.scanner)
-
- # If successful, format the metadata before returning
- if response.status == 200:
- data = json.loads(response.body.decode('utf-8'))
- if data.get("success") and data.get("metadata"):
- formatted_metadata = await self.service.format_response(data["metadata"])
- return web.json_response({
- "success": True,
- "metadata": formatted_metadata
- })
-
- return response
-
- async def relink_civitai(self, request: web.Request) -> web.Response:
- """Handle CivitAI metadata re-linking request"""
- return await ModelRouteUtils.handle_relink_civitai(request, self.service.scanner)
-
- async def replace_preview(self, request: web.Request) -> web.Response:
- """Handle preview image replacement"""
- return await ModelRouteUtils.handle_replace_preview(request, self.service.scanner)
-
- async def save_metadata(self, request: web.Request) -> web.Response:
- """Handle saving metadata updates"""
- return await ModelRouteUtils.handle_save_metadata(request, self.service.scanner)
-
- async def add_tags(self, request: web.Request) -> web.Response:
- """Handle adding tags to model metadata"""
- return await ModelRouteUtils.handle_add_tags(request, self.service.scanner)
-
- async def rename_model(self, request: web.Request) -> web.Response:
- """Handle renaming a model file and its associated files"""
- return await ModelRouteUtils.handle_rename_model(request, self.service.scanner)
-
- async def bulk_delete_models(self, request: web.Request) -> web.Response:
- """Handle bulk deletion of models"""
- return await ModelRouteUtils.handle_bulk_delete_models(request, self.service.scanner)
-
- async def verify_duplicates(self, request: web.Request) -> web.Response:
- """Handle verification of duplicate model hashes"""
- return await ModelRouteUtils.handle_verify_duplicates(request, self.service.scanner)
-
- async def get_top_tags(self, request: web.Request) -> web.Response:
- """Handle request for top tags sorted by frequency"""
- try:
- limit = int(request.query.get('limit', '20'))
- if limit < 1 or limit > 100:
- limit = 20
-
- top_tags = await self.service.get_top_tags(limit)
-
- return web.json_response({
- 'success': True,
- 'tags': top_tags
- })
-
- except Exception as e:
- logger.error(f"Error getting top tags: {str(e)}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': 'Internal server error'
- }, status=500)
-
- async def get_base_models(self, request: web.Request) -> web.Response:
- """Get base models used in models"""
- try:
- limit = int(request.query.get('limit', '20'))
- if limit < 1 or limit > 100:
- limit = 20
-
- base_models = await self.service.get_base_models(limit)
-
- return web.json_response({
- 'success': True,
- 'base_models': base_models
- })
- except Exception as e:
- logger.error(f"Error retrieving base models: {e}")
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def scan_models(self, request: web.Request) -> web.Response:
- """Force a rescan of model files"""
- try:
- full_rebuild = request.query.get('full_rebuild', 'false').lower() == 'true'
-
- await self.service.scan_models(force_refresh=True, rebuild_cache=full_rebuild)
- return web.json_response({
- "status": "success",
- "message": f"{self.model_type.capitalize()} scan completed"
- })
- except Exception as e:
- logger.error(f"Error in scan_{self.model_type}s: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- async def get_model_roots(self, request: web.Request) -> web.Response:
- """Return the model root directories"""
- try:
- roots = self.service.get_model_roots()
- return web.json_response({
- "success": True,
- "roots": roots
- })
- except Exception as e:
- logger.error(f"Error getting {self.model_type} roots: {e}", exc_info=True)
- return web.json_response({
- "success": False,
- "error": str(e)
- }, status=500)
-
- async def get_folders(self, request: web.Request) -> web.Response:
- """Get all folders in the cache"""
- try:
- cache = await self.service.scanner.get_cached_data()
- return web.json_response({
- 'folders': cache.folders
- })
- except Exception as e:
- logger.error(f"Error getting folders: {e}")
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_folder_tree(self, request: web.Request) -> web.Response:
- """Get hierarchical folder tree structure for download modal"""
- try:
- model_root = request.query.get('model_root')
- if not model_root:
- return web.json_response({
- 'success': False,
- 'error': 'model_root parameter is required'
- }, status=400)
-
- folder_tree = await self.service.get_folder_tree(model_root)
- return web.json_response({
- 'success': True,
- 'tree': folder_tree
- })
- except Exception as e:
- logger.error(f"Error getting folder tree: {e}")
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_unified_folder_tree(self, request: web.Request) -> web.Response:
- """Get unified folder tree across all model roots"""
- try:
- unified_tree = await self.service.get_unified_folder_tree()
- return web.json_response({
- 'success': True,
- 'tree': unified_tree
- })
- except Exception as e:
- logger.error(f"Error getting unified folder tree: {e}")
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def find_duplicate_models(self, request: web.Request) -> web.Response:
- """Find models with duplicate SHA256 hashes"""
- try:
- # Get duplicate hashes from service
- duplicates = self.service.find_duplicate_hashes()
-
- # Format the response
- result = []
- cache = await self.service.scanner.get_cached_data()
-
- for sha256, paths in duplicates.items():
- group = {
- "hash": sha256,
- "models": []
- }
- # Find matching models for each path
- for path in paths:
- model = next((m for m in cache.raw_data if m['file_path'] == path), None)
- if model:
- group["models"].append(await self.service.format_response(model))
-
- # Add the primary model too
- primary_path = self.service.get_path_by_hash(sha256)
- if primary_path and primary_path not in paths:
- primary_model = next((m for m in cache.raw_data if m['file_path'] == primary_path), None)
- if primary_model:
- group["models"].insert(0, await self.service.format_response(primary_model))
-
- if len(group["models"]) > 1: # Only include if we found multiple models
- result.append(group)
-
- return web.json_response({
- "success": True,
- "duplicates": result,
- "count": len(result)
- })
- except Exception as e:
- logger.error(f"Error finding duplicate {self.model_type}s: {e}", exc_info=True)
- return web.json_response({
- "success": False,
- "error": str(e)
- }, status=500)
-
- async def find_filename_conflicts(self, request: web.Request) -> web.Response:
- """Find models with conflicting filenames"""
- try:
- # Get duplicate filenames from service
- duplicates = self.service.find_duplicate_filenames()
-
- # Format the response
- result = []
- cache = await self.service.scanner.get_cached_data()
-
- for filename, paths in duplicates.items():
- group = {
- "filename": filename,
- "models": []
- }
- # Find matching models for each path
- for path in paths:
- model = next((m for m in cache.raw_data if m['file_path'] == path), None)
- if model:
- group["models"].append(await self.service.format_response(model))
-
- # Find the model from the main index too
- hash_val = self.service.scanner.get_hash_by_filename(filename)
- if hash_val:
- main_path = self.service.get_path_by_hash(hash_val)
- if main_path and main_path not in paths:
- main_model = next((m for m in cache.raw_data if m['file_path'] == main_path), None)
- if main_model:
- group["models"].insert(0, await self.service.format_response(main_model))
-
- if group["models"]:
- result.append(group)
-
- return web.json_response({
- "success": True,
- "conflicts": result,
- "count": len(result)
- })
- except Exception as e:
- logger.error(f"Error finding filename conflicts for {self.model_type}s: {e}", exc_info=True)
- return web.json_response({
- "success": False,
- "error": str(e)
- }, status=500)
-
- # Download management methods
- async def download_model(self, request: web.Request) -> web.Response:
- """Handle model download request"""
- return await ModelRouteUtils.handle_download_model(request)
-
- async def download_model_get(self, request: web.Request) -> web.Response:
- """Handle model download request via GET method"""
- try:
- # Extract query parameters
- model_id = request.query.get('model_id')
- if not model_id:
- return web.Response(
- status=400,
- text="Missing required parameter: Please provide 'model_id'"
- )
-
- # Get optional parameters
- model_version_id = request.query.get('model_version_id')
- download_id = request.query.get('download_id')
- use_default_paths = request.query.get('use_default_paths', 'false').lower() == 'true'
-
- # Create a data dictionary that mimics what would be received from a POST request
- data = {
- 'model_id': model_id
- }
-
- # Add optional parameters only if they are provided
- if model_version_id:
- data['model_version_id'] = model_version_id
-
- if download_id:
- data['download_id'] = download_id
-
- data['use_default_paths'] = use_default_paths
-
- # Create a mock request object with the data
- future = asyncio.get_event_loop().create_future()
- future.set_result(data)
-
- mock_request = type('MockRequest', (), {
- 'json': lambda self=None: future
- })()
-
- # Call the existing download handler
- return await ModelRouteUtils.handle_download_model(mock_request)
-
- except Exception as e:
- error_message = str(e)
- logger.error(f"Error downloading model via GET: {error_message}", exc_info=True)
- return web.Response(status=500, text=error_message)
-
- async def cancel_download_get(self, request: web.Request) -> web.Response:
- """Handle GET request for cancelling a download by download_id"""
- try:
- download_id = request.query.get('download_id')
- if not download_id:
- return web.json_response({
- 'success': False,
- 'error': 'Download ID is required'
- }, status=400)
-
- # Create a mock request with match_info for compatibility
- mock_request = type('MockRequest', (), {
- 'match_info': {'download_id': download_id}
- })()
- return await ModelRouteUtils.handle_cancel_download(mock_request)
- except Exception as e:
- logger.error(f"Error cancelling download via GET: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_download_progress(self, request: web.Request) -> web.Response:
- """Handle request for download progress by download_id"""
- try:
- # Get download_id from URL path
- download_id = request.match_info.get('download_id')
- if not download_id:
- return web.json_response({
- 'success': False,
- 'error': 'Download ID is required'
- }, status=400)
-
- progress_data = ws_manager.get_download_progress(download_id)
-
- if progress_data is None:
- return web.json_response({
- 'success': False,
- 'error': 'Download ID not found'
- }, status=404)
-
- return web.json_response({
- 'success': True,
- 'progress': progress_data.get('progress', 0)
- })
- except Exception as e:
- logger.error(f"Error getting download progress: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def fetch_all_civitai(self, request: web.Request) -> web.Response:
- """Fetch CivitAI metadata for all models in the background"""
- try:
- cache = await self.service.scanner.get_cached_data()
- total = len(cache.raw_data)
- processed = 0
- success = 0
- needs_resort = False
-
- # Prepare models to process, only those without CivitAI data or missing tags, description, or creator
- enable_metadata_archive_db = settings.get('enable_metadata_archive_db', False)
- to_process = [
- model for model in cache.raw_data
- if (
- model.get('sha256')
- and (
- not model.get('civitai')
- or not model['civitai'].get('id')
- # or not model.get('tags') # Skipping tag cause it could be empty legitimately
- or not model.get('modelDescription')
- or not (model.get('civitai') and model['civitai'].get('creator'))
- )
- and (
- (enable_metadata_archive_db)
- or (not enable_metadata_archive_db and model.get('from_civitai') is True)
- )
- )
- ]
- total_to_process = len(to_process)
-
- # Send initial progress
- await ws_manager.broadcast({
- 'status': 'started',
- 'total': total_to_process,
- 'processed': 0,
- 'success': 0
- })
-
- # Process each model
- for model in to_process:
- try:
- original_name = model.get('model_name')
- if await ModelRouteUtils.fetch_and_update_model(
- sha256=model['sha256'],
- file_path=model['file_path'],
- model_data=model,
- update_cache_func=self.service.scanner.update_single_model_cache
- ):
- success += 1
- if original_name != model.get('model_name'):
- needs_resort = True
-
- processed += 1
-
- # Send progress update
- await ws_manager.broadcast({
- 'status': 'processing',
- 'total': total_to_process,
- 'processed': processed,
- 'success': success,
- 'current_name': model.get('model_name', 'Unknown')
- })
-
- except Exception as e:
- logger.error(f"Error fetching CivitAI data for {model['file_path']}: {e}")
-
- if needs_resort:
- await cache.resort()
-
- # Send completion message
- await ws_manager.broadcast({
- 'status': 'completed',
- 'total': total_to_process,
- 'processed': processed,
- 'success': success
- })
-
- return web.json_response({
- "success": True,
- "message": f"Successfully updated {success} of {processed} processed {self.model_type}s (total: {total})"
- })
-
- except Exception as e:
- # Send error message
- await ws_manager.broadcast({
- 'status': 'error',
- 'error': str(e)
- })
- logger.error(f"Error in fetch_all_civitai for {self.model_type}s: {e}")
- return web.Response(text=str(e), status=500)
-
- async def get_civitai_versions(self, request: web.Request) -> web.Response:
- """Get available versions for a Civitai model with local availability info"""
- # This will be implemented by subclasses as they need CivitAI client access
- return web.json_response({
- "error": "Not implemented in base class"
- }, status=501)
-
- # Common model move handlers
- async def move_model(self, request: web.Request) -> web.Response:
- """Handle model move request"""
- try:
- data = await request.json()
- file_path = data.get('file_path')
- target_path = data.get('target_path')
-
- if not file_path or not target_path:
- return web.Response(text='File path and target path are required', status=400)
-
- result = await self.model_move_service.move_model(file_path, target_path)
-
- if result['success']:
- return web.json_response(result)
- else:
- return web.json_response(result, status=500)
-
- except Exception as e:
- logger.error(f"Error moving model: {e}", exc_info=True)
- return web.Response(text=str(e), status=500)
- async def move_models_bulk(self, request: web.Request) -> web.Response:
- """Handle bulk model move request"""
- try:
- data = await request.json()
- file_paths = data.get('file_paths', [])
- target_path = data.get('target_path')
-
- if not file_paths or not target_path:
- return web.Response(text='File paths and target path are required', status=400)
-
- result = await self.model_move_service.move_models_bulk(file_paths, target_path)
- return web.json_response(result)
-
- except Exception as e:
- logger.error(f"Error moving models in bulk: {e}", exc_info=True)
- return web.Response(text=str(e), status=500)
-
- async def auto_organize_models(self, request: web.Request) -> web.Response:
- """Auto-organize all models or a specific set of models based on current settings"""
- try:
- # Check if auto-organize is already running
- if ws_manager.is_auto_organize_running():
- return web.json_response({
- 'success': False,
- 'error': 'Auto-organize is already running. Please wait for it to complete.'
- }, status=409)
-
- # Acquire lock to prevent concurrent auto-organize operations
- auto_organize_lock = await ws_manager.get_auto_organize_lock()
-
- if auto_organize_lock.locked():
- return web.json_response({
- 'success': False,
- 'error': 'Auto-organize is already running. Please wait for it to complete.'
- }, status=409)
-
- # Get specific file paths from request if this is a POST with selected models
- file_paths = None
- if request.method == 'POST':
- try:
- data = await request.json()
- file_paths = data.get('file_paths')
- except Exception:
- pass # Continue with all models if no valid JSON
-
- async with auto_organize_lock:
- # Use the service layer for business logic
- result = await self.model_file_service.auto_organize_models(
- file_paths=file_paths,
- progress_callback=self.websocket_progress_callback
- )
-
- return web.json_response(result.to_dict())
-
- except Exception as e:
- logger.error(f"Error in auto_organize_models: {e}", exc_info=True)
-
- # Send error message via WebSocket
- await ws_manager.broadcast_auto_organize_progress({
- 'type': 'auto_organize_progress',
- 'status': 'error',
- 'error': str(e)
- })
-
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_auto_organize_progress(self, request: web.Request) -> web.Response:
- """Get current auto-organize progress for polling"""
- try:
- progress_data = ws_manager.get_auto_organize_progress()
-
- if progress_data is None:
- return web.json_response({
- 'success': False,
- 'error': 'No auto-organize operation in progress'
- }, status=404)
-
- return web.json_response({
- 'success': True,
- 'progress': progress_data
- })
- except Exception as e:
- logger.error(f"Error getting auto-organize progress: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_model_notes(self, request: web.Request) -> web.Response:
- """Get notes for a specific model file"""
- try:
- model_name = request.query.get('name')
- if not model_name:
- return web.Response(text=f'{self.model_type.capitalize()} file name is required', status=400)
-
- notes = await self.service.get_model_notes(model_name)
- if notes is not None:
- return web.json_response({
- 'success': True,
- 'notes': notes
- })
- else:
- return web.json_response({
- 'success': False,
- 'error': f'{self.model_type.capitalize()} not found in cache'
- }, status=404)
-
- except Exception as e:
- logger.error(f"Error getting {self.model_type} notes: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_model_preview_url(self, request: web.Request) -> web.Response:
- """Get the static preview URL for a model file"""
- try:
- model_name = request.query.get('name')
- if not model_name:
- return web.Response(text=f'{self.model_type.capitalize()} file name is required', status=400)
-
- preview_url = await self.service.get_model_preview_url(model_name)
- if preview_url:
- return web.json_response({
- 'success': True,
- 'preview_url': preview_url
- })
- else:
- return web.json_response({
- 'success': False,
- 'error': f'No preview URL found for the specified {self.model_type}'
- }, status=404)
-
- except Exception as e:
- logger.error(f"Error getting {self.model_type} preview URL: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_model_civitai_url(self, request: web.Request) -> web.Response:
- """Get the Civitai URL for a model file"""
- try:
- model_name = request.query.get('name')
- if not model_name:
- return web.Response(text=f'{self.model_type.capitalize()} file name is required', status=400)
-
- result = await self.service.get_model_civitai_url(model_name)
- if result['civitai_url']:
- return web.json_response({
- 'success': True,
- **result
- })
- else:
- return web.json_response({
- 'success': False,
- 'error': f'No Civitai data found for the specified {self.model_type}'
- }, status=404)
-
- except Exception as e:
- logger.error(f"Error getting {self.model_type} Civitai URL: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_model_metadata(self, request: web.Request) -> web.Response:
- """Get filtered CivitAI metadata for a model by file path"""
- try:
- file_path = request.query.get('file_path')
- if not file_path:
- return web.Response(text='File path is required', status=400)
-
- metadata = await self.service.get_model_metadata(file_path)
- if metadata is not None:
- return web.json_response({
- 'success': True,
- 'metadata': metadata
- })
- else:
- return web.json_response({
- 'success': False,
- 'error': f'{self.model_type.capitalize()} not found or no CivitAI metadata available'
- }, status=404)
-
- except Exception as e:
- logger.error(f"Error getting {self.model_type} metadata: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_model_description(self, request: web.Request) -> web.Response:
- """Get model description by file path"""
- try:
- file_path = request.query.get('file_path')
- if not file_path:
- return web.Response(text='File path is required', status=400)
-
- description = await self.service.get_model_description(file_path)
- if description is not None:
- return web.json_response({
- 'success': True,
- 'description': description
- })
- else:
- return web.json_response({
- 'success': False,
- 'error': f'{self.model_type.capitalize()} not found or no description available'
- }, status=404)
-
- except Exception as e:
- logger.error(f"Error getting {self.model_type} description: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_relative_paths(self, request: web.Request) -> web.Response:
- """Get model relative file paths for autocomplete functionality"""
- try:
- search = request.query.get('search', '').strip()
- limit = min(int(request.query.get('limit', '15')), 50) # Max 50 items
-
- matching_paths = await self.service.search_relative_paths(search, limit)
-
- return web.json_response({
- 'success': True,
- 'relative_paths': matching_paths
- })
-
- except Exception as e:
- logger.error(f"Error getting relative paths for autocomplete: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
\ No newline at end of file
+ def _validate_civitai_model_type(self, model_type: str) -> bool:
+ """Validate CivitAI model type - to be overridden by subclasses."""
+ return True
+
+ def _get_expected_model_types(self) -> str:
+ """Get expected model types string for error messages - to be overridden by subclasses."""
+ return "any model type"
+
+ def _find_model_file(self, files):
+ """Find the appropriate model file from the files list - can be overridden by subclasses."""
+ return next((file for file in files if file.get("type") == "Model" and file.get("primary") is True), None)
+
+ def get_handler(self, name: str) -> Callable[[web.Request], web.StreamResponse]:
+ """Expose handlers for subclasses or tests."""
+ return self._ensure_handler_mapping()[name]
+
+ def _ensure_service(self):
+ if self.service is None:
+ raise RuntimeError("Model service has not been attached")
+ return self.service
+
+ def _ensure_file_service(self) -> ModelFileService:
+ if self.model_file_service is None:
+ service = self._ensure_service()
+ self.model_file_service = ModelFileService(service.scanner, service.model_type)
+ return self.model_file_service
+
+ def _ensure_move_service(self) -> ModelMoveService:
+ if self.model_move_service is None:
+ service = self._ensure_service()
+ self.model_move_service = ModelMoveService(service.scanner)
+ return self.model_move_service
+
+ def _ensure_lifecycle_service(self) -> ModelLifecycleService:
+ if self.model_lifecycle_service is None:
+ service = self._ensure_service()
+ self.model_lifecycle_service = ModelLifecycleService(
+ scanner=service.scanner,
+ metadata_manager=MetadataManager,
+ metadata_loader=self._metadata_sync_service.load_local_metadata,
+ recipe_scanner_factory=ServiceRegistry.get_recipe_scanner,
+ )
+ return self.model_lifecycle_service
+
+ def _make_handler_proxy(self, name: str) -> Callable[[web.Request], web.StreamResponse]:
+ async def proxy(request: web.Request) -> web.StreamResponse:
+ try:
+ handler = self.get_handler(name)
+ except RuntimeError:
+ return web.json_response({"success": False, "error": "Service not ready"}, status=503)
+ return await handler(request)
+
+ return proxy
+
diff --git a/py/routes/base_recipe_routes.py b/py/routes/base_recipe_routes.py
new file mode 100644
index 00000000..4447bb7b
--- /dev/null
+++ b/py/routes/base_recipe_routes.py
@@ -0,0 +1,217 @@
+"""Base infrastructure shared across recipe routes."""
+from __future__ import annotations
+
+import logging
+import os
+from typing import Callable, Mapping
+
+import jinja2
+from aiohttp import web
+
+from ..config import config
+from ..recipes import RecipeParserFactory
+from ..services.downloader import get_downloader
+from ..services.recipes import (
+ RecipeAnalysisService,
+ RecipePersistenceService,
+ RecipeSharingService,
+)
+from ..services.server_i18n import server_i18n
+from ..services.service_registry import ServiceRegistry
+from ..services.settings_manager import settings
+from ..utils.constants import CARD_PREVIEW_WIDTH
+from ..utils.exif_utils import ExifUtils
+from .handlers.recipe_handlers import (
+ RecipeAnalysisHandler,
+ RecipeHandlerSet,
+ RecipeListingHandler,
+ RecipeManagementHandler,
+ RecipePageView,
+ RecipeQueryHandler,
+ RecipeSharingHandler,
+)
+from .recipe_route_registrar import ROUTE_DEFINITIONS
+
+logger = logging.getLogger(__name__)
+
+
+class BaseRecipeRoutes:
+ """Common dependency and startup wiring for recipe routes."""
+
+ _HANDLER_NAMES: tuple[str, ...] = tuple(
+ definition.handler_name for definition in ROUTE_DEFINITIONS
+ )
+
+ template_name: str = "recipes.html"
+
+ def __init__(self) -> None:
+ self.recipe_scanner = None
+ self.lora_scanner = None
+ self.civitai_client = None
+ self.settings = settings
+ self.server_i18n = server_i18n
+ self.template_env = jinja2.Environment(
+ loader=jinja2.FileSystemLoader(config.templates_path),
+ autoescape=True,
+ )
+
+ self._i18n_registered = False
+ self._startup_hooks_registered = False
+ self._handler_set: RecipeHandlerSet | None = None
+ self._handler_mapping: dict[str, Callable] | None = None
+
+ async def attach_dependencies(self, app: web.Application | None = None) -> None:
+ """Resolve shared services from the registry."""
+
+ await self._ensure_services()
+ self._ensure_i18n_filter()
+
+ async def ensure_dependencies_ready(self) -> None:
+ """Ensure dependencies are available for request handlers."""
+
+ if self.recipe_scanner is None or self.civitai_client is None:
+ await self.attach_dependencies()
+
+ def register_startup_hooks(self, app: web.Application) -> None:
+ """Register startup hooks once for dependency wiring."""
+
+ if self._startup_hooks_registered:
+ return
+
+ app.on_startup.append(self.attach_dependencies)
+ app.on_startup.append(self.prewarm_cache)
+ self._startup_hooks_registered = True
+
+ async def prewarm_cache(self, app: web.Application | None = None) -> None:
+ """Pre-load recipe and LoRA caches on startup."""
+
+ try:
+ await self.attach_dependencies(app)
+
+ if self.lora_scanner is not None:
+ await self.lora_scanner.get_cached_data()
+ hash_index = getattr(self.lora_scanner, "_hash_index", None)
+ if hash_index is not None and hasattr(hash_index, "_hash_to_path"):
+ _ = len(hash_index._hash_to_path)
+
+ if self.recipe_scanner is not None:
+ await self.recipe_scanner.get_cached_data(force_refresh=True)
+ except Exception as exc:
+ logger.error("Error pre-warming recipe cache: %s", exc, exc_info=True)
+
+ def to_route_mapping(self) -> Mapping[str, Callable]:
+ """Return a mapping of handler name to coroutine for registrar binding."""
+
+ if self._handler_mapping is None:
+ handler_set = self._create_handler_set()
+ self._handler_set = handler_set
+ self._handler_mapping = handler_set.to_route_mapping()
+ return self._handler_mapping
+
+ # Internal helpers -------------------------------------------------
+
+ async def _ensure_services(self) -> None:
+ if self.recipe_scanner is None:
+ self.recipe_scanner = await ServiceRegistry.get_recipe_scanner()
+ self.lora_scanner = getattr(self.recipe_scanner, "_lora_scanner", None)
+
+ if self.civitai_client is None:
+ self.civitai_client = await ServiceRegistry.get_civitai_client()
+
+ def _ensure_i18n_filter(self) -> None:
+ if not self._i18n_registered:
+ self.template_env.filters["t"] = self.server_i18n.create_template_filter()
+ self._i18n_registered = True
+
+ def get_handler_owner(self):
+ """Return the object supplying bound handler coroutines."""
+
+ if self._handler_set is None:
+ self._handler_set = self._create_handler_set()
+ return self._handler_set
+
+ def _create_handler_set(self) -> RecipeHandlerSet:
+ recipe_scanner_getter = lambda: self.recipe_scanner
+ civitai_client_getter = lambda: self.civitai_client
+
+ standalone_mode = os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "0"
+ if not standalone_mode:
+ from ..metadata_collector import get_metadata # type: ignore[import-not-found]
+ from ..metadata_collector.metadata_processor import ( # type: ignore[import-not-found]
+ MetadataProcessor,
+ )
+ from ..metadata_collector.metadata_registry import ( # type: ignore[import-not-found]
+ MetadataRegistry,
+ )
+ else: # pragma: no cover - optional dependency path
+ get_metadata = None # type: ignore[assignment]
+ MetadataProcessor = None # type: ignore[assignment]
+ MetadataRegistry = None # type: ignore[assignment]
+
+ analysis_service = RecipeAnalysisService(
+ exif_utils=ExifUtils,
+ recipe_parser_factory=RecipeParserFactory,
+ downloader_factory=get_downloader,
+ metadata_collector=get_metadata,
+ metadata_processor_cls=MetadataProcessor,
+ metadata_registry_cls=MetadataRegistry,
+ standalone_mode=standalone_mode,
+ logger=logger,
+ )
+ persistence_service = RecipePersistenceService(
+ exif_utils=ExifUtils,
+ card_preview_width=CARD_PREVIEW_WIDTH,
+ logger=logger,
+ )
+ sharing_service = RecipeSharingService(logger=logger)
+
+ page_view = RecipePageView(
+ ensure_dependencies_ready=self.ensure_dependencies_ready,
+ settings_service=self.settings,
+ server_i18n=self.server_i18n,
+ template_env=self.template_env,
+ template_name=self.template_name,
+ recipe_scanner_getter=recipe_scanner_getter,
+ logger=logger,
+ )
+ listing = RecipeListingHandler(
+ ensure_dependencies_ready=self.ensure_dependencies_ready,
+ recipe_scanner_getter=recipe_scanner_getter,
+ logger=logger,
+ )
+ query = RecipeQueryHandler(
+ ensure_dependencies_ready=self.ensure_dependencies_ready,
+ recipe_scanner_getter=recipe_scanner_getter,
+ format_recipe_file_url=listing.format_recipe_file_url,
+ logger=logger,
+ )
+ management = RecipeManagementHandler(
+ ensure_dependencies_ready=self.ensure_dependencies_ready,
+ recipe_scanner_getter=recipe_scanner_getter,
+ logger=logger,
+ persistence_service=persistence_service,
+ analysis_service=analysis_service,
+ )
+ analysis = RecipeAnalysisHandler(
+ ensure_dependencies_ready=self.ensure_dependencies_ready,
+ recipe_scanner_getter=recipe_scanner_getter,
+ civitai_client_getter=civitai_client_getter,
+ logger=logger,
+ analysis_service=analysis_service,
+ )
+ sharing = RecipeSharingHandler(
+ ensure_dependencies_ready=self.ensure_dependencies_ready,
+ recipe_scanner_getter=recipe_scanner_getter,
+ logger=logger,
+ sharing_service=sharing_service,
+ )
+
+ return RecipeHandlerSet(
+ page_view=page_view,
+ listing=listing,
+ query=query,
+ management=management,
+ analysis=analysis,
+ sharing=sharing,
+ )
+
diff --git a/py/routes/checkpoint_routes.py b/py/routes/checkpoint_routes.py
index a0f6a027..ad4c538a 100644
--- a/py/routes/checkpoint_routes.py
+++ b/py/routes/checkpoint_routes.py
@@ -2,9 +2,9 @@ import logging
from aiohttp import web
from .base_model_routes import BaseModelRoutes
+from .model_route_registrar import ModelRouteRegistrar
from ..services.checkpoint_service import CheckpointService
from ..services.service_registry import ServiceRegistry
-from ..services.metadata_service import get_default_metadata_provider
from ..config import config
logger = logging.getLogger(__name__)
@@ -14,8 +14,7 @@ class CheckpointRoutes(BaseModelRoutes):
def __init__(self):
"""Initialize Checkpoint routes with Checkpoint service"""
- # Service will be initialized later via setup_routes
- self.service = None
+ super().__init__()
self.template_name = "checkpoints.html"
async def initialize_services(self):
@@ -23,8 +22,8 @@ class CheckpointRoutes(BaseModelRoutes):
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
self.service = CheckpointService(checkpoint_scanner)
- # Initialize parent with the service
- super().__init__(self.service)
+ # Attach service dependencies
+ self.attach_service(self.service)
def setup_routes(self, app: web.Application):
"""Setup Checkpoint routes"""
@@ -34,17 +33,22 @@ class CheckpointRoutes(BaseModelRoutes):
# Setup common routes with 'checkpoints' prefix (includes page route)
super().setup_routes(app, 'checkpoints')
- def setup_specific_routes(self, app: web.Application, prefix: str):
+ def setup_specific_routes(self, registrar: ModelRouteRegistrar, prefix: str):
"""Setup Checkpoint-specific routes"""
- # Checkpoint-specific CivitAI integration
- app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_checkpoint)
-
# Checkpoint info by name
- app.router.add_get(f'/api/{prefix}/info/{{name}}', self.get_checkpoint_info)
-
+ registrar.add_prefixed_route('GET', '/api/lm/{prefix}/info/{name}', prefix, self.get_checkpoint_info)
+
# Checkpoint roots and Unet roots
- app.router.add_get(f'/api/{prefix}/checkpoints_roots', self.get_checkpoints_roots)
- app.router.add_get(f'/api/{prefix}/unet_roots', self.get_unet_roots)
+ registrar.add_prefixed_route('GET', '/api/lm/{prefix}/checkpoints_roots', prefix, self.get_checkpoints_roots)
+ registrar.add_prefixed_route('GET', '/api/lm/{prefix}/unet_roots', prefix, self.get_unet_roots)
+
+ def _validate_civitai_model_type(self, model_type: str) -> bool:
+ """Validate CivitAI model type for Checkpoint"""
+ return model_type.lower() == 'checkpoint'
+
+ def _get_expected_model_types(self) -> str:
+ """Get expected model types string for error messages"""
+ return "Checkpoint"
async def get_checkpoint_info(self, request: web.Request) -> web.Response:
"""Get detailed information for a specific checkpoint by name"""
@@ -61,54 +65,6 @@ class CheckpointRoutes(BaseModelRoutes):
logger.error(f"Error in get_checkpoint_info: {e}", exc_info=True)
return web.json_response({"error": str(e)}, status=500)
- async def get_civitai_versions_checkpoint(self, request: web.Request) -> web.Response:
- """Get available versions for a Civitai checkpoint model with local availability info"""
- try:
- model_id = request.match_info['model_id']
- metadata_provider = await get_default_metadata_provider()
- response = await metadata_provider.get_model_versions(model_id)
- if not response or not response.get('modelVersions'):
- return web.Response(status=404, text="Model not found")
-
- versions = response.get('modelVersions', [])
- model_type = response.get('type', '')
-
- # Check model type - should be Checkpoint
- if model_type.lower() != 'checkpoint':
- return web.json_response({
- 'error': f"Model type mismatch. Expected Checkpoint, got {model_type}"
- }, status=400)
-
- # Check local availability for each version
- for version in versions:
- # Find the primary model file (type="Model" and primary=true) in the files list
- model_file = next((file for file in version.get('files', [])
- if file.get('type') == 'Model' and file.get('primary') == True), None)
-
- # If no primary file found, try to find any model file
- if not model_file:
- model_file = next((file for file in version.get('files', [])
- if file.get('type') == 'Model'), None)
-
- if model_file:
- sha256 = model_file.get('hashes', {}).get('SHA256')
- if sha256:
- # Set existsLocally and localPath at the version level
- version['existsLocally'] = self.service.has_hash(sha256)
- if version['existsLocally']:
- version['localPath'] = self.service.get_path_by_hash(sha256)
-
- # Also set the model file size at the version level for easier access
- version['modelSizeKB'] = model_file.get('sizeKB')
- else:
- # No model file found in this version
- version['existsLocally'] = False
-
- return web.json_response(versions)
- except Exception as e:
- logger.error(f"Error fetching checkpoint model versions: {e}")
- return web.Response(status=500, text=str(e))
-
async def get_checkpoints_roots(self, request: web.Request) -> web.Response:
"""Return the list of checkpoint roots from config"""
try:
diff --git a/py/routes/embedding_routes.py b/py/routes/embedding_routes.py
index ab028666..d7d361ce 100644
--- a/py/routes/embedding_routes.py
+++ b/py/routes/embedding_routes.py
@@ -2,9 +2,9 @@ import logging
from aiohttp import web
from .base_model_routes import BaseModelRoutes
+from .model_route_registrar import ModelRouteRegistrar
from ..services.embedding_service import EmbeddingService
from ..services.service_registry import ServiceRegistry
-from ..services.metadata_service import get_default_metadata_provider
logger = logging.getLogger(__name__)
@@ -13,8 +13,7 @@ class EmbeddingRoutes(BaseModelRoutes):
def __init__(self):
"""Initialize Embedding routes with Embedding service"""
- # Service will be initialized later via setup_routes
- self.service = None
+ super().__init__()
self.template_name = "embeddings.html"
async def initialize_services(self):
@@ -22,8 +21,8 @@ class EmbeddingRoutes(BaseModelRoutes):
embedding_scanner = await ServiceRegistry.get_embedding_scanner()
self.service = EmbeddingService(embedding_scanner)
- # Initialize parent with the service
- super().__init__(self.service)
+ # Attach service dependencies
+ self.attach_service(self.service)
def setup_routes(self, app: web.Application):
"""Setup Embedding routes"""
@@ -33,13 +32,18 @@ class EmbeddingRoutes(BaseModelRoutes):
# Setup common routes with 'embeddings' prefix (includes page route)
super().setup_routes(app, 'embeddings')
- def setup_specific_routes(self, app: web.Application, prefix: str):
+ def setup_specific_routes(self, registrar: ModelRouteRegistrar, prefix: str):
"""Setup Embedding-specific routes"""
- # Embedding-specific CivitAI integration
- app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_embedding)
-
# Embedding info by name
- app.router.add_get(f'/api/{prefix}/info/{{name}}', self.get_embedding_info)
+ registrar.add_prefixed_route('GET', '/api/lm/{prefix}/info/{name}', prefix, self.get_embedding_info)
+
+ def _validate_civitai_model_type(self, model_type: str) -> bool:
+ """Validate CivitAI model type for Embedding"""
+ return model_type.lower() == 'textualinversion'
+
+ def _get_expected_model_types(self) -> str:
+ """Get expected model types string for error messages"""
+ return "TextualInversion"
async def get_embedding_info(self, request: web.Request) -> web.Response:
"""Get detailed information for a specific embedding by name"""
@@ -55,51 +59,3 @@ class EmbeddingRoutes(BaseModelRoutes):
except Exception as e:
logger.error(f"Error in get_embedding_info: {e}", exc_info=True)
return web.json_response({"error": str(e)}, status=500)
-
- async def get_civitai_versions_embedding(self, request: web.Request) -> web.Response:
- """Get available versions for a Civitai embedding model with local availability info"""
- try:
- model_id = request.match_info['model_id']
- metadata_provider = await get_default_metadata_provider()
- response = await metadata_provider.get_model_versions(model_id)
- if not response or not response.get('modelVersions'):
- return web.Response(status=404, text="Model not found")
-
- versions = response.get('modelVersions', [])
- model_type = response.get('type', '')
-
- # Check model type - should be TextualInversion (Embedding)
- if model_type.lower() not in ['textualinversion', 'embedding']:
- return web.json_response({
- 'error': f"Model type mismatch. Expected TextualInversion/Embedding, got {model_type}"
- }, status=400)
-
- # Check local availability for each version
- for version in versions:
- # Find the primary model file (type="Model" and primary=true) in the files list
- model_file = next((file for file in version.get('files', [])
- if file.get('type') == 'Model' and file.get('primary') == True), None)
-
- # If no primary file found, try to find any model file
- if not model_file:
- model_file = next((file for file in version.get('files', [])
- if file.get('type') == 'Model'), None)
-
- if model_file:
- sha256 = model_file.get('hashes', {}).get('SHA256')
- if sha256:
- # Set existsLocally and localPath at the version level
- version['existsLocally'] = self.service.has_hash(sha256)
- if version['existsLocally']:
- version['localPath'] = self.service.get_path_by_hash(sha256)
-
- # Also set the model file size at the version level for easier access
- version['modelSizeKB'] = model_file.get('sizeKB')
- else:
- # No model file found in this version
- version['existsLocally'] = False
-
- return web.json_response(versions)
- except Exception as e:
- logger.error(f"Error fetching embedding model versions: {e}")
- return web.Response(status=500, text=str(e))
diff --git a/py/routes/example_images_route_registrar.py b/py/routes/example_images_route_registrar.py
new file mode 100644
index 00000000..aa12c3b1
--- /dev/null
+++ b/py/routes/example_images_route_registrar.py
@@ -0,0 +1,62 @@
+"""Route registrar for example image endpoints."""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Callable, Iterable, Mapping
+
+from aiohttp import web
+
+
+@dataclass(frozen=True)
+class RouteDefinition:
+ """Declarative configuration for an HTTP route."""
+
+ method: str
+ path: str
+ handler_name: str
+
+
+ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
+ RouteDefinition("POST", "/api/lm/download-example-images", "download_example_images"),
+ RouteDefinition("POST", "/api/lm/import-example-images", "import_example_images"),
+ RouteDefinition("GET", "/api/lm/example-images-status", "get_example_images_status"),
+ RouteDefinition("POST", "/api/lm/pause-example-images", "pause_example_images"),
+ RouteDefinition("POST", "/api/lm/resume-example-images", "resume_example_images"),
+ RouteDefinition("POST", "/api/lm/open-example-images-folder", "open_example_images_folder"),
+ RouteDefinition("GET", "/api/lm/example-image-files", "get_example_image_files"),
+ RouteDefinition("GET", "/api/lm/has-example-images", "has_example_images"),
+ RouteDefinition("POST", "/api/lm/delete-example-image", "delete_example_image"),
+ RouteDefinition("POST", "/api/lm/force-download-example-images", "force_download_example_images"),
+ RouteDefinition("POST", "/api/lm/cleanup-example-image-folders", "cleanup_example_image_folders"),
+)
+
+
+class ExampleImagesRouteRegistrar:
+ """Bind declarative example image routes to an aiohttp router."""
+
+ _METHOD_MAP = {
+ "GET": "add_get",
+ "POST": "add_post",
+ "PUT": "add_put",
+ "DELETE": "add_delete",
+ }
+
+ def __init__(self, app: web.Application) -> None:
+ self._app = app
+
+ def register_routes(
+ self,
+ handler_lookup: Mapping[str, Callable[[web.Request], object]],
+ *,
+ definitions: Iterable[RouteDefinition] = ROUTE_DEFINITIONS,
+ ) -> None:
+ """Register each route definition using the supplied handlers."""
+
+ for definition in definitions:
+ handler = handler_lookup[definition.handler_name]
+ self._bind_route(definition.method, definition.path, handler)
+
+ def _bind_route(self, method: str, path: str, handler: Callable[[web.Request], object]) -> None:
+ add_method_name = self._METHOD_MAP[method.upper()]
+ add_method = getattr(self._app.router, add_method_name)
+ add_method(path, handler)
diff --git a/py/routes/example_images_routes.py b/py/routes/example_images_routes.py
index 9f20b470..aed4e0fd 100644
--- a/py/routes/example_images_routes.py
+++ b/py/routes/example_images_routes.py
@@ -1,74 +1,88 @@
+from __future__ import annotations
+
import logging
-from ..utils.example_images_download_manager import DownloadManager
-from ..utils.example_images_processor import ExampleImagesProcessor
+from typing import Callable, Mapping
+
+from aiohttp import web
+
+from .example_images_route_registrar import ExampleImagesRouteRegistrar
+from .handlers.example_images_handlers import (
+ ExampleImagesDownloadHandler,
+ ExampleImagesFileHandler,
+ ExampleImagesHandlerSet,
+ ExampleImagesManagementHandler,
+)
+from ..services.use_cases.example_images import (
+ DownloadExampleImagesUseCase,
+ ImportExampleImagesUseCase,
+)
+from ..utils.example_images_download_manager import (
+ DownloadManager,
+ get_default_download_manager,
+)
from ..utils.example_images_file_manager import ExampleImagesFileManager
-from ..services.websocket_manager import ws_manager
+from ..utils.example_images_processor import ExampleImagesProcessor
+from ..services.example_images_cleanup_service import ExampleImagesCleanupService
logger = logging.getLogger(__name__)
+
class ExampleImagesRoutes:
- """Routes for example images related functionality"""
-
- @staticmethod
- def setup_routes(app):
- """Register example images routes"""
- app.router.add_post('/api/download-example-images', ExampleImagesRoutes.download_example_images)
- app.router.add_post('/api/import-example-images', ExampleImagesRoutes.import_example_images)
- app.router.add_get('/api/example-images-status', ExampleImagesRoutes.get_example_images_status)
- app.router.add_post('/api/pause-example-images', ExampleImagesRoutes.pause_example_images)
- app.router.add_post('/api/resume-example-images', ExampleImagesRoutes.resume_example_images)
- app.router.add_post('/api/open-example-images-folder', ExampleImagesRoutes.open_example_images_folder)
- app.router.add_get('/api/example-image-files', ExampleImagesRoutes.get_example_image_files)
- app.router.add_get('/api/has-example-images', ExampleImagesRoutes.has_example_images)
- app.router.add_post('/api/delete-example-image', ExampleImagesRoutes.delete_example_image)
- app.router.add_post('/api/force-download-example-images', ExampleImagesRoutes.force_download_example_images)
+ """Route controller for example image endpoints."""
- @staticmethod
- async def download_example_images(request):
- """Download example images for models from Civitai"""
- return await DownloadManager.start_download(request)
+ def __init__(
+ self,
+ *,
+ ws_manager,
+ download_manager: DownloadManager | None = None,
+ processor=ExampleImagesProcessor,
+ file_manager=ExampleImagesFileManager,
+ cleanup_service: ExampleImagesCleanupService | None = None,
+ ) -> None:
+ if ws_manager is None:
+ raise ValueError("ws_manager is required")
+ self._download_manager = download_manager or get_default_download_manager(ws_manager)
+ self._processor = processor
+ self._file_manager = file_manager
+ self._cleanup_service = cleanup_service or ExampleImagesCleanupService()
+ self._handler_set: ExampleImagesHandlerSet | None = None
+ self._handler_mapping: Mapping[str, Callable[[web.Request], web.StreamResponse]] | None = None
- @staticmethod
- async def get_example_images_status(request):
- """Get the current status of example images download"""
- return await DownloadManager.get_status(request)
+ @classmethod
+ def setup_routes(cls, app: web.Application, *, ws_manager) -> None:
+ """Register routes on the given aiohttp application using default wiring."""
- @staticmethod
- async def pause_example_images(request):
- """Pause the example images download"""
- return await DownloadManager.pause_download(request)
+ controller = cls(ws_manager=ws_manager)
+ controller.register(app)
- @staticmethod
- async def resume_example_images(request):
- """Resume the example images download"""
- return await DownloadManager.resume_download(request)
-
- @staticmethod
- async def open_example_images_folder(request):
- """Open the example images folder for a specific model"""
- return await ExampleImagesFileManager.open_folder(request)
+ def register(self, app: web.Application) -> None:
+ """Bind the controller's handlers to the aiohttp router."""
- @staticmethod
- async def get_example_image_files(request):
- """Get list of example image files for a specific model"""
- return await ExampleImagesFileManager.get_files(request)
+ registrar = ExampleImagesRouteRegistrar(app)
+ registrar.register_routes(self.to_route_mapping())
- @staticmethod
- async def import_example_images(request):
- """Import local example images for a model"""
- return await ExampleImagesProcessor.import_images(request)
-
- @staticmethod
- async def has_example_images(request):
- """Check if example images folder exists and is not empty for a model"""
- return await ExampleImagesFileManager.has_images(request)
+ def to_route_mapping(self) -> Mapping[str, Callable[[web.Request], web.StreamResponse]]:
+ """Return the registrar-compatible mapping of handler names to callables."""
- @staticmethod
- async def delete_example_image(request):
- """Delete a custom example image for a model"""
- return await ExampleImagesProcessor.delete_custom_image(request)
+ if self._handler_mapping is None:
+ handler_set = self._build_handler_set()
+ self._handler_set = handler_set
+ self._handler_mapping = handler_set.to_route_mapping()
+ return self._handler_mapping
- @staticmethod
- async def force_download_example_images(request):
- """Force download example images for specific models"""
- return await DownloadManager.start_force_download(request)
\ No newline at end of file
+ def _build_handler_set(self) -> ExampleImagesHandlerSet:
+ logger.debug("Building ExampleImagesHandlerSet with %s, %s, %s", self._download_manager, self._processor, self._file_manager)
+ download_use_case = DownloadExampleImagesUseCase(download_manager=self._download_manager)
+ download_handler = ExampleImagesDownloadHandler(download_use_case, self._download_manager)
+ import_use_case = ImportExampleImagesUseCase(processor=self._processor)
+ management_handler = ExampleImagesManagementHandler(
+ import_use_case,
+ self._processor,
+ self._cleanup_service,
+ )
+ file_handler = ExampleImagesFileHandler(self._file_manager)
+ return ExampleImagesHandlerSet(
+ download=download_handler,
+ management=management_handler,
+ files=file_handler,
+ )
diff --git a/py/routes/handlers/example_images_handlers.py b/py/routes/handlers/example_images_handlers.py
new file mode 100644
index 00000000..111013e2
--- /dev/null
+++ b/py/routes/handlers/example_images_handlers.py
@@ -0,0 +1,159 @@
+"""Handler set for example image routes."""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Callable, Mapping
+
+from aiohttp import web
+
+from ...services.use_cases.example_images import (
+ DownloadExampleImagesConfigurationError,
+ DownloadExampleImagesInProgressError,
+ DownloadExampleImagesUseCase,
+ ImportExampleImagesUseCase,
+ ImportExampleImagesValidationError,
+)
+from ...utils.example_images_download_manager import (
+ DownloadConfigurationError,
+ DownloadInProgressError,
+ DownloadNotRunningError,
+ ExampleImagesDownloadError,
+)
+from ...utils.example_images_processor import ExampleImagesImportError
+
+
+class ExampleImagesDownloadHandler:
+ """HTTP adapters for download-related example image endpoints."""
+
+ def __init__(
+ self,
+ download_use_case: DownloadExampleImagesUseCase,
+ download_manager,
+ ) -> None:
+ self._download_use_case = download_use_case
+ self._download_manager = download_manager
+
+ async def download_example_images(self, request: web.Request) -> web.StreamResponse:
+ try:
+ payload = await request.json()
+ result = await self._download_use_case.execute(payload)
+ return web.json_response(result)
+ except DownloadExampleImagesInProgressError as exc:
+ response = {
+ 'success': False,
+ 'error': str(exc),
+ 'status': exc.progress,
+ }
+ return web.json_response(response, status=400)
+ except DownloadExampleImagesConfigurationError as exc:
+ return web.json_response({'success': False, 'error': str(exc)}, status=400)
+ except ExampleImagesDownloadError as exc:
+ return web.json_response({'success': False, 'error': str(exc)}, status=500)
+
+ async def get_example_images_status(self, request: web.Request) -> web.StreamResponse:
+ result = await self._download_manager.get_status(request)
+ return web.json_response(result)
+
+ async def pause_example_images(self, request: web.Request) -> web.StreamResponse:
+ try:
+ result = await self._download_manager.pause_download(request)
+ return web.json_response(result)
+ except DownloadNotRunningError as exc:
+ return web.json_response({'success': False, 'error': str(exc)}, status=400)
+
+ async def resume_example_images(self, request: web.Request) -> web.StreamResponse:
+ try:
+ result = await self._download_manager.resume_download(request)
+ return web.json_response(result)
+ except DownloadNotRunningError as exc:
+ return web.json_response({'success': False, 'error': str(exc)}, status=400)
+
+ async def force_download_example_images(self, request: web.Request) -> web.StreamResponse:
+ try:
+ payload = await request.json()
+ result = await self._download_manager.start_force_download(payload)
+ return web.json_response(result)
+ except DownloadInProgressError as exc:
+ response = {
+ 'success': False,
+ 'error': str(exc),
+ 'status': exc.progress_snapshot,
+ }
+ return web.json_response(response, status=400)
+ except DownloadConfigurationError as exc:
+ return web.json_response({'success': False, 'error': str(exc)}, status=400)
+ except ExampleImagesDownloadError as exc:
+ return web.json_response({'success': False, 'error': str(exc)}, status=500)
+
+
+class ExampleImagesManagementHandler:
+    """HTTP adapters for import, delete, and cleanup endpoints."""
+
+ def __init__(self, import_use_case: ImportExampleImagesUseCase, processor, cleanup_service) -> None:
+ self._import_use_case = import_use_case
+ self._processor = processor
+ self._cleanup_service = cleanup_service
+
+ async def import_example_images(self, request: web.Request) -> web.StreamResponse:
+ try:
+ result = await self._import_use_case.execute(request)
+ return web.json_response(result)
+ except ImportExampleImagesValidationError as exc:
+ return web.json_response({'success': False, 'error': str(exc)}, status=400)
+ except ExampleImagesImportError as exc:
+ return web.json_response({'success': False, 'error': str(exc)}, status=500)
+
+ async def delete_example_image(self, request: web.Request) -> web.StreamResponse:
+ return await self._processor.delete_custom_image(request)
+
+ async def cleanup_example_image_folders(self, request: web.Request) -> web.StreamResponse:
+ result = await self._cleanup_service.cleanup_example_image_folders()
+
+ if result.get('success') or result.get('partial_success'):
+ return web.json_response(result, status=200)
+
+ error_code = result.get('error_code')
+ status = 400 if error_code in {'path_not_configured', 'path_not_found'} else 500
+ return web.json_response(result, status=status)
+
+
+class ExampleImagesFileHandler:
+ """HTTP adapters for filesystem-centric endpoints."""
+
+ def __init__(self, file_manager) -> None:
+ self._file_manager = file_manager
+
+ async def open_example_images_folder(self, request: web.Request) -> web.StreamResponse:
+ return await self._file_manager.open_folder(request)
+
+ async def get_example_image_files(self, request: web.Request) -> web.StreamResponse:
+ return await self._file_manager.get_files(request)
+
+ async def has_example_images(self, request: web.Request) -> web.StreamResponse:
+ return await self._file_manager.has_images(request)
+
+
+@dataclass(frozen=True)
+class ExampleImagesHandlerSet:
+ """Aggregate of handlers exposed to the registrar."""
+
+ download: ExampleImagesDownloadHandler
+ management: ExampleImagesManagementHandler
+ files: ExampleImagesFileHandler
+
+ def to_route_mapping(self) -> Mapping[str, Callable[[web.Request], web.StreamResponse]]:
+ """Flatten handler methods into the registrar mapping."""
+
+ return {
+ "download_example_images": self.download.download_example_images,
+ "get_example_images_status": self.download.get_example_images_status,
+ "pause_example_images": self.download.pause_example_images,
+ "resume_example_images": self.download.resume_example_images,
+ "force_download_example_images": self.download.force_download_example_images,
+ "import_example_images": self.management.import_example_images,
+ "delete_example_image": self.management.delete_example_image,
+ "cleanup_example_image_folders": self.management.cleanup_example_image_folders,
+ "open_example_images_folder": self.files.open_example_images_folder,
+ "get_example_image_files": self.files.get_example_image_files,
+ "has_example_images": self.files.has_example_images,
+ }
diff --git a/py/routes/handlers/model_handlers.py b/py/routes/handlers/model_handlers.py
new file mode 100644
index 00000000..a6fe4091
--- /dev/null
+++ b/py/routes/handlers/model_handlers.py
@@ -0,0 +1,1020 @@
+"""Handlers for base model routes."""
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import os
+from dataclasses import dataclass
+from typing import Awaitable, Callable, Dict, Iterable, Mapping, Optional
+
+from aiohttp import web
+import jinja2
+
+from ...config import config
+from ...services.download_coordinator import DownloadCoordinator
+from ...services.metadata_sync_service import MetadataSyncService
+from ...services.model_file_service import ModelMoveService
+from ...services.preview_asset_service import PreviewAssetService
+from ...services.settings_manager import SettingsManager
+from ...services.tag_update_service import TagUpdateService
+from ...services.use_cases import (
+ AutoOrganizeInProgressError,
+ AutoOrganizeUseCase,
+ BulkMetadataRefreshUseCase,
+ DownloadModelEarlyAccessError,
+ DownloadModelUseCase,
+ DownloadModelValidationError,
+ MetadataRefreshProgressReporter,
+)
+from ...services.websocket_manager import WebSocketManager
+from ...services.websocket_progress_callback import WebSocketProgressCallback
+from ...utils.file_utils import calculate_sha256
+
+
+class ModelPageView:
+ """Render the HTML view for model listings."""
+
+ def __init__(
+ self,
+ *,
+ template_env: jinja2.Environment,
+ template_name: str,
+ service,
+ settings_service: SettingsManager,
+ server_i18n,
+ logger: logging.Logger,
+ ) -> None:
+ self._template_env = template_env
+ self._template_name = template_name
+ self._service = service
+ self._settings = settings_service
+ self._server_i18n = server_i18n
+ self._logger = logger
+
+ async def handle(self, request: web.Request) -> web.Response:
+ try:
+ is_initializing = (
+ self._service.scanner._cache is None
+ or (
+ hasattr(self._service.scanner, "is_initializing")
+ and callable(self._service.scanner.is_initializing)
+ and self._service.scanner.is_initializing()
+ )
+ or (
+ hasattr(self._service.scanner, "_is_initializing")
+ and self._service.scanner._is_initializing
+ )
+ )
+
+ if not self._template_env or not self._template_name:
+ return web.Response(
+ text="Template environment or template name not set",
+ status=500,
+ )
+
+ user_language = self._settings.get("language", "en")
+ self._server_i18n.set_locale(user_language)
+
+ if not hasattr(self._template_env, "_i18n_filter_added"):
+ self._template_env.filters["t"] = self._server_i18n.create_template_filter()
+ self._template_env._i18n_filter_added = True # type: ignore[attr-defined]
+
+ template_context = {
+ "is_initializing": is_initializing,
+ "settings": self._settings,
+ "request": request,
+ "folders": [],
+ "t": self._server_i18n.get_translation,
+ }
+
+ if not is_initializing:
+ try:
+ cache = await self._service.scanner.get_cached_data(force_refresh=False)
+ template_context["folders"] = getattr(cache, "folders", [])
+ except Exception as cache_error: # pragma: no cover - logging path
+ self._logger.error("Error loading cache data: %s", cache_error)
+ template_context["is_initializing"] = True
+
+ rendered = self._template_env.get_template(self._template_name).render(**template_context)
+ return web.Response(text=rendered, content_type="text/html")
+ except Exception as exc: # pragma: no cover - logging path
+ self._logger.error("Error handling models page: %s", exc, exc_info=True)
+ return web.Response(text="Error loading models page", status=500)
+
+
+class ModelListingHandler:
+ """Provide paginated model listings."""
+
+ def __init__(
+ self,
+ *,
+ service,
+ parse_specific_params: Callable[[web.Request], Dict],
+ logger: logging.Logger,
+ ) -> None:
+ self._service = service
+ self._parse_specific_params = parse_specific_params
+ self._logger = logger
+
+ async def get_models(self, request: web.Request) -> web.Response:
+ try:
+ params = self._parse_common_params(request)
+ result = await self._service.get_paginated_data(**params)
+ formatted_result = {
+ "items": [await self._service.format_response(item) for item in result["items"]],
+ "total": result["total"],
+ "page": result["page"],
+ "page_size": result["page_size"],
+ "total_pages": result["total_pages"],
+ }
+ return web.json_response(formatted_result)
+ except Exception as exc:
+ self._logger.error("Error retrieving %ss: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"error": str(exc)}, status=500)
+
+ def _parse_common_params(self, request: web.Request) -> Dict:
+ page = int(request.query.get("page", "1"))
+ page_size = min(int(request.query.get("page_size", "20")), 100)
+ sort_by = request.query.get("sort_by", "name")
+ folder = request.query.get("folder")
+ search = request.query.get("search")
+ fuzzy_search = request.query.get("fuzzy_search", "false").lower() == "true"
+
+ base_models = request.query.getall("base_model", [])
+ tags = request.query.getall("tag", [])
+ favorites_only = request.query.get("favorites_only", "false").lower() == "true"
+
+ search_options = {
+ "filename": request.query.get("search_filename", "true").lower() == "true",
+ "modelname": request.query.get("search_modelname", "true").lower() == "true",
+ "tags": request.query.get("search_tags", "false").lower() == "true",
+ "creator": request.query.get("search_creator", "false").lower() == "true",
+ "recursive": request.query.get("recursive", "true").lower() == "true",
+ }
+
+ hash_filters: Dict[str, object] = {}
+ if "hash" in request.query:
+ hash_filters["single_hash"] = request.query["hash"]
+ elif "hashes" in request.query:
+ try:
+ hash_list = json.loads(request.query["hashes"])
+ if isinstance(hash_list, list):
+ hash_filters["multiple_hashes"] = hash_list
+ except (json.JSONDecodeError, TypeError):
+ pass
+
+ return {
+ "page": page,
+ "page_size": page_size,
+ "sort_by": sort_by,
+ "folder": folder,
+ "search": search,
+ "fuzzy_search": fuzzy_search,
+ "base_models": base_models,
+ "tags": tags,
+ "search_options": search_options,
+ "hash_filters": hash_filters,
+ "favorites_only": favorites_only,
+ **self._parse_specific_params(request),
+ }
+
+
+class ModelManagementHandler:
+ """Handle mutation operations on models."""
+
+ def __init__(
+ self,
+ *,
+ service,
+ logger: logging.Logger,
+ metadata_sync: MetadataSyncService,
+ preview_service: PreviewAssetService,
+ tag_update_service: TagUpdateService,
+ lifecycle_service,
+ ) -> None:
+ self._service = service
+ self._logger = logger
+ self._metadata_sync = metadata_sync
+ self._preview_service = preview_service
+ self._tag_update_service = tag_update_service
+ self._lifecycle_service = lifecycle_service
+
+ async def delete_model(self, request: web.Request) -> web.Response:
+ try:
+ data = await request.json()
+ file_path = data.get("file_path")
+ if not file_path:
+ return web.Response(text="Model path is required", status=400)
+
+ result = await self._lifecycle_service.delete_model(file_path)
+ return web.json_response(result)
+ except ValueError as exc:
+ return web.json_response({"success": False, "error": str(exc)}, status=400)
+ except Exception as exc:
+ self._logger.error("Error deleting model: %s", exc, exc_info=True)
+ return web.Response(text=str(exc), status=500)
+
+ async def exclude_model(self, request: web.Request) -> web.Response:
+ try:
+ data = await request.json()
+ file_path = data.get("file_path")
+ if not file_path:
+ return web.Response(text="Model path is required", status=400)
+
+ result = await self._lifecycle_service.exclude_model(file_path)
+ return web.json_response(result)
+ except ValueError as exc:
+ return web.json_response({"success": False, "error": str(exc)}, status=400)
+ except Exception as exc:
+ self._logger.error("Error excluding model: %s", exc, exc_info=True)
+ return web.Response(text=str(exc), status=500)
+
+ async def fetch_civitai(self, request: web.Request) -> web.Response:
+ try:
+ data = await request.json()
+ file_path = data.get("file_path")
+ if not file_path:
+ return web.json_response({"success": False, "error": "File path is required"}, status=400)
+
+ cache = await self._service.scanner.get_cached_data()
+ model_data = next((item for item in cache.raw_data if item["file_path"] == file_path), None)
+ if not model_data:
+ return web.json_response({"success": False, "error": "Model not found in cache"}, status=404)
+ if not model_data.get("sha256"):
+ return web.json_response({"success": False, "error": "No SHA256 hash found"}, status=400)
+
+ success, error = await self._metadata_sync.fetch_and_update_model(
+ sha256=model_data["sha256"],
+ file_path=file_path,
+ model_data=model_data,
+ update_cache_func=self._service.scanner.update_single_model_cache,
+ )
+ if not success:
+ return web.json_response({"success": False, "error": error})
+
+ formatted_metadata = await self._service.format_response(model_data)
+ return web.json_response({"success": True, "metadata": formatted_metadata})
+ except Exception as exc:
+ self._logger.error("Error fetching from CivitAI: %s", exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def relink_civitai(self, request: web.Request) -> web.Response:
+ try:
+ data = await request.json()
+ file_path = data.get("file_path")
+ model_id = data.get("model_id")
+ model_version_id = data.get("model_version_id")
+
+ if not file_path or model_id is None:
+ return web.json_response(
+ {"success": False, "error": "Both file_path and model_id are required"},
+ status=400,
+ )
+
+ metadata_path = os.path.splitext(file_path)[0] + ".metadata.json"
+ local_metadata = await self._metadata_sync.load_local_metadata(metadata_path)
+
+ updated_metadata = await self._metadata_sync.relink_metadata(
+ file_path=file_path,
+ metadata=local_metadata,
+ model_id=int(model_id),
+ model_version_id=int(model_version_id) if model_version_id else None,
+ )
+
+ await self._service.scanner.update_single_model_cache(
+ file_path, file_path, updated_metadata
+ )
+
+ message = (
+ f"Model successfully re-linked to Civitai model {model_id}"
+ + (f" version {model_version_id}" if model_version_id else "")
+ )
+ return web.json_response(
+ {"success": True, "message": message, "hash": updated_metadata.get("sha256", "")}
+ )
+ except Exception as exc:
+ self._logger.error("Error re-linking to CivitAI: %s", exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def replace_preview(self, request: web.Request) -> web.Response:
+ try:
+ reader = await request.multipart()
+
+ field = await reader.next()
+ if field is None or field.name != "preview_file":
+ raise ValueError("Expected 'preview_file' field")
+ content_type = field.headers.get("Content-Type", "image/png")
+ content_disposition = field.headers.get("Content-Disposition", "")
+
+ original_filename = None
+ import re
+
+ match = re.search(r'filename="(.*?)"', content_disposition)
+ if match:
+ original_filename = match.group(1)
+
+ preview_data = await field.read()
+
+ field = await reader.next()
+ if field is None or field.name != "model_path":
+ raise ValueError("Expected 'model_path' field")
+ model_path = (await field.read()).decode()
+
+ nsfw_level = 0
+ field = await reader.next()
+ if field and field.name == "nsfw_level":
+ try:
+ nsfw_level = int((await field.read()).decode())
+ except (ValueError, TypeError):
+ self._logger.warning("Invalid NSFW level format, using default 0")
+
+ result = await self._preview_service.replace_preview(
+ model_path=model_path,
+ preview_data=preview_data,
+ content_type=content_type,
+ original_filename=original_filename,
+ nsfw_level=nsfw_level,
+ update_preview_in_cache=self._service.scanner.update_preview_in_cache,
+ metadata_loader=self._metadata_sync.load_local_metadata,
+ )
+
+ return web.json_response(
+ {
+ "success": True,
+ "preview_url": config.get_preview_static_url(result["preview_path"]),
+ "preview_nsfw_level": result["preview_nsfw_level"],
+ }
+ )
+ except Exception as exc:
+ self._logger.error("Error replacing preview: %s", exc, exc_info=True)
+ return web.Response(text=str(exc), status=500)
+
+ async def save_metadata(self, request: web.Request) -> web.Response:
+ try:
+ data = await request.json()
+ file_path = data.get("file_path")
+ if not file_path:
+ return web.Response(text="File path is required", status=400)
+
+ metadata_updates = {k: v for k, v in data.items() if k != "file_path"}
+
+ await self._metadata_sync.save_metadata_updates(
+ file_path=file_path,
+ updates=metadata_updates,
+ metadata_loader=self._metadata_sync.load_local_metadata,
+ update_cache=self._service.scanner.update_single_model_cache,
+ )
+
+ if "model_name" in metadata_updates:
+ cache = await self._service.scanner.get_cached_data()
+ await cache.resort()
+
+ return web.json_response({"success": True})
+ except Exception as exc:
+ self._logger.error("Error saving metadata: %s", exc, exc_info=True)
+ return web.Response(text=str(exc), status=500)
+
+ async def add_tags(self, request: web.Request) -> web.Response:
+ try:
+ data = await request.json()
+ file_path = data.get("file_path")
+ new_tags = data.get("tags", [])
+
+ if not file_path:
+ return web.Response(text="File path is required", status=400)
+
+ if not isinstance(new_tags, list):
+ return web.Response(text="Tags must be a list", status=400)
+
+ tags = await self._tag_update_service.add_tags(
+ file_path=file_path,
+ new_tags=new_tags,
+ metadata_loader=self._metadata_sync.load_local_metadata,
+ update_cache=self._service.scanner.update_single_model_cache,
+ )
+
+ return web.json_response({"success": True, "tags": tags})
+ except Exception as exc:
+ self._logger.error("Error adding tags: %s", exc, exc_info=True)
+ return web.Response(text=str(exc), status=500)
+
+ async def rename_model(self, request: web.Request) -> web.Response:
+ try:
+ data = await request.json()
+ file_path = data.get("file_path")
+ new_file_name = data.get("new_file_name")
+
+ if not file_path or not new_file_name:
+ return web.json_response(
+ {
+ "success": False,
+ "error": "File path and new file name are required",
+ },
+ status=400,
+ )
+
+ result = await self._lifecycle_service.rename_model(
+ file_path=file_path, new_file_name=new_file_name
+ )
+
+ return web.json_response(
+ {
+ **result,
+ "new_preview_path": config.get_preview_static_url(
+ result.get("new_preview_path")
+ ),
+ }
+ )
+ except ValueError as exc:
+ return web.json_response({"success": False, "error": str(exc)}, status=400)
+ except Exception as exc:
+ self._logger.error("Error renaming model: %s", exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def bulk_delete_models(self, request: web.Request) -> web.Response:
+ try:
+ data = await request.json()
+ file_paths = data.get("file_paths", [])
+ if not file_paths:
+ return web.json_response(
+ {
+ "success": False,
+ "error": "No file paths provided for deletion",
+ },
+ status=400,
+ )
+
+ result = await self._lifecycle_service.bulk_delete_models(file_paths)
+ return web.json_response(result)
+ except ValueError as exc:
+ return web.json_response({"success": False, "error": str(exc)}, status=400)
+ except Exception as exc:
+ self._logger.error("Error in bulk delete: %s", exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def verify_duplicates(self, request: web.Request) -> web.Response:
+ try:
+ data = await request.json()
+ file_paths = data.get("file_paths", [])
+
+ if not file_paths:
+ return web.json_response(
+ {"success": False, "error": "No file paths provided for verification"},
+ status=400,
+ )
+
+ results = await self._metadata_sync.verify_duplicate_hashes(
+ file_paths=file_paths,
+ metadata_loader=self._metadata_sync.load_local_metadata,
+ hash_calculator=calculate_sha256,
+ update_cache=self._service.scanner.update_single_model_cache,
+ )
+
+ return web.json_response({"success": True, **results})
+ except Exception as exc:
+ self._logger.error("Error verifying duplicate models: %s", exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+
+class ModelQueryHandler:
+    """Serve model query endpoints, including lookups, listings, and cache scans."""
+
+ def __init__(self, *, service, logger: logging.Logger) -> None:
+ self._service = service
+ self._logger = logger
+
+ async def get_top_tags(self, request: web.Request) -> web.Response:
+ try:
+ limit = int(request.query.get("limit", "20"))
+ if limit < 1 or limit > 100:
+ limit = 20
+ top_tags = await self._service.get_top_tags(limit)
+ return web.json_response({"success": True, "tags": top_tags})
+ except Exception as exc:
+ self._logger.error("Error getting top tags: %s", exc, exc_info=True)
+ return web.json_response({"success": False, "error": "Internal server error"}, status=500)
+
+ async def get_base_models(self, request: web.Request) -> web.Response:
+ try:
+ limit = int(request.query.get("limit", "20"))
+ if limit < 1 or limit > 100:
+ limit = 20
+ base_models = await self._service.get_base_models(limit)
+ return web.json_response({"success": True, "base_models": base_models})
+ except Exception as exc:
+ self._logger.error("Error retrieving base models: %s", exc)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def scan_models(self, request: web.Request) -> web.Response:
+ try:
+ full_rebuild = request.query.get("full_rebuild", "false").lower() == "true"
+ await self._service.scan_models(force_refresh=True, rebuild_cache=full_rebuild)
+ return web.json_response({"status": "success", "message": f"{self._service.model_type.capitalize()} scan completed"})
+ except Exception as exc:
+ self._logger.error("Error scanning %ss: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"error": str(exc)}, status=500)
+
+ async def get_model_roots(self, request: web.Request) -> web.Response:
+ try:
+ roots = self._service.get_model_roots()
+ return web.json_response({"success": True, "roots": roots})
+ except Exception as exc:
+ self._logger.error("Error getting %s roots: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def get_folders(self, request: web.Request) -> web.Response:
+ try:
+ cache = await self._service.scanner.get_cached_data()
+ return web.json_response({"folders": cache.folders})
+ except Exception as exc:
+ self._logger.error("Error getting folders: %s", exc)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def get_folder_tree(self, request: web.Request) -> web.Response:
+ try:
+ model_root = request.query.get("model_root")
+ if not model_root:
+ return web.json_response({"success": False, "error": "model_root parameter is required"}, status=400)
+ folder_tree = await self._service.get_folder_tree(model_root)
+ return web.json_response({"success": True, "tree": folder_tree})
+ except Exception as exc:
+ self._logger.error("Error getting folder tree: %s", exc)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def get_unified_folder_tree(self, request: web.Request) -> web.Response:
+ try:
+ unified_tree = await self._service.get_unified_folder_tree()
+ return web.json_response({"success": True, "tree": unified_tree})
+ except Exception as exc:
+ self._logger.error("Error getting unified folder tree: %s", exc)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def find_duplicate_models(self, request: web.Request) -> web.Response:
+ try:
+ duplicates = self._service.find_duplicate_hashes()
+ result = []
+ cache = await self._service.scanner.get_cached_data()
+ for sha256, paths in duplicates.items():
+ group = {"hash": sha256, "models": []}
+ for path in paths:
+ model = next((m for m in cache.raw_data if m["file_path"] == path), None)
+ if model:
+ group["models"].append(await self._service.format_response(model))
+ primary_path = self._service.get_path_by_hash(sha256)
+ if primary_path and primary_path not in paths:
+ primary_model = next((m for m in cache.raw_data if m["file_path"] == primary_path), None)
+ if primary_model:
+ group["models"].insert(0, await self._service.format_response(primary_model))
+ if len(group["models"]) > 1:
+ result.append(group)
+ return web.json_response({"success": True, "duplicates": result, "count": len(result)})
+ except Exception as exc:
+ self._logger.error("Error finding duplicate %ss: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def find_filename_conflicts(self, request: web.Request) -> web.Response:
+ try:
+ duplicates = self._service.find_duplicate_filenames()
+ result = []
+ cache = await self._service.scanner.get_cached_data()
+ for filename, paths in duplicates.items():
+ group = {"filename": filename, "models": []}
+ for path in paths:
+ model = next((m for m in cache.raw_data if m["file_path"] == path), None)
+ if model:
+ group["models"].append(await self._service.format_response(model))
+ hash_val = self._service.scanner.get_hash_by_filename(filename)
+ if hash_val:
+ main_path = self._service.get_path_by_hash(hash_val)
+ if main_path and main_path not in paths:
+ main_model = next((m for m in cache.raw_data if m["file_path"] == main_path), None)
+ if main_model:
+ group["models"].insert(0, await self._service.format_response(main_model))
+ if group["models"]:
+ result.append(group)
+ return web.json_response({"success": True, "conflicts": result, "count": len(result)})
+ except Exception as exc:
+ self._logger.error("Error finding filename conflicts for %ss: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def get_model_notes(self, request: web.Request) -> web.Response:
+ try:
+ model_name = request.query.get("name")
+ if not model_name:
+ return web.Response(text=f"{self._service.model_type.capitalize()} file name is required", status=400)
+ notes = await self._service.get_model_notes(model_name)
+ if notes is not None:
+ return web.json_response({"success": True, "notes": notes})
+ return web.json_response({"success": False, "error": f"{self._service.model_type.capitalize()} not found in cache"}, status=404)
+ except Exception as exc:
+ self._logger.error("Error getting %s notes: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def get_model_preview_url(self, request: web.Request) -> web.Response:
+ try:
+ model_name = request.query.get("name")
+ if not model_name:
+ return web.Response(text=f"{self._service.model_type.capitalize()} file name is required", status=400)
+ preview_url = await self._service.get_model_preview_url(model_name)
+ if preview_url:
+ return web.json_response({"success": True, "preview_url": preview_url})
+ return web.json_response({"success": False, "error": f"No preview URL found for the specified {self._service.model_type}"}, status=404)
+ except Exception as exc:
+ self._logger.error("Error getting %s preview URL: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def get_model_civitai_url(self, request: web.Request) -> web.Response:
+ try:
+ model_name = request.query.get("name")
+ if not model_name:
+ return web.Response(text=f"{self._service.model_type.capitalize()} file name is required", status=400)
+ result = await self._service.get_model_civitai_url(model_name)
+ if result["civitai_url"]:
+ return web.json_response({"success": True, **result})
+ return web.json_response({"success": False, "error": f"No Civitai data found for the specified {self._service.model_type}"}, status=404)
+ except Exception as exc:
+ self._logger.error("Error getting %s Civitai URL: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def get_model_metadata(self, request: web.Request) -> web.Response:
+ try:
+ file_path = request.query.get("file_path")
+ if not file_path:
+ return web.Response(text="File path is required", status=400)
+ metadata = await self._service.get_model_metadata(file_path)
+ if metadata is not None:
+ return web.json_response({"success": True, "metadata": metadata})
+ return web.json_response({"success": False, "error": f"{self._service.model_type.capitalize()} not found or no CivitAI metadata available"}, status=404)
+ except Exception as exc:
+ self._logger.error("Error getting %s metadata: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def get_model_description(self, request: web.Request) -> web.Response:
+ try:
+ file_path = request.query.get("file_path")
+ if not file_path:
+ return web.Response(text="File path is required", status=400)
+ description = await self._service.get_model_description(file_path)
+ if description is not None:
+ return web.json_response({"success": True, "description": description})
+ return web.json_response({"success": False, "error": f"{self._service.model_type.capitalize()} not found or no description available"}, status=404)
+ except Exception as exc:
+ self._logger.error("Error getting %s description: %s", self._service.model_type, exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def get_relative_paths(self, request: web.Request) -> web.Response:
+ try:
+ search = request.query.get("search", "").strip()
+ limit = min(int(request.query.get("limit", "15")), 50)
+ matching_paths = await self._service.search_relative_paths(search, limit)
+ return web.json_response({"success": True, "relative_paths": matching_paths})
+ except Exception as exc:
+ self._logger.error("Error getting relative paths for autocomplete: %s", exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+
class ModelDownloadHandler:
    """Coordinate downloads and progress reporting.

    Wraps the download use case and coordinator behind HTTP endpoints and
    serves per-download progress snapshots tracked by the websocket manager.
    """

    def __init__(
        self,
        *,
        ws_manager: WebSocketManager,
        logger: logging.Logger,
        download_use_case: DownloadModelUseCase,
        download_coordinator: DownloadCoordinator,
    ) -> None:
        self._ws_manager = ws_manager
        self._logger = logger
        self._download_use_case = download_use_case
        self._download_coordinator = download_coordinator

    async def _execute_and_respond(self, payload) -> web.Response:
        """Run the download use case and map its result to a response.

        A result whose ``success`` flag is false is returned with HTTP 500
        so callers can distinguish failed downloads from validation errors.
        """
        result = await self._download_use_case.execute(payload)
        if not result.get("success", False):
            return web.json_response(result, status=500)
        return web.json_response(result)

    async def download_model(self, request: web.Request) -> web.Response:
        """POST endpoint: download the model described by the JSON body."""
        try:
            payload = await request.json()
            return await self._execute_and_respond(payload)
        except DownloadModelValidationError as exc:
            return web.json_response({"success": False, "error": str(exc)}, status=400)
        except DownloadModelEarlyAccessError as exc:
            self._logger.warning("Early access error: %s", exc)
            return web.json_response({"success": False, "error": str(exc)}, status=401)
        except Exception as exc:
            error_message = str(exc)
            self._logger.error("Error downloading model: %s", error_message, exc_info=True)
            return web.json_response({"success": False, "error": error_message}, status=500)

    async def download_model_get(self, request: web.Request) -> web.Response:
        """GET endpoint variant: build the download payload from query params.

        Note: a leftover, never-used ``asyncio`` future / mock-request
        construction was removed here — the payload dict was already passed
        to the use case directly.
        """
        try:
            model_id = request.query.get("model_id")
            if not model_id:
                return web.Response(status=400, text="Missing required parameter: Please provide 'model_id'")

            data = {
                "model_id": model_id,
                "use_default_paths": request.query.get("use_default_paths", "false").lower() == "true",
            }
            # Optional parameters are forwarded only when actually supplied.
            for key in ("model_version_id", "download_id", "source"):
                value = request.query.get(key)
                if value:
                    data[key] = value

            return await self._execute_and_respond(data)
        except DownloadModelValidationError as exc:
            return web.json_response({"success": False, "error": str(exc)}, status=400)
        except DownloadModelEarlyAccessError as exc:
            self._logger.warning("Early access error: %s", exc)
            return web.json_response({"success": False, "error": str(exc)}, status=401)
        except Exception as exc:
            self._logger.error("Error downloading model via GET: %s", exc, exc_info=True)
            return web.Response(status=500, text=str(exc))

    async def cancel_download_get(self, request: web.Request) -> web.Response:
        """Cancel an in-flight download identified by ``download_id``."""
        try:
            download_id = request.query.get("download_id")
            if not download_id:
                return web.json_response({"success": False, "error": "Download ID is required"}, status=400)
            result = await self._download_coordinator.cancel_download(download_id)
            return web.json_response(result)
        except Exception as exc:
            self._logger.error("Error cancelling download via GET: %s", exc, exc_info=True)
            return web.json_response({"success": False, "error": str(exc)}, status=500)

    async def get_download_progress(self, request: web.Request) -> web.Response:
        """Report the progress (0-100) of a tracked download, 404 if unknown."""
        try:
            download_id = request.match_info.get("download_id")
            if not download_id:
                return web.json_response({"success": False, "error": "Download ID is required"}, status=400)
            progress_data = self._ws_manager.get_download_progress(download_id)
            if progress_data is None:
                return web.json_response({"success": False, "error": "Download ID not found"}, status=404)
            return web.json_response({"success": True, "progress": progress_data.get("progress", 0)})
        except Exception as exc:
            self._logger.error("Error getting download progress: %s", exc, exc_info=True)
            return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+
class ModelCivitaiHandler:
    """CivitAI integration endpoints."""

    def __init__(
        self,
        *,
        service,
        settings_service: SettingsManager,
        ws_manager: WebSocketManager,
        logger: logging.Logger,
        metadata_provider_factory: Callable[[], Awaitable],
        validate_model_type: Callable[[str], bool],
        expected_model_types: Callable[[], str],
        find_model_file: Callable[[Iterable[Mapping[str, object]]], Optional[Mapping[str, object]]],
        metadata_sync: MetadataSyncService,
        metadata_refresh_use_case: BulkMetadataRefreshUseCase,
        metadata_progress_callback: MetadataRefreshProgressReporter,
    ) -> None:
        # Model-type-specific service; this handler uses its model_type,
        # has_hash and get_path_by_hash members.
        self._service = service
        self._settings = settings_service
        self._ws_manager = ws_manager
        self._logger = logger
        # Async factory producing the metadata provider (CivitAI client).
        self._metadata_provider_factory = metadata_provider_factory
        # Predicate: does a CivitAI "type" string match this handler's model type?
        self._validate_model_type = validate_model_type
        # Human-readable list of accepted model types, used in error messages.
        self._expected_model_types = expected_model_types
        # Picks the primary model file entry out of a version's "files" list.
        self._find_model_file = find_model_file
        self._metadata_sync = metadata_sync
        self._metadata_refresh_use_case = metadata_refresh_use_case
        self._metadata_progress_callback = metadata_progress_callback

    async def fetch_all_civitai(self, request: web.Request) -> web.Response:
        """Trigger a bulk CivitAI metadata refresh for all models of this type."""
        try:
            result = await self._metadata_refresh_use_case.execute_with_error_handling(
                progress_callback=self._metadata_progress_callback
            )
            return web.json_response(result)
        except Exception as exc:
            self._logger.error("Error in fetch_all_civitai for %ss: %s", self._service.model_type, exc)
            return web.Response(text=str(exc), status=500)

    async def get_civitai_versions(self, request: web.Request) -> web.Response:
        """List a CivitAI model's versions, annotated with local availability.

        Rejects models whose CivitAI "type" does not match this handler
        (400); 404 when the model or its versions cannot be fetched.  Each
        version gains ``existsLocally`` (and ``localPath`` when present
        locally) based on the primary file's SHA256 hash.
        """
        try:
            model_id = request.match_info["model_id"]
            metadata_provider = await self._metadata_provider_factory()
            response = await metadata_provider.get_model_versions(model_id)
            if not response or not response.get("modelVersions"):
                return web.Response(status=404, text="Model not found")

            versions = response.get("modelVersions", [])
            model_type = response.get("type", "")
            if not self._validate_model_type(model_type):
                return web.json_response(
                    {"error": f"Model type mismatch. Expected {self._expected_model_types()}, got {model_type}"},
                    status=400,
                )

            for version in versions:
                # isinstance guards throughout: provider payloads are untyped JSON.
                model_file = self._find_model_file(version.get("files", [])) if isinstance(version.get("files"), Iterable) else None
                if model_file:
                    hashes = model_file.get("hashes", {}) if isinstance(model_file, Mapping) else {}
                    sha256 = hashes.get("SHA256") if isinstance(hashes, Mapping) else None
                    if sha256:
                        version["existsLocally"] = self._service.has_hash(sha256)
                        if version["existsLocally"]:
                            version["localPath"] = self._service.get_path_by_hash(sha256)
                    # NOTE(review): nesting reconstructed from a whitespace-mangled
                    # diff — modelSizeKB is recorded whenever a model file is
                    # present, regardless of hash; confirm against upstream.
                    version["modelSizeKB"] = model_file.get("sizeKB") if isinstance(model_file, Mapping) else None
                else:
                    version["existsLocally"] = False
            return web.json_response(versions)
        except Exception as exc:
            self._logger.error("Error fetching %s model versions: %s", self._service.model_type, exc)
            return web.Response(status=500, text=str(exc))

    async def get_civitai_model_by_version(self, request: web.Request) -> web.Response:
        """Fetch full CivitAI model info for a specific version id.

        Maps provider "not found" errors to 404, other provider errors to 500.
        """
        try:
            model_version_id = request.match_info.get("modelVersionId")
            metadata_provider = await self._metadata_provider_factory()
            model, error_msg = await metadata_provider.get_model_version_info(model_version_id)
            if not model:
                self._logger.warning("Failed to fetch model version %s: %s", model_version_id, error_msg)
                status_code = 404 if error_msg and "not found" in error_msg.lower() else 500
                return web.json_response({"success": False, "error": error_msg or "Failed to fetch model information"}, status=status_code)
            return web.json_response(model)
        except Exception as exc:
            self._logger.error("Error fetching model details: %s", exc)
            return web.json_response({"success": False, "error": str(exc)}, status=500)

    async def get_civitai_model_by_hash(self, request: web.Request) -> web.Response:
        """Fetch CivitAI model info by file hash; 404 on any provider error."""
        try:
            hash_value = request.match_info.get("hash")
            metadata_provider = await self._metadata_provider_factory()
            model, error = await metadata_provider.get_model_by_hash(hash_value)
            if error:
                self._logger.warning("Error getting model by hash: %s", error)
                return web.json_response({"success": False, "error": error}, status=404)
            return web.json_response(model)
        except Exception as exc:
            self._logger.error("Error fetching model details by hash: %s", exc)
            return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+
class ModelMoveHandler:
    """Move model files between folders."""

    def __init__(self, *, move_service: ModelMoveService, logger: logging.Logger) -> None:
        self._move_service = move_service
        self._logger = logger

    async def move_model(self, request: web.Request) -> web.Response:
        """Relocate one model file; 400 when either path is missing."""
        try:
            body = await request.json()
            source = body.get("file_path")
            destination = body.get("target_path")
            if not source or not destination:
                return web.Response(text="File path and target path are required", status=400)
            outcome = await self._move_service.move_model(source, destination)
            return web.json_response(outcome, status=200 if outcome.get("success") else 500)
        except Exception as exc:
            self._logger.error("Error moving model: %s", exc, exc_info=True)
            return web.Response(text=str(exc), status=500)

    async def move_models_bulk(self, request: web.Request) -> web.Response:
        """Relocate several model files to a single target folder."""
        try:
            body = await request.json()
            sources = body.get("file_paths", [])
            destination = body.get("target_path")
            if not sources or not destination:
                return web.Response(text="File paths and target path are required", status=400)
            outcome = await self._move_service.move_models_bulk(sources, destination)
            return web.json_response(outcome)
        except Exception as exc:
            self._logger.error("Error moving models in bulk: %s", exc, exc_info=True)
            return web.Response(text=str(exc), status=500)
+
+
class ModelAutoOrganizeHandler:
    """Manage auto-organize operations."""

    def __init__(
        self,
        *,
        use_case: AutoOrganizeUseCase,
        progress_callback: WebSocketProgressCallback,
        ws_manager: WebSocketManager,
        logger: logging.Logger,
    ) -> None:
        self._use_case = use_case
        self._progress_callback = progress_callback
        self._ws_manager = ws_manager
        self._logger = logger

    async def auto_organize_models(self, request: web.Request) -> web.Response:
        """Run auto-organization, optionally restricted to selected paths.

        A POST body may carry ``file_paths``; an absent or unparsable body
        means "organize everything".  409 when a run is already active.
        """
        try:
            selected_paths = None
            if request.method == "POST":
                try:
                    body = await request.json()
                    selected_paths = body.get("file_paths")
                except Exception:  # pragma: no cover - permissive path
                    pass

            outcome = await self._use_case.execute(
                file_paths=selected_paths,
                progress_callback=self._progress_callback,
            )
            return web.json_response(outcome.to_dict())
        except AutoOrganizeInProgressError:
            return web.json_response(
                {"success": False, "error": "Auto-organize is already running. Please wait for it to complete."},
                status=409,
            )
        except Exception as exc:
            self._logger.error("Error in auto_organize_models: %s", exc, exc_info=True)
            # Best-effort: surface the failure over the progress channel too.
            try:
                await self._progress_callback.on_progress(
                    {"type": "auto_organize_progress", "status": "error", "error": str(exc)}
                )
            except Exception:  # pragma: no cover - defensive reporting
                pass
            return web.json_response({"success": False, "error": str(exc)}, status=500)

    async def get_auto_organize_progress(self, request: web.Request) -> web.Response:
        """Return the current auto-organize progress snapshot, 404 if idle."""
        try:
            snapshot = self._ws_manager.get_auto_organize_progress()
            if snapshot is None:
                return web.json_response({"success": False, "error": "No auto-organize operation in progress"}, status=404)
            return web.json_response({"success": True, "progress": snapshot})
        except Exception as exc:
            self._logger.error("Error getting auto-organize progress: %s", exc, exc_info=True)
            return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+
@dataclass
class ModelHandlerSet:
    """Aggregate concrete handlers into a flat mapping."""

    # One field per handler group; to_route_mapping flattens their methods.
    page_view: ModelPageView
    listing: ModelListingHandler
    management: ModelManagementHandler
    query: ModelQueryHandler
    download: ModelDownloadHandler
    civitai: ModelCivitaiHandler
    move: ModelMoveHandler
    auto_organize: ModelAutoOrganizeHandler

    def to_route_mapping(self) -> Dict[str, Callable[[web.Request], Awaitable[web.Response]]]:
        # Keys are the registrar's handler names; values are bound coroutines.
        # Insertion order is preserved deliberately — do not reorder entries.
        return {
            # Page + listing.
            "handle_models_page": self.page_view.handle,
            "get_models": self.listing.get_models,
            # Management (mutating) operations.
            "delete_model": self.management.delete_model,
            "exclude_model": self.management.exclude_model,
            "fetch_civitai": self.management.fetch_civitai,
            "fetch_all_civitai": self.civitai.fetch_all_civitai,
            "relink_civitai": self.management.relink_civitai,
            "replace_preview": self.management.replace_preview,
            "save_metadata": self.management.save_metadata,
            "add_tags": self.management.add_tags,
            "rename_model": self.management.rename_model,
            "bulk_delete_models": self.management.bulk_delete_models,
            "verify_duplicates": self.management.verify_duplicates,
            # Read-only queries.
            "get_top_tags": self.query.get_top_tags,
            "get_base_models": self.query.get_base_models,
            "scan_models": self.query.scan_models,
            "get_model_roots": self.query.get_model_roots,
            "get_folders": self.query.get_folders,
            "get_folder_tree": self.query.get_folder_tree,
            "get_unified_folder_tree": self.query.get_unified_folder_tree,
            "find_duplicate_models": self.query.find_duplicate_models,
            "find_filename_conflicts": self.query.find_filename_conflicts,
            # Downloads.
            "download_model": self.download.download_model,
            "download_model_get": self.download.download_model_get,
            "cancel_download_get": self.download.cancel_download_get,
            "get_download_progress": self.download.get_download_progress,
            # CivitAI lookups.
            "get_civitai_versions": self.civitai.get_civitai_versions,
            "get_civitai_model_by_version": self.civitai.get_civitai_model_by_version,
            "get_civitai_model_by_hash": self.civitai.get_civitai_model_by_hash,
            # File moves and organization.
            "move_model": self.move.move_model,
            "move_models_bulk": self.move.move_models_bulk,
            "auto_organize_models": self.auto_organize.auto_organize_models,
            "get_auto_organize_progress": self.auto_organize.get_auto_organize_progress,
            # Per-model detail queries.
            "get_model_notes": self.query.get_model_notes,
            "get_model_preview_url": self.query.get_model_preview_url,
            "get_model_civitai_url": self.query.get_model_civitai_url,
            "get_model_metadata": self.query.get_model_metadata,
            "get_model_description": self.query.get_model_description,
            "get_relative_paths": self.query.get_relative_paths,
        }
+
diff --git a/py/routes/handlers/recipe_handlers.py b/py/routes/handlers/recipe_handlers.py
new file mode 100644
index 00000000..aa912477
--- /dev/null
+++ b/py/routes/handlers/recipe_handlers.py
@@ -0,0 +1,725 @@
+"""Dedicated handler objects for recipe-related routes."""
+from __future__ import annotations
+
+import json
+import logging
+import os
+from dataclasses import dataclass
+from typing import Any, Awaitable, Callable, Dict, Mapping, Optional
+
+from aiohttp import web
+
+from ...config import config
+from ...services.server_i18n import server_i18n as default_server_i18n
+from ...services.settings_manager import SettingsManager
+from ...services.recipes import (
+ RecipeAnalysisService,
+ RecipeDownloadError,
+ RecipeNotFoundError,
+ RecipePersistenceService,
+ RecipeSharingService,
+ RecipeValidationError,
+)
+
+Logger = logging.Logger
+EnsureDependenciesCallable = Callable[[], Awaitable[None]]
+RecipeScannerGetter = Callable[[], Any]
+CivitaiClientGetter = Callable[[], Any]
+
+
@dataclass(frozen=True)
class RecipeHandlerSet:
    """Group of handlers providing recipe route implementations."""

    # Forward references: the handler classes are defined later in this module.
    page_view: "RecipePageView"
    listing: "RecipeListingHandler"
    query: "RecipeQueryHandler"
    management: "RecipeManagementHandler"
    analysis: "RecipeAnalysisHandler"
    sharing: "RecipeSharingHandler"

    def to_route_mapping(self) -> Mapping[str, Callable[[web.Request], Awaitable[web.StreamResponse]]]:
        """Expose handler coroutines keyed by registrar handler names."""

        # Keys must match the names the route registrar looks up.
        return {
            "render_page": self.page_view.render_page,
            "list_recipes": self.listing.list_recipes,
            "get_recipe": self.listing.get_recipe,
            "analyze_uploaded_image": self.analysis.analyze_uploaded_image,
            "analyze_local_image": self.analysis.analyze_local_image,
            "save_recipe": self.management.save_recipe,
            "delete_recipe": self.management.delete_recipe,
            "get_top_tags": self.query.get_top_tags,
            "get_base_models": self.query.get_base_models,
            "share_recipe": self.sharing.share_recipe,
            "download_shared_recipe": self.sharing.download_shared_recipe,
            "get_recipe_syntax": self.query.get_recipe_syntax,
            "update_recipe": self.management.update_recipe,
            "reconnect_lora": self.management.reconnect_lora,
            "find_duplicates": self.query.find_duplicates,
            "bulk_delete": self.management.bulk_delete,
            "save_recipe_from_widget": self.management.save_recipe_from_widget,
            "get_recipes_for_lora": self.query.get_recipes_for_lora,
            "scan_recipes": self.query.scan_recipes,
        }
+
+
class RecipePageView:
    """Render the recipe shell page."""

    def __init__(
        self,
        *,
        ensure_dependencies_ready: EnsureDependenciesCallable,
        settings_service: SettingsManager,
        server_i18n=default_server_i18n,
        template_env,
        template_name: str,
        recipe_scanner_getter: RecipeScannerGetter,
        logger: Logger,
    ) -> None:
        # Awaited before every render so backend services are initialized.
        self._ensure_dependencies_ready = ensure_dependencies_ready
        self._settings = settings_service
        self._server_i18n = server_i18n
        # Jinja-style environment; template_name is resolved through it.
        self._template_env = template_env
        self._template_name = template_name
        # Callable returning the (possibly not-yet-ready) recipe scanner.
        self._recipe_scanner_getter = recipe_scanner_getter
        self._logger = logger

    async def render_page(self, request: web.Request) -> web.Response:
        """Render the recipes HTML shell.

        Warms the recipe cache first; if that fails, the page is rendered
        in an "initializing" state instead of erroring.  Also switches the
        server-side i18n locale to the user's configured language before
        rendering (note: this mutates shared i18n state).
        """
        try:
            await self._ensure_dependencies_ready()
            recipe_scanner = self._recipe_scanner_getter()
            if recipe_scanner is None:  # pragma: no cover - defensive guard
                raise RuntimeError("Recipe scanner not available")

            user_language = self._settings.get("language", "en")
            self._server_i18n.set_locale(user_language)

            try:
                # Warm the cache; recipes themselves are loaded client-side,
                # so the template receives an empty list.
                await recipe_scanner.get_cached_data(force_refresh=False)
                rendered = self._template_env.get_template(self._template_name).render(
                    recipes=[],
                    is_initializing=False,
                    settings=self._settings,
                    request=request,
                    t=self._server_i18n.get_translation,
                )
            except Exception as cache_error:  # pragma: no cover - logging path
                self._logger.error("Error loading recipe cache data: %s", cache_error)
                # Fallback: render the page in its initializing state.
                rendered = self._template_env.get_template(self._template_name).render(
                    is_initializing=True,
                    settings=self._settings,
                    request=request,
                    t=self._server_i18n.get_translation,
                )
            return web.Response(text=rendered, content_type="text/html")
        except Exception as exc:  # pragma: no cover - logging path
            self._logger.error("Error handling recipes request: %s", exc, exc_info=True)
            return web.Response(text="Error loading recipes page", status=500)
+
+
class RecipeListingHandler:
    """Provide listing and detail APIs for recipes."""

    def __init__(
        self,
        *,
        ensure_dependencies_ready: EnsureDependenciesCallable,
        recipe_scanner_getter: RecipeScannerGetter,
        logger: Logger,
    ) -> None:
        self._ensure_dependencies_ready = ensure_dependencies_ready
        self._recipe_scanner_getter = recipe_scanner_getter
        self._logger = logger

    async def list_recipes(self, request: web.Request) -> web.Response:
        """Return a paginated, filtered recipe listing as JSON.

        Query parameters: page, page_size, sort_by, search, search_*
        toggles, base_models / tags (comma-separated), lora_hash.
        Non-numeric page/page_size currently surface as a 500 via the
        generic handler.
        """
        try:
            await self._ensure_dependencies_ready()
            recipe_scanner = self._recipe_scanner_getter()
            if recipe_scanner is None:
                raise RuntimeError("Recipe scanner unavailable")

            page = int(request.query.get("page", "1"))
            page_size = int(request.query.get("page_size", "20"))
            sort_by = request.query.get("sort_by", "date")
            search = request.query.get("search")

            # Each toggle defaults to enabled unless explicitly "false".
            search_options = {
                "title": request.query.get("search_title", "true").lower() == "true",
                "tags": request.query.get("search_tags", "true").lower() == "true",
                "lora_name": request.query.get("search_lora_name", "true").lower() == "true",
                "lora_model": request.query.get("search_lora_model", "true").lower() == "true",
            }

            filters: Dict[str, list[str]] = {}
            base_models = request.query.get("base_models")
            if base_models:
                filters["base_model"] = base_models.split(",")

            tags = request.query.get("tags")
            if tags:
                filters["tags"] = tags.split(",")

            lora_hash = request.query.get("lora_hash")

            result = await recipe_scanner.get_paginated_data(
                page=page,
                page_size=page_size,
                sort_by=sort_by,
                search=search,
                filters=filters,
                search_options=search_options,
                lora_hash=lora_hash,
            )

            # Post-process items so the client always has file_url/loras/base_model.
            for item in result.get("items", []):
                file_path = item.get("file_path")
                if file_path:
                    item["file_url"] = self.format_recipe_file_url(file_path)
                else:
                    item.setdefault("file_url", "/loras_static/images/no-preview.png")
                item.setdefault("loras", [])
                item.setdefault("base_model", "")

            return web.json_response(result)
        except Exception as exc:
            self._logger.error("Error retrieving recipes: %s", exc, exc_info=True)
            return web.json_response({"error": str(exc)}, status=500)

    async def get_recipe(self, request: web.Request) -> web.Response:
        """Return one recipe by its id from the route path, 404 if unknown."""
        try:
            await self._ensure_dependencies_ready()
            recipe_scanner = self._recipe_scanner_getter()
            if recipe_scanner is None:
                raise RuntimeError("Recipe scanner unavailable")

            recipe_id = request.match_info["recipe_id"]
            recipe = await recipe_scanner.get_recipe_by_id(recipe_id)

            if not recipe:
                return web.json_response({"error": "Recipe not found"}, status=404)
            return web.json_response(recipe)
        except Exception as exc:
            self._logger.error("Error retrieving recipe details: %s", exc, exc_info=True)
            return web.json_response({"error": str(exc)}, status=500)

    def format_recipe_file_url(self, file_path: str) -> str:
        """Map an on-disk recipe image path to its static preview URL.

        Assumes recipe previews are served from under the first configured
        lora root (``/loras_static/root1/preview/...``) — TODO confirm for
        multi-root setups.  Falls back to the generic no-preview image on
        any error.
        """
        try:
            recipes_dir = os.path.join(config.loras_roots[0], "recipes").replace(os.sep, "/")
            normalized_path = file_path.replace(os.sep, "/")
            if normalized_path.startswith(recipes_dir):
                relative_path = os.path.relpath(file_path, config.loras_roots[0]).replace(os.sep, "/")
                return f"/loras_static/root1/preview/{relative_path}"

            # Path lies outside the recipes dir: serve by bare file name.
            file_name = os.path.basename(file_path)
            return f"/loras_static/root1/preview/recipes/{file_name}"
        except Exception as exc:  # pragma: no cover - logging path
            self._logger.error("Error formatting recipe file URL: %s", exc, exc_info=True)
            return "/loras_static/images/no-preview.png"
+
+
class RecipeQueryHandler:
    """Provide read-only insights on recipe data."""

    def __init__(
        self,
        *,
        ensure_dependencies_ready: EnsureDependenciesCallable,
        recipe_scanner_getter: RecipeScannerGetter,
        format_recipe_file_url: Callable[[str], str],
        logger: Logger,
    ) -> None:
        self._ensure_dependencies_ready = ensure_dependencies_ready
        self._recipe_scanner_getter = recipe_scanner_getter
        # Borrowed from the listing handler: maps disk paths to preview URLs.
        self._format_recipe_file_url = format_recipe_file_url
        self._logger = logger

    async def get_top_tags(self, request: web.Request) -> web.Response:
        """Return the most frequent recipe tags, capped by ``limit`` (default 20)."""
        try:
            await self._ensure_dependencies_ready()
            recipe_scanner = self._recipe_scanner_getter()
            if recipe_scanner is None:
                raise RuntimeError("Recipe scanner unavailable")

            limit = int(request.query.get("limit", "20"))
            cache = await recipe_scanner.get_cached_data()

            # Count occurrences across all cached recipes; `or []` guards
            # against tags being explicitly null in the recipe data.
            tag_counts: Dict[str, int] = {}
            for recipe in getattr(cache, "raw_data", []):
                for tag in recipe.get("tags", []) or []:
                    tag_counts[tag] = tag_counts.get(tag, 0) + 1

            sorted_tags = [{"tag": tag, "count": count} for tag, count in tag_counts.items()]
            sorted_tags.sort(key=lambda entry: entry["count"], reverse=True)
            return web.json_response({"success": True, "tags": sorted_tags[:limit]})
        except Exception as exc:
            self._logger.error("Error retrieving top tags: %s", exc, exc_info=True)
            return web.json_response({"success": False, "error": str(exc)}, status=500)

    async def get_base_models(self, request: web.Request) -> web.Response:
        """Return base models used by recipes, with usage counts, most-used first."""
        try:
            await self._ensure_dependencies_ready()
            recipe_scanner = self._recipe_scanner_getter()
            if recipe_scanner is None:
                raise RuntimeError("Recipe scanner unavailable")

            cache = await recipe_scanner.get_cached_data()

            base_model_counts: Dict[str, int] = {}
            for recipe in getattr(cache, "raw_data", []):
                base_model = recipe.get("base_model")
                if base_model:
                    base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1

            sorted_models = [{"name": model, "count": count} for model, count in base_model_counts.items()]
            sorted_models.sort(key=lambda entry: entry["count"], reverse=True)
            return web.json_response({"success": True, "base_models": sorted_models})
        except Exception as exc:
            self._logger.error("Error retrieving base models: %s", exc, exc_info=True)
            return web.json_response({"success": False, "error": str(exc)}, status=500)

    async def get_recipes_for_lora(self, request: web.Request) -> web.Response:
        """List recipes that reference the LoRA with the given ``hash``."""
        try:
            await self._ensure_dependencies_ready()
            recipe_scanner = self._recipe_scanner_getter()
            if recipe_scanner is None:
                raise RuntimeError("Recipe scanner unavailable")

            lora_hash = request.query.get("hash")
            if not lora_hash:
                return web.json_response({"success": False, "error": "Lora hash is required"}, status=400)

            matching_recipes = await recipe_scanner.get_recipes_for_lora(lora_hash)
            return web.json_response({"success": True, "recipes": matching_recipes})
        except Exception as exc:
            self._logger.error("Error getting recipes for Lora: %s", exc)
            return web.json_response({"success": False, "error": str(exc)}, status=500)

    async def scan_recipes(self, request: web.Request) -> web.Response:
        """Force a full rebuild of the recipe cache."""
        try:
            await self._ensure_dependencies_ready()
            recipe_scanner = self._recipe_scanner_getter()
            if recipe_scanner is None:
                raise RuntimeError("Recipe scanner unavailable")

            self._logger.info("Manually triggering recipe cache rebuild")
            await recipe_scanner.get_cached_data(force_refresh=True)
            return web.json_response({"success": True, "message": "Recipe cache refreshed successfully"})
        except Exception as exc:
            self._logger.error("Error refreshing recipe cache: %s", exc, exc_info=True)
            return web.json_response({"success": False, "error": str(exc)}, status=500)

    async def find_duplicates(self, request: web.Request) -> web.Response:
        """Group recipes sharing a fingerprint and report groups of 2+.

        Groups are sorted by size (largest first); recipes within a group
        are sorted newest-modified first.
        """
        try:
            await self._ensure_dependencies_ready()
            recipe_scanner = self._recipe_scanner_getter()
            if recipe_scanner is None:
                raise RuntimeError("Recipe scanner unavailable")

            duplicate_groups = await recipe_scanner.find_all_duplicate_recipes()
            response_data = []

            for fingerprint, recipe_ids in duplicate_groups.items():
                if len(recipe_ids) <= 1:
                    continue

                recipes = []
                for recipe_id in recipe_ids:
                    recipe = await recipe_scanner.get_recipe_by_id(recipe_id)
                    if recipe:
                        recipes.append(
                            {
                                "id": recipe.get("id"),
                                "title": recipe.get("title"),
                                "file_url": recipe.get("file_url")
                                or self._format_recipe_file_url(recipe.get("file_path", "")),
                                "modified": recipe.get("modified"),
                                "created_date": recipe.get("created_date"),
                                "lora_count": len(recipe.get("loras", [])),
                            }
                        )

                # Re-check the count: some ids may not resolve to recipes.
                if len(recipes) >= 2:
                    recipes.sort(key=lambda entry: entry.get("modified", 0), reverse=True)
                    response_data.append(
                        {
                            "fingerprint": fingerprint,
                            "count": len(recipes),
                            "recipes": recipes,
                        }
                    )

            response_data.sort(key=lambda entry: entry["count"], reverse=True)
            return web.json_response({"success": True, "duplicate_groups": response_data})
        except Exception as exc:
            self._logger.error("Error finding duplicate recipes: %s", exc, exc_info=True)
            return web.json_response({"success": False, "error": str(exc)}, status=500)

    async def get_recipe_syntax(self, request: web.Request) -> web.Response:
        """Return the space-joined LoRA syntax string for one recipe.

        404 when the recipe does not exist, 400 when it contains no LoRAs.
        """
        try:
            await self._ensure_dependencies_ready()
            recipe_scanner = self._recipe_scanner_getter()
            if recipe_scanner is None:
                raise RuntimeError("Recipe scanner unavailable")

            recipe_id = request.match_info["recipe_id"]
            try:
                syntax_parts = await recipe_scanner.get_recipe_syntax_tokens(recipe_id)
            except RecipeNotFoundError:
                return web.json_response({"error": "Recipe not found"}, status=404)

            if not syntax_parts:
                return web.json_response({"error": "No LoRAs found in this recipe"}, status=400)

            return web.json_response({"success": True, "syntax": " ".join(syntax_parts)})
        except Exception as exc:
            self._logger.error("Error generating recipe syntax: %s", exc, exc_info=True)
            return web.json_response({"error": str(exc)}, status=500)
+
+
+class RecipeManagementHandler:
+ """Handle create/update/delete style recipe operations."""
+
    def __init__(
        self,
        *,
        ensure_dependencies_ready: EnsureDependenciesCallable,
        recipe_scanner_getter: RecipeScannerGetter,
        logger: Logger,
        persistence_service: RecipePersistenceService,
        analysis_service: RecipeAnalysisService,
    ) -> None:
        # Awaited at the start of every handler before touching the scanner.
        self._ensure_dependencies_ready = ensure_dependencies_ready
        # Callable returning the scanner (may be None until services are up).
        self._recipe_scanner_getter = recipe_scanner_getter
        self._logger = logger
        # Performs the actual create/update/delete persistence work.
        self._persistence_service = persistence_service
        self._analysis_service = analysis_service
+
+ async def save_recipe(self, request: web.Request) -> web.Response:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ if recipe_scanner is None:
+ raise RuntimeError("Recipe scanner unavailable")
+
+ reader = await request.multipart()
+ payload = await self._parse_save_payload(reader)
+
+ result = await self._persistence_service.save_recipe(
+ recipe_scanner=recipe_scanner,
+ image_bytes=payload["image_bytes"],
+ image_base64=payload["image_base64"],
+ name=payload["name"],
+ tags=payload["tags"],
+ metadata=payload["metadata"],
+ )
+ return web.json_response(result.payload, status=result.status)
+ except RecipeValidationError as exc:
+ return web.json_response({"error": str(exc)}, status=400)
+ except Exception as exc:
+ self._logger.error("Error saving recipe: %s", exc, exc_info=True)
+ return web.json_response({"error": str(exc)}, status=500)
+
+ async def delete_recipe(self, request: web.Request) -> web.Response:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ if recipe_scanner is None:
+ raise RuntimeError("Recipe scanner unavailable")
+
+ recipe_id = request.match_info["recipe_id"]
+ result = await self._persistence_service.delete_recipe(
+ recipe_scanner=recipe_scanner, recipe_id=recipe_id
+ )
+ return web.json_response(result.payload, status=result.status)
+ except RecipeNotFoundError as exc:
+ return web.json_response({"error": str(exc)}, status=404)
+ except Exception as exc:
+ self._logger.error("Error deleting recipe: %s", exc, exc_info=True)
+ return web.json_response({"error": str(exc)}, status=500)
+
+ async def update_recipe(self, request: web.Request) -> web.Response:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ if recipe_scanner is None:
+ raise RuntimeError("Recipe scanner unavailable")
+
+ recipe_id = request.match_info["recipe_id"]
+ data = await request.json()
+ result = await self._persistence_service.update_recipe(
+ recipe_scanner=recipe_scanner, recipe_id=recipe_id, updates=data
+ )
+ return web.json_response(result.payload, status=result.status)
+ except RecipeValidationError as exc:
+ return web.json_response({"error": str(exc)}, status=400)
+ except RecipeNotFoundError as exc:
+ return web.json_response({"error": str(exc)}, status=404)
+ except Exception as exc:
+ self._logger.error("Error updating recipe: %s", exc, exc_info=True)
+ return web.json_response({"error": str(exc)}, status=500)
+
+ async def reconnect_lora(self, request: web.Request) -> web.Response:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ if recipe_scanner is None:
+ raise RuntimeError("Recipe scanner unavailable")
+
+ data = await request.json()
+ for field in ("recipe_id", "lora_index", "target_name"):
+ if field not in data:
+ raise RecipeValidationError(f"Missing required field: {field}")
+
+ result = await self._persistence_service.reconnect_lora(
+ recipe_scanner=recipe_scanner,
+ recipe_id=data["recipe_id"],
+ lora_index=int(data["lora_index"]),
+ target_name=data["target_name"],
+ )
+ return web.json_response(result.payload, status=result.status)
+ except RecipeValidationError as exc:
+ return web.json_response({"error": str(exc)}, status=400)
+ except RecipeNotFoundError as exc:
+ return web.json_response({"error": str(exc)}, status=404)
+ except Exception as exc:
+ self._logger.error("Error reconnecting LoRA: %s", exc, exc_info=True)
+ return web.json_response({"error": str(exc)}, status=500)
+
+ async def bulk_delete(self, request: web.Request) -> web.Response:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ if recipe_scanner is None:
+ raise RuntimeError("Recipe scanner unavailable")
+
+ data = await request.json()
+ recipe_ids = data.get("recipe_ids", [])
+ result = await self._persistence_service.bulk_delete(
+ recipe_scanner=recipe_scanner, recipe_ids=recipe_ids
+ )
+ return web.json_response(result.payload, status=result.status)
+ except RecipeValidationError as exc:
+ return web.json_response({"success": False, "error": str(exc)}, status=400)
+ except RecipeNotFoundError as exc:
+ return web.json_response({"success": False, "error": str(exc)}, status=404)
+ except Exception as exc:
+ self._logger.error("Error performing bulk delete: %s", exc, exc_info=True)
+ return web.json_response({"success": False, "error": str(exc)}, status=500)
+
+ async def save_recipe_from_widget(self, request: web.Request) -> web.Response:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ if recipe_scanner is None:
+ raise RuntimeError("Recipe scanner unavailable")
+
+ analysis = await self._analysis_service.analyze_widget_metadata(
+ recipe_scanner=recipe_scanner
+ )
+ metadata = analysis.payload.get("metadata")
+ image_bytes = analysis.payload.get("image_bytes")
+ if not metadata or image_bytes is None:
+ raise RecipeValidationError("Unable to extract metadata from widget")
+
+ result = await self._persistence_service.save_recipe_from_widget(
+ recipe_scanner=recipe_scanner,
+ metadata=metadata,
+ image_bytes=image_bytes,
+ )
+ return web.json_response(result.payload, status=result.status)
+ except RecipeValidationError as exc:
+ return web.json_response({"error": str(exc)}, status=400)
+ except Exception as exc:
+ self._logger.error("Error saving recipe from widget: %s", exc, exc_info=True)
+ return web.json_response({"error": str(exc)}, status=500)
+
+ async def _parse_save_payload(self, reader) -> dict[str, Any]:
+ image_bytes: Optional[bytes] = None
+ image_base64: Optional[str] = None
+ name: Optional[str] = None
+ tags: list[str] = []
+ metadata: Optional[Dict[str, Any]] = None
+
+ while True:
+ field = await reader.next()
+ if field is None:
+ break
+ if field.name == "image":
+ image_chunks = bytearray()
+ while True:
+ chunk = await field.read_chunk()
+ if not chunk:
+ break
+ image_chunks.extend(chunk)
+ image_bytes = bytes(image_chunks)
+ elif field.name == "image_base64":
+ image_base64 = await field.text()
+ elif field.name == "name":
+ name = await field.text()
+ elif field.name == "tags":
+ tags_text = await field.text()
+ try:
+ parsed_tags = json.loads(tags_text)
+ tags = parsed_tags if isinstance(parsed_tags, list) else []
+ except Exception:
+ tags = []
+ elif field.name == "metadata":
+ metadata_text = await field.text()
+ try:
+ metadata = json.loads(metadata_text)
+ except Exception:
+ metadata = {}
+
+ return {
+ "image_bytes": image_bytes,
+ "image_base64": image_base64,
+ "name": name,
+ "tags": tags,
+ "metadata": metadata,
+ }
+
+
+class RecipeAnalysisHandler:
+ """Analyze images to extract recipe metadata."""
+
+ def __init__(
+ self,
+ *,
+ ensure_dependencies_ready: EnsureDependenciesCallable,
+ recipe_scanner_getter: RecipeScannerGetter,
+ civitai_client_getter: CivitaiClientGetter,
+ logger: Logger,
+ analysis_service: RecipeAnalysisService,
+ ) -> None:
+ self._ensure_dependencies_ready = ensure_dependencies_ready
+ self._recipe_scanner_getter = recipe_scanner_getter
+ self._civitai_client_getter = civitai_client_getter
+ self._logger = logger
+ self._analysis_service = analysis_service
+
+ async def analyze_uploaded_image(self, request: web.Request) -> web.Response:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ civitai_client = self._civitai_client_getter()
+ if recipe_scanner is None or civitai_client is None:
+ raise RuntimeError("Required services unavailable")
+
+ content_type = request.headers.get("Content-Type", "")
+ if "multipart/form-data" in content_type:
+ reader = await request.multipart()
+ field = await reader.next()
+ if field is None or field.name != "image":
+ raise RecipeValidationError("No image field found")
+ image_chunks = bytearray()
+ while True:
+ chunk = await field.read_chunk()
+ if not chunk:
+ break
+ image_chunks.extend(chunk)
+ result = await self._analysis_service.analyze_uploaded_image(
+ image_bytes=bytes(image_chunks),
+ recipe_scanner=recipe_scanner,
+ )
+ return web.json_response(result.payload, status=result.status)
+
+ if "application/json" in content_type:
+ data = await request.json()
+ result = await self._analysis_service.analyze_remote_image(
+ url=data.get("url"),
+ recipe_scanner=recipe_scanner,
+ civitai_client=civitai_client,
+ )
+ return web.json_response(result.payload, status=result.status)
+
+ raise RecipeValidationError("Unsupported content type")
+ except RecipeValidationError as exc:
+ return web.json_response({"error": str(exc), "loras": []}, status=400)
+ except RecipeDownloadError as exc:
+ return web.json_response({"error": str(exc), "loras": []}, status=400)
+ except RecipeNotFoundError as exc:
+ return web.json_response({"error": str(exc), "loras": []}, status=404)
+ except Exception as exc:
+ self._logger.error("Error analyzing recipe image: %s", exc, exc_info=True)
+ return web.json_response({"error": str(exc), "loras": []}, status=500)
+
+ async def analyze_local_image(self, request: web.Request) -> web.Response:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ if recipe_scanner is None:
+ raise RuntimeError("Recipe scanner unavailable")
+
+ data = await request.json()
+ result = await self._analysis_service.analyze_local_image(
+ file_path=data.get("path"),
+ recipe_scanner=recipe_scanner,
+ )
+ return web.json_response(result.payload, status=result.status)
+ except RecipeValidationError as exc:
+ return web.json_response({"error": str(exc), "loras": []}, status=400)
+ except RecipeNotFoundError as exc:
+ return web.json_response({"error": str(exc), "loras": []}, status=404)
+ except Exception as exc:
+ self._logger.error("Error analyzing local image: %s", exc, exc_info=True)
+ return web.json_response({"error": str(exc), "loras": []}, status=500)
+
+
+class RecipeSharingHandler:
+ """Serve endpoints related to recipe sharing."""
+
+ def __init__(
+ self,
+ *,
+ ensure_dependencies_ready: EnsureDependenciesCallable,
+ recipe_scanner_getter: RecipeScannerGetter,
+ logger: Logger,
+ sharing_service: RecipeSharingService,
+ ) -> None:
+ self._ensure_dependencies_ready = ensure_dependencies_ready
+ self._recipe_scanner_getter = recipe_scanner_getter
+ self._logger = logger
+ self._sharing_service = sharing_service
+
+ async def share_recipe(self, request: web.Request) -> web.Response:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ if recipe_scanner is None:
+ raise RuntimeError("Recipe scanner unavailable")
+
+ recipe_id = request.match_info["recipe_id"]
+ result = await self._sharing_service.share_recipe(
+ recipe_scanner=recipe_scanner, recipe_id=recipe_id
+ )
+ return web.json_response(result.payload, status=result.status)
+ except RecipeNotFoundError as exc:
+ return web.json_response({"error": str(exc)}, status=404)
+ except Exception as exc:
+ self._logger.error("Error sharing recipe: %s", exc, exc_info=True)
+ return web.json_response({"error": str(exc)}, status=500)
+
+ async def download_shared_recipe(self, request: web.Request) -> web.StreamResponse:
+ try:
+ await self._ensure_dependencies_ready()
+ recipe_scanner = self._recipe_scanner_getter()
+ if recipe_scanner is None:
+ raise RuntimeError("Recipe scanner unavailable")
+
+ recipe_id = request.match_info["recipe_id"]
+ download_info = await self._sharing_service.prepare_download(
+ recipe_scanner=recipe_scanner, recipe_id=recipe_id
+ )
+ return web.FileResponse(
+ download_info.file_path,
+ headers={
+ "Content-Disposition": f'attachment; filename="{download_info.download_filename}"'
+ },
+ )
+ except RecipeNotFoundError as exc:
+ return web.json_response({"error": str(exc)}, status=404)
+ except Exception as exc:
+ self._logger.error("Error downloading shared recipe: %s", exc, exc_info=True)
+ return web.json_response({"error": str(exc)}, status=500)
diff --git a/py/routes/lora_routes.py b/py/routes/lora_routes.py
index 4e261004..ee6bc151 100644
--- a/py/routes/lora_routes.py
+++ b/py/routes/lora_routes.py
@@ -5,9 +5,9 @@ from typing import Dict
from server import PromptServer # type: ignore
from .base_model_routes import BaseModelRoutes
+from .model_route_registrar import ModelRouteRegistrar
from ..services.lora_service import LoraService
from ..services.service_registry import ServiceRegistry
-from ..services.metadata_service import get_default_metadata_provider
from ..utils.utils import get_lora_info
logger = logging.getLogger(__name__)
@@ -17,8 +17,7 @@ class LoraRoutes(BaseModelRoutes):
def __init__(self):
"""Initialize LoRA routes with LoRA service"""
- # Service will be initialized later via setup_routes
- self.service = None
+ super().__init__()
self.template_name = "loras.html"
async def initialize_services(self):
@@ -26,31 +25,26 @@ class LoraRoutes(BaseModelRoutes):
lora_scanner = await ServiceRegistry.get_lora_scanner()
self.service = LoraService(lora_scanner)
- # Initialize parent with the service
- super().__init__(self.service)
+ # Attach service dependencies
+ self.attach_service(self.service)
def setup_routes(self, app: web.Application):
"""Setup LoRA routes"""
# Schedule service initialization on app startup
app.on_startup.append(lambda _: self.initialize_services())
-
+
# Setup common routes with 'loras' prefix (includes page route)
super().setup_routes(app, 'loras')
-
- def setup_specific_routes(self, app: web.Application, prefix: str):
+
+ def setup_specific_routes(self, registrar: ModelRouteRegistrar, prefix: str):
"""Setup LoRA-specific routes"""
# LoRA-specific query routes
- app.router.add_get(f'/api/{prefix}/letter-counts', self.get_letter_counts)
- app.router.add_get(f'/api/{prefix}/get-trigger-words', self.get_lora_trigger_words)
- app.router.add_get(f'/api/{prefix}/usage-tips-by-path', self.get_lora_usage_tips_by_path)
-
- # CivitAI integration with LoRA-specific validation
- app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_lora)
- app.router.add_get(f'/api/{prefix}/civitai/model/version/{{modelVersionId}}', self.get_civitai_model_by_version)
- app.router.add_get(f'/api/{prefix}/civitai/model/hash/{{hash}}', self.get_civitai_model_by_hash)
-
+ registrar.add_prefixed_route('GET', '/api/lm/{prefix}/letter-counts', prefix, self.get_letter_counts)
+ registrar.add_prefixed_route('GET', '/api/lm/{prefix}/get-trigger-words', prefix, self.get_lora_trigger_words)
+ registrar.add_prefixed_route('GET', '/api/lm/{prefix}/usage-tips-by-path', prefix, self.get_lora_usage_tips_by_path)
+
# ComfyUI integration
- app.router.add_post(f'/api/{prefix}/get_trigger_words', self.get_trigger_words)
+ registrar.add_prefixed_route('POST', '/api/lm/{prefix}/get_trigger_words', prefix, self.get_trigger_words)
def _parse_specific_params(self, request: web.Request) -> Dict:
"""Parse LoRA-specific parameters"""
@@ -76,6 +70,15 @@ class LoraRoutes(BaseModelRoutes):
return params
+ def _validate_civitai_model_type(self, model_type: str) -> bool:
+ """Validate CivitAI model type for LoRA"""
+ from ..utils.constants import VALID_LORA_TYPES
+ return model_type.lower() in VALID_LORA_TYPES
+
+ def _get_expected_model_types(self) -> str:
+ """Get expected model types string for error messages"""
+ return "LORA, LoCon, or DORA"
+
# LoRA-specific route handlers
async def get_letter_counts(self, request: web.Request) -> web.Response:
"""Get count of LoRAs for each letter of the alphabet"""
@@ -210,94 +213,6 @@ class LoraRoutes(BaseModelRoutes):
'error': str(e)
}, status=500)
- # CivitAI integration methods
- async def get_civitai_versions_lora(self, request: web.Request) -> web.Response:
- """Get available versions for a Civitai LoRA model with local availability info"""
- try:
- model_id = request.match_info['model_id']
- metadata_provider = await get_default_metadata_provider()
- response = await metadata_provider.get_model_versions(model_id)
- if not response or not response.get('modelVersions'):
- return web.Response(status=404, text="Model not found")
-
- versions = response.get('modelVersions', [])
- model_type = response.get('type', '')
-
- # Check model type - should be LORA, LoCon, or DORA
- from ..utils.constants import VALID_LORA_TYPES
- if model_type.lower() not in VALID_LORA_TYPES:
- return web.json_response({
- 'error': f"Model type mismatch. Expected LORA or LoCon, got {model_type}"
- }, status=400)
-
- # Check local availability for each version
- for version in versions:
- # Find the model file (type="Model") in the files list
- model_file = next((file for file in version.get('files', [])
- if file.get('type') == 'Model'), None)
-
- if model_file:
- sha256 = model_file.get('hashes', {}).get('SHA256')
- if sha256:
- # Set existsLocally and localPath at the version level
- version['existsLocally'] = self.service.has_hash(sha256)
- if version['existsLocally']:
- version['localPath'] = self.service.get_path_by_hash(sha256)
-
- # Also set the model file size at the version level for easier access
- version['modelSizeKB'] = model_file.get('sizeKB')
- else:
- # No model file found in this version
- version['existsLocally'] = False
-
- return web.json_response(versions)
- except Exception as e:
- logger.error(f"Error fetching LoRA model versions: {e}")
- return web.Response(status=500, text=str(e))
-
- async def get_civitai_model_by_version(self, request: web.Request) -> web.Response:
- """Get CivitAI model details by model version ID"""
- try:
- model_version_id = request.match_info.get('modelVersionId')
-
- # Get model details from metadata provider
- metadata_provider = await get_default_metadata_provider()
- model, error_msg = await metadata_provider.get_model_version_info(model_version_id)
-
- if not model:
- # Log warning for failed model retrieval
- logger.warning(f"Failed to fetch model version {model_version_id}: {error_msg}")
-
- # Determine status code based on error message
- status_code = 404 if error_msg and "not found" in error_msg.lower() else 500
-
- return web.json_response({
- "success": False,
- "error": error_msg or "Failed to fetch model information"
- }, status=status_code)
-
- return web.json_response(model)
- except Exception as e:
- logger.error(f"Error fetching model details: {e}")
- return web.json_response({
- "success": False,
- "error": str(e)
- }, status=500)
-
- async def get_civitai_model_by_hash(self, request: web.Request) -> web.Response:
- """Get CivitAI model details by hash"""
- try:
- hash = request.match_info.get('hash')
- metadata_provider = await get_default_metadata_provider()
- model = await metadata_provider.get_model_by_hash(hash)
- return web.json_response(model)
- except Exception as e:
- logger.error(f"Error fetching model details by hash: {e}")
- return web.json_response({
- "success": False,
- "error": str(e)
- }, status=500)
-
async def get_trigger_words(self, request: web.Request) -> web.Response:
"""Get trigger words for specified LoRA models"""
try:
diff --git a/py/routes/misc_routes.py b/py/routes/misc_routes.py
index 5ae2eadd..3a6439da 100644
--- a/py/routes/misc_routes.py
+++ b/py/routes/misc_routes.py
@@ -4,6 +4,7 @@ import sys
import threading
import asyncio
import subprocess
+import re
from server import PromptServer # type: ignore
from aiohttp import web
from ..services.settings_manager import settings
@@ -12,11 +13,12 @@ from ..utils.lora_metadata import extract_trained_words
from ..config import config
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS, NODE_TYPES, DEFAULT_NODE_COLOR
from ..services.service_registry import ServiceRegistry
-from ..services.metadata_service import get_metadata_archive_manager, update_metadata_providers
+from ..services.metadata_service import get_metadata_archive_manager, update_metadata_providers, get_metadata_provider
from ..services.websocket_manager import ws_manager
+from ..services.downloader import get_downloader
logger = logging.getLogger(__name__)
-standalone_mode = 'nodes' not in sys.modules
+standalone_mode = 'nodes' not in sys.modules
# Node registry for tracking active workflow nodes
class NodeRegistry:
@@ -84,52 +86,157 @@ node_registry = NodeRegistry()
class MiscRoutes:
"""Miscellaneous routes for various utility functions"""
+ @staticmethod
+ def is_dedicated_example_images_folder(folder_path):
+ """
+ Check if a folder is a dedicated example images folder.
+
+ A dedicated folder should either be:
+ 1. Empty
+ 2. Only contain .download_progress.json file and/or folders with valid SHA256 hash names (64 hex characters)
+
+ Args:
+ folder_path (str): Path to the folder to check
+
+ Returns:
+ bool: True if the folder is dedicated, False otherwise
+ """
+ try:
+ if not os.path.exists(folder_path) or not os.path.isdir(folder_path):
+ return False
+
+ items = os.listdir(folder_path)
+
+ # Empty folder is considered dedicated
+ if not items:
+ return True
+
+ # Check each item in the folder
+ for item in items:
+ item_path = os.path.join(folder_path, item)
+
+ # Allow .download_progress.json file
+ if item == '.download_progress.json' and os.path.isfile(item_path):
+ continue
+
+ # Allow folders with valid SHA256 hash names (64 hex characters)
+ if os.path.isdir(item_path):
+ # Check if the folder name is a valid SHA256 hash
+ if re.match(r'^[a-fA-F0-9]{64}$', item):
+ continue
+
+ # If we encounter anything else, it's not a dedicated folder
+ return False
+
+ return True
+
+ except Exception as e:
+ logger.error(f"Error checking if folder is dedicated: {e}")
+ return False
+
@staticmethod
def setup_routes(app):
"""Register miscellaneous routes"""
- app.router.add_post('/api/settings', MiscRoutes.update_settings)
+ app.router.add_get('/api/lm/settings', MiscRoutes.get_settings)
+ app.router.add_post('/api/lm/settings', MiscRoutes.update_settings)
- app.router.add_get('/api/health-check', lambda request: web.json_response({'status': 'ok'}))
+ app.router.add_get('/api/lm/health-check', lambda request: web.json_response({'status': 'ok'}))
- app.router.add_post('/api/open-file-location', MiscRoutes.open_file_location)
+ app.router.add_post('/api/lm/open-file-location', MiscRoutes.open_file_location)
# Usage stats routes
- app.router.add_post('/api/update-usage-stats', MiscRoutes.update_usage_stats)
- app.router.add_get('/api/get-usage-stats', MiscRoutes.get_usage_stats)
+ app.router.add_post('/api/lm/update-usage-stats', MiscRoutes.update_usage_stats)
+ app.router.add_get('/api/lm/get-usage-stats', MiscRoutes.get_usage_stats)
# Lora code update endpoint
- app.router.add_post('/api/update-lora-code', MiscRoutes.update_lora_code)
+ app.router.add_post('/api/lm/update-lora-code', MiscRoutes.update_lora_code)
# Add new route for getting trained words
- app.router.add_get('/api/trained-words', MiscRoutes.get_trained_words)
+ app.router.add_get('/api/lm/trained-words', MiscRoutes.get_trained_words)
# Add new route for getting model example files
- app.router.add_get('/api/model-example-files', MiscRoutes.get_model_example_files)
+ app.router.add_get('/api/lm/model-example-files', MiscRoutes.get_model_example_files)
# Node registry endpoints
- app.router.add_post('/api/register-nodes', MiscRoutes.register_nodes)
- app.router.add_get('/api/get-registry', MiscRoutes.get_registry)
+ app.router.add_post('/api/lm/register-nodes', MiscRoutes.register_nodes)
+ app.router.add_get('/api/lm/get-registry', MiscRoutes.get_registry)
# Add new route for checking if a model exists in the library
- app.router.add_get('/api/check-model-exists', MiscRoutes.check_model_exists)
+ app.router.add_get('/api/lm/check-model-exists', MiscRoutes.check_model_exists)
# Add routes for metadata archive database management
- app.router.add_post('/api/download-metadata-archive', MiscRoutes.download_metadata_archive)
- app.router.add_post('/api/remove-metadata-archive', MiscRoutes.remove_metadata_archive)
- app.router.add_get('/api/metadata-archive-status', MiscRoutes.get_metadata_archive_status)
+ app.router.add_post('/api/lm/download-metadata-archive', MiscRoutes.download_metadata_archive)
+ app.router.add_post('/api/lm/remove-metadata-archive', MiscRoutes.remove_metadata_archive)
+ app.router.add_get('/api/lm/metadata-archive-status', MiscRoutes.get_metadata_archive_status)
+
+ # Add route for checking model versions in library
+ app.router.add_get('/api/lm/model-versions-status', MiscRoutes.get_model_versions_status)
+
+ @staticmethod
+ async def get_settings(request):
+ """Get application settings that should be synced to frontend"""
+ try:
+ # Define keys that should be synced from backend to frontend
+ sync_keys = [
+ 'civitai_api_key',
+ 'default_lora_root',
+ 'default_checkpoint_root',
+ 'default_embedding_root',
+ 'base_model_path_mappings',
+ 'download_path_templates',
+ 'enable_metadata_archive_db',
+ 'language',
+ 'proxy_enabled',
+ 'proxy_type',
+ 'proxy_host',
+ 'proxy_port',
+ 'proxy_username',
+ 'proxy_password',
+ 'example_images_path',
+ 'optimize_example_images',
+ 'auto_download_example_images',
+ 'blur_mature_content',
+ 'autoplay_on_hover',
+ 'display_density',
+ 'card_info_display',
+ 'include_trigger_words',
+ 'show_only_sfw',
+ 'compact_mode'
+ ]
+
+ # Build response with only the keys that should be synced
+ response_data = {}
+ for key in sync_keys:
+ value = settings.get(key)
+ if value is not None:
+ response_data[key] = value
+
+ return web.json_response({
+ 'success': True,
+ 'settings': response_data
+ })
+
+ except Exception as e:
+ logger.error(f"Error getting settings: {e}", exc_info=True)
+ return web.json_response({
+ 'success': False,
+ 'error': str(e)
+ }, status=500)
@staticmethod
async def update_settings(request):
"""Update application settings"""
try:
data = await request.json()
+ proxy_keys = {'proxy_enabled', 'proxy_host', 'proxy_port', 'proxy_username', 'proxy_password', 'proxy_type'}
+ proxy_changed = False
# Validate and update settings
for key, value in data.items():
if value == settings.get(key):
# No change, skip
continue
- # Special handling for example_images_path - verify path exists
+ # Special handling for example_images_path - verify path exists and is dedicated
if key == 'example_images_path' and value:
if not os.path.exists(value):
return web.json_response({
@@ -137,16 +244,34 @@ class MiscRoutes:
'error': f"Path does not exist: {value}"
})
+ # Check if folder is dedicated for example images
+ if not MiscRoutes.is_dedicated_example_images_folder(value):
+ return web.json_response({
+ 'success': False,
+ 'error': "Please set a dedicated folder for example images."
+ })
+
# Path changed - server restart required for new path to take effect
old_path = settings.get('example_images_path')
if old_path != value:
logger.info(f"Example images path changed to {value} - server restart required")
- # Save to settings
- settings.set(key, value)
+ # Handle deletion for proxy credentials
+ if value == '__DELETE__' and key in ('proxy_username', 'proxy_password'):
+ settings.delete(key)
+ else:
+ # Save to settings
+ settings.set(key, value)
if key == 'enable_metadata_archive_db':
await update_metadata_providers()
+
+ if key in proxy_keys:
+ proxy_changed = True
+
+ if proxy_changed:
+ downloader = await get_downloader()
+ await downloader.refresh_session()
return web.json_response({'success': True})
except Exception as e:
@@ -773,6 +898,113 @@ class MiscRoutes:
'success': False,
'error': str(e)
}, status=500)
+
+ @staticmethod
+ async def get_model_versions_status(request):
+ """
+ Get all versions of a model from metadata provider and check their library status
+
+ Expects query parameters:
+ - modelId: int - Civitai model ID (required)
+
+ Returns:
+ - JSON with model type and versions list, each version includes 'inLibrary' flag
+ """
+ try:
+ # Get the modelId from query parameters
+ model_id_str = request.query.get('modelId')
+
+ # Validate modelId parameter (required)
+ if not model_id_str:
+ return web.json_response({
+ 'success': False,
+ 'error': 'Missing required parameter: modelId'
+ }, status=400)
+
+ try:
+ # Convert modelId to integer
+ model_id = int(model_id_str)
+ except ValueError:
+ return web.json_response({
+ 'success': False,
+ 'error': 'Parameter modelId must be an integer'
+ }, status=400)
+
+ # Get metadata provider
+ metadata_provider = await get_metadata_provider()
+ if not metadata_provider:
+ return web.json_response({
+ 'success': False,
+ 'error': 'Metadata provider not available'
+ }, status=503)
+
+ # Get model versions from metadata provider
+ response = await metadata_provider.get_model_versions(model_id)
+ if not response or not response.get('modelVersions'):
+ return web.json_response({
+ 'success': False,
+ 'error': 'Model not found'
+ }, status=404)
+
+ versions = response.get('modelVersions', [])
+ model_name = response.get('name', '')
+ model_type = response.get('type', '').lower()
+
+ # Determine scanner based on model type
+ scanner = None
+ normalized_type = None
+
+ if model_type in ['lora', 'locon', 'dora']:
+ scanner = await ServiceRegistry.get_lora_scanner()
+ normalized_type = 'lora'
+ elif model_type == 'checkpoint':
+ scanner = await ServiceRegistry.get_checkpoint_scanner()
+ normalized_type = 'checkpoint'
+ elif model_type == 'textualinversion':
+ scanner = await ServiceRegistry.get_embedding_scanner()
+ normalized_type = 'embedding'
+ else:
+ return web.json_response({
+ 'success': False,
+ 'error': f'Model type "{model_type}" is not supported'
+ }, status=400)
+
+ if not scanner:
+ return web.json_response({
+ 'success': False,
+ 'error': f'Scanner for type "{normalized_type}" is not available'
+ }, status=503)
+
+ # Get local versions from scanner
+ local_versions = await scanner.get_model_versions_by_id(model_id)
+ local_version_ids = set(version['versionId'] for version in local_versions)
+
+ # Add inLibrary flag to each version
+ enriched_versions = []
+ for version in versions:
+ version_id = version.get('id')
+ enriched_version = {
+ 'id': version_id,
+ 'name': version.get('name', ''),
+ 'thumbnailUrl': version.get('images')[0]['url'] if version.get('images') else None,
+ 'inLibrary': version_id in local_version_ids
+ }
+ enriched_versions.append(enriched_version)
+
+ return web.json_response({
+ 'success': True,
+ 'modelId': model_id,
+ 'modelName': model_name,
+ 'modelType': model_type,
+ 'versions': enriched_versions
+ })
+
+ except Exception as e:
+ logger.error(f"Failed to get model versions status: {e}", exc_info=True)
+ return web.json_response({
+ 'success': False,
+ 'error': str(e)
+ }, status=500)
@staticmethod
async def open_file_location(request):
diff --git a/py/routes/model_route_registrar.py b/py/routes/model_route_registrar.py
new file mode 100644
index 00000000..96f65fc5
--- /dev/null
+++ b/py/routes/model_route_registrar.py
@@ -0,0 +1,99 @@
+"""Route registrar for model endpoints."""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Callable, Iterable, Mapping
+
+from aiohttp import web
+
+
+@dataclass(frozen=True)
+class RouteDefinition:
+    """Declarative definition for an HTTP route."""
+
+ method: str
+ path_template: str
+ handler_name: str
+
+ def build_path(self, prefix: str) -> str:
+ return self.path_template.replace("{prefix}", prefix)
+
+
+COMMON_ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
+ RouteDefinition("GET", "/api/lm/{prefix}/list", "get_models"),
+ RouteDefinition("POST", "/api/lm/{prefix}/delete", "delete_model"),
+ RouteDefinition("POST", "/api/lm/{prefix}/exclude", "exclude_model"),
+ RouteDefinition("POST", "/api/lm/{prefix}/fetch-civitai", "fetch_civitai"),
+ RouteDefinition("POST", "/api/lm/{prefix}/fetch-all-civitai", "fetch_all_civitai"),
+ RouteDefinition("POST", "/api/lm/{prefix}/relink-civitai", "relink_civitai"),
+ RouteDefinition("POST", "/api/lm/{prefix}/replace-preview", "replace_preview"),
+ RouteDefinition("POST", "/api/lm/{prefix}/save-metadata", "save_metadata"),
+ RouteDefinition("POST", "/api/lm/{prefix}/add-tags", "add_tags"),
+ RouteDefinition("POST", "/api/lm/{prefix}/rename", "rename_model"),
+ RouteDefinition("POST", "/api/lm/{prefix}/bulk-delete", "bulk_delete_models"),
+ RouteDefinition("POST", "/api/lm/{prefix}/verify-duplicates", "verify_duplicates"),
+ RouteDefinition("POST", "/api/lm/{prefix}/move_model", "move_model"),
+ RouteDefinition("POST", "/api/lm/{prefix}/move_models_bulk", "move_models_bulk"),
+ RouteDefinition("GET", "/api/lm/{prefix}/auto-organize", "auto_organize_models"),
+ RouteDefinition("POST", "/api/lm/{prefix}/auto-organize", "auto_organize_models"),
+ RouteDefinition("GET", "/api/lm/{prefix}/auto-organize-progress", "get_auto_organize_progress"),
+ RouteDefinition("GET", "/api/lm/{prefix}/top-tags", "get_top_tags"),
+ RouteDefinition("GET", "/api/lm/{prefix}/base-models", "get_base_models"),
+ RouteDefinition("GET", "/api/lm/{prefix}/scan", "scan_models"),
+ RouteDefinition("GET", "/api/lm/{prefix}/roots", "get_model_roots"),
+ RouteDefinition("GET", "/api/lm/{prefix}/folders", "get_folders"),
+ RouteDefinition("GET", "/api/lm/{prefix}/folder-tree", "get_folder_tree"),
+ RouteDefinition("GET", "/api/lm/{prefix}/unified-folder-tree", "get_unified_folder_tree"),
+ RouteDefinition("GET", "/api/lm/{prefix}/find-duplicates", "find_duplicate_models"),
+ RouteDefinition("GET", "/api/lm/{prefix}/find-filename-conflicts", "find_filename_conflicts"),
+ RouteDefinition("GET", "/api/lm/{prefix}/get-notes", "get_model_notes"),
+ RouteDefinition("GET", "/api/lm/{prefix}/preview-url", "get_model_preview_url"),
+ RouteDefinition("GET", "/api/lm/{prefix}/civitai-url", "get_model_civitai_url"),
+ RouteDefinition("GET", "/api/lm/{prefix}/metadata", "get_model_metadata"),
+ RouteDefinition("GET", "/api/lm/{prefix}/model-description", "get_model_description"),
+ RouteDefinition("GET", "/api/lm/{prefix}/relative-paths", "get_relative_paths"),
+ RouteDefinition("GET", "/api/lm/{prefix}/civitai/versions/{model_id}", "get_civitai_versions"),
+ RouteDefinition("GET", "/api/lm/{prefix}/civitai/model/version/{modelVersionId}", "get_civitai_model_by_version"),
+ RouteDefinition("GET", "/api/lm/{prefix}/civitai/model/hash/{hash}", "get_civitai_model_by_hash"),
+ RouteDefinition("POST", "/api/lm/download-model", "download_model"),
+ RouteDefinition("GET", "/api/lm/download-model-get", "download_model_get"),
+ RouteDefinition("GET", "/api/lm/cancel-download-get", "cancel_download_get"),
+ RouteDefinition("GET", "/api/lm/download-progress/{download_id}", "get_download_progress"),
+ RouteDefinition("GET", "/{prefix}", "handle_models_page"),
+)
+
+
+class ModelRouteRegistrar:
+ """Bind declarative definitions to an aiohttp router."""
+
+ _METHOD_MAP = {
+ "GET": "add_get",
+ "POST": "add_post",
+ "PUT": "add_put",
+ "DELETE": "add_delete",
+ }
+
+ def __init__(self, app: web.Application) -> None:
+ self._app = app
+
+ def register_common_routes(
+ self,
+ prefix: str,
+ handler_lookup: Mapping[str, Callable[[web.Request], object]],
+ *,
+ definitions: Iterable[RouteDefinition] = COMMON_ROUTE_DEFINITIONS,
+ ) -> None:
+ for definition in definitions:
+ self._bind_route(definition.method, definition.build_path(prefix), handler_lookup[definition.handler_name])
+
+ def add_route(self, method: str, path: str, handler: Callable) -> None:
+ self._bind_route(method, path, handler)
+
+ def add_prefixed_route(self, method: str, path_template: str, prefix: str, handler: Callable) -> None:
+ self._bind_route(method, path_template.replace("{prefix}", prefix), handler)
+
+ def _bind_route(self, method: str, path: str, handler: Callable) -> None:
+ add_method_name = self._METHOD_MAP[method.upper()]
+ add_method = getattr(self._app.router, add_method_name)
+ add_method(path, handler)
+
diff --git a/py/routes/recipe_route_registrar.py b/py/routes/recipe_route_registrar.py
new file mode 100644
index 00000000..471edf19
--- /dev/null
+++ b/py/routes/recipe_route_registrar.py
@@ -0,0 +1,64 @@
+"""Route registrar for recipe endpoints."""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Callable, Mapping
+
+from aiohttp import web
+
+
+@dataclass(frozen=True)
+class RouteDefinition:
+ """Declarative definition for a recipe HTTP route."""
+
+ method: str
+ path: str
+ handler_name: str
+
+
+ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
+ RouteDefinition("GET", "/loras/recipes", "render_page"),
+ RouteDefinition("GET", "/api/lm/recipes", "list_recipes"),
+ RouteDefinition("GET", "/api/lm/recipe/{recipe_id}", "get_recipe"),
+ RouteDefinition("POST", "/api/lm/recipes/analyze-image", "analyze_uploaded_image"),
+ RouteDefinition("POST", "/api/lm/recipes/analyze-local-image", "analyze_local_image"),
+ RouteDefinition("POST", "/api/lm/recipes/save", "save_recipe"),
+ RouteDefinition("DELETE", "/api/lm/recipe/{recipe_id}", "delete_recipe"),
+ RouteDefinition("GET", "/api/lm/recipes/top-tags", "get_top_tags"),
+ RouteDefinition("GET", "/api/lm/recipes/base-models", "get_base_models"),
+ RouteDefinition("GET", "/api/lm/recipe/{recipe_id}/share", "share_recipe"),
+ RouteDefinition("GET", "/api/lm/recipe/{recipe_id}/share/download", "download_shared_recipe"),
+ RouteDefinition("GET", "/api/lm/recipe/{recipe_id}/syntax", "get_recipe_syntax"),
+ RouteDefinition("PUT", "/api/lm/recipe/{recipe_id}/update", "update_recipe"),
+ RouteDefinition("POST", "/api/lm/recipe/lora/reconnect", "reconnect_lora"),
+ RouteDefinition("GET", "/api/lm/recipes/find-duplicates", "find_duplicates"),
+ RouteDefinition("POST", "/api/lm/recipes/bulk-delete", "bulk_delete"),
+ RouteDefinition("POST", "/api/lm/recipes/save-from-widget", "save_recipe_from_widget"),
+ RouteDefinition("GET", "/api/lm/recipes/for-lora", "get_recipes_for_lora"),
+ RouteDefinition("GET", "/api/lm/recipes/scan", "scan_recipes"),
+)
+
+
+class RecipeRouteRegistrar:
+ """Bind declarative recipe definitions to an aiohttp router."""
+
+ _METHOD_MAP = {
+ "GET": "add_get",
+ "POST": "add_post",
+ "PUT": "add_put",
+ "DELETE": "add_delete",
+ }
+
+ def __init__(self, app: web.Application) -> None:
+ self._app = app
+
+ def register_routes(self, handler_lookup: Mapping[str, Callable[[web.Request], object]]) -> None:
+ for definition in ROUTE_DEFINITIONS:
+ handler = handler_lookup[definition.handler_name]
+ self._bind_route(definition.method, definition.path, handler)
+
+ def _bind_route(self, method: str, path: str, handler: Callable) -> None:
+ add_method_name = self._METHOD_MAP[method.upper()]
+ add_method = getattr(self._app.router, add_method_name)
+ add_method(path, handler)
+
diff --git a/py/routes/recipe_routes.py b/py/routes/recipe_routes.py
index 003d869a..2c233d01 100644
--- a/py/routes/recipe_routes.py
+++ b/py/routes/recipe_routes.py
@@ -1,1652 +1,21 @@
-import os
-import time
-import base64
-import jinja2
-import numpy as np
-from PIL import Image
-import io
-import logging
+"""Concrete recipe route configuration."""
+
from aiohttp import web
-from typing import Dict
-import tempfile
-import json
-import asyncio
-import sys
-from ..utils.exif_utils import ExifUtils
-from ..recipes import RecipeParserFactory
-from ..utils.constants import CARD_PREVIEW_WIDTH
-from ..services.settings_manager import settings
-from ..services.server_i18n import server_i18n
-from ..config import config
+from .base_recipe_routes import BaseRecipeRoutes
+from .recipe_route_registrar import RecipeRouteRegistrar
-# Check if running in standalone mode
-standalone_mode = 'nodes' not in sys.modules
-from ..services.service_registry import ServiceRegistry # Add ServiceRegistry import
-from ..services.downloader import get_downloader
+class RecipeRoutes(BaseRecipeRoutes):
+ """API route handlers for Recipe management."""
-# Only import MetadataRegistry in non-standalone mode
-if not standalone_mode:
- # Import metadata_collector functions and classes conditionally
- from ..metadata_collector import get_metadata # Add MetadataCollector import
- from ..metadata_collector.metadata_processor import MetadataProcessor # Add MetadataProcessor import
- from ..metadata_collector.metadata_registry import MetadataRegistry
-
-logger = logging.getLogger(__name__)
-
-class RecipeRoutes:
- """API route handlers for Recipe management"""
-
- def __init__(self):
- # Initialize service references as None, will be set during async init
- self.recipe_scanner = None
- self.civitai_client = None
- self.template_env = jinja2.Environment(
- loader=jinja2.FileSystemLoader(config.templates_path),
- autoescape=True
- )
-
- # Pre-warm the cache
- self._init_cache_task = None
-
- async def init_services(self):
- """Initialize services from ServiceRegistry"""
- self.recipe_scanner = await ServiceRegistry.get_recipe_scanner()
- self.civitai_client = await ServiceRegistry.get_civitai_client()
+ template_name = "recipes.html"
@classmethod
def setup_routes(cls, app: web.Application):
- """Register API routes"""
+ """Register API routes using the declarative registrar."""
+
routes = cls()
- app.router.add_get('/loras/recipes', routes.handle_recipes_page)
-
- app.router.add_get('/api/recipes', routes.get_recipes)
- app.router.add_get('/api/recipe/{recipe_id}', routes.get_recipe_detail)
- app.router.add_post('/api/recipes/analyze-image', routes.analyze_recipe_image)
- app.router.add_post('/api/recipes/analyze-local-image', routes.analyze_local_image)
- app.router.add_post('/api/recipes/save', routes.save_recipe)
- app.router.add_delete('/api/recipe/{recipe_id}', routes.delete_recipe)
-
- # Add new filter-related endpoints
- app.router.add_get('/api/recipes/top-tags', routes.get_top_tags)
- app.router.add_get('/api/recipes/base-models', routes.get_base_models)
-
- # Add new sharing endpoints
- app.router.add_get('/api/recipe/{recipe_id}/share', routes.share_recipe)
- app.router.add_get('/api/recipe/{recipe_id}/share/download', routes.download_shared_recipe)
-
- # Add new endpoint for getting recipe syntax
- app.router.add_get('/api/recipe/{recipe_id}/syntax', routes.get_recipe_syntax)
-
- # Add new endpoint for updating recipe metadata (name, tags and source_path)
- app.router.add_put('/api/recipe/{recipe_id}/update', routes.update_recipe)
-
- # Add new endpoint for reconnecting deleted LoRAs
- app.router.add_post('/api/recipe/lora/reconnect', routes.reconnect_lora)
-
- # Add new endpoint for finding duplicate recipes
- app.router.add_get('/api/recipes/find-duplicates', routes.find_duplicates)
-
- # Add new endpoint for bulk deletion of recipes
- app.router.add_post('/api/recipes/bulk-delete', routes.bulk_delete)
-
- # Start cache initialization
- app.on_startup.append(routes._init_cache)
-
- app.router.add_post('/api/recipes/save-from-widget', routes.save_recipe_from_widget)
-
- # Add route to get recipes for a specific Lora
- app.router.add_get('/api/recipes/for-lora', routes.get_recipes_for_lora)
-
- # Add new endpoint for scanning and rebuilding the recipe cache
- app.router.add_get('/api/recipes/scan', routes.scan_recipes)
-
- async def _init_cache(self, app):
- """Initialize cache on startup"""
- try:
- # Initialize services first
- await self.init_services()
-
- # Now that services are initialized, get the lora scanner
- lora_scanner = self.recipe_scanner._lora_scanner
-
- # Get lora cache to ensure it's initialized
- lora_cache = await lora_scanner.get_cached_data()
-
- # Verify hash index is built
- if hasattr(lora_scanner, '_hash_index'):
- hash_index_size = len(lora_scanner._hash_index._hash_to_path) if hasattr(lora_scanner._hash_index, '_hash_to_path') else 0
-
- # Now that lora scanner is initialized, initialize recipe cache
- await self.recipe_scanner.get_cached_data(force_refresh=True)
- except Exception as e:
- logger.error(f"Error pre-warming recipe cache: {e}", exc_info=True)
-
- async def handle_recipes_page(self, request: web.Request) -> web.Response:
- """Handle GET /loras/recipes request"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # 获取用户语言设置
- user_language = settings.get('language', 'en')
-
- # 设置服务端i18n语言
- server_i18n.set_locale(user_language)
-
- # 为模板环境添加i18n过滤器
- if not hasattr(self.template_env, '_i18n_filter_added'):
- self.template_env.filters['t'] = server_i18n.create_template_filter()
- self.template_env._i18n_filter_added = True
-
- # Skip initialization check and directly try to get cached data
- try:
- # Recipe scanner will initialize cache if needed
- await self.recipe_scanner.get_cached_data(force_refresh=False)
- template = self.template_env.get_template('recipes.html')
- rendered = template.render(
- recipes=[], # Frontend will load recipes via API
- is_initializing=False,
- settings=settings,
- request=request,
- # 添加服务端翻译函数
- t=server_i18n.get_translation,
- )
- except Exception as cache_error:
- logger.error(f"Error loading recipe cache data: {cache_error}")
- # Still keep error handling - show initializing page on error
- template = self.template_env.get_template('recipes.html')
- rendered = template.render(
- is_initializing=True,
- settings=settings,
- request=request,
- # 添加服务端翻译函数
- t=server_i18n.get_translation,
- )
- logger.info("Recipe cache error, returning initialization page")
-
- return web.Response(
- text=rendered,
- content_type='text/html'
- )
-
- except Exception as e:
- logger.error(f"Error handling recipes request: {e}", exc_info=True)
- return web.Response(
- text="Error loading recipes page",
- status=500
- )
-
- async def get_recipes(self, request: web.Request) -> web.Response:
- """API endpoint for getting paginated recipes"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Get query parameters with defaults
- page = int(request.query.get('page', '1'))
- page_size = int(request.query.get('page_size', '20'))
- sort_by = request.query.get('sort_by', 'date')
- search = request.query.get('search', None)
-
- # Get search options (renamed for better clarity)
- search_title = request.query.get('search_title', 'true').lower() == 'true'
- search_tags = request.query.get('search_tags', 'true').lower() == 'true'
- search_lora_name = request.query.get('search_lora_name', 'true').lower() == 'true'
- search_lora_model = request.query.get('search_lora_model', 'true').lower() == 'true'
-
- # Get filter parameters
- base_models = request.query.get('base_models', None)
- tags = request.query.get('tags', None)
-
- # New parameter: get LoRA hash filter
- lora_hash = request.query.get('lora_hash', None)
-
- # Parse filter parameters
- filters = {}
- if base_models:
- filters['base_model'] = base_models.split(',')
- if tags:
- filters['tags'] = tags.split(',')
-
- # Add search options to filters
- search_options = {
- 'title': search_title,
- 'tags': search_tags,
- 'lora_name': search_lora_name,
- 'lora_model': search_lora_model
- }
-
- # Get paginated data with the new lora_hash parameter
- result = await self.recipe_scanner.get_paginated_data(
- page=page,
- page_size=page_size,
- sort_by=sort_by,
- search=search,
- filters=filters,
- search_options=search_options,
- lora_hash=lora_hash
- )
-
- # Format the response data with static URLs for file paths
- for item in result['items']:
- # Always ensure file_url is set
- if 'file_path' in item:
- item['file_url'] = self._format_recipe_file_url(item['file_path'])
- else:
- item['file_url'] = '/loras_static/images/no-preview.png'
-
- # 确保 loras 数组存在
- if 'loras' not in item:
- item['loras'] = []
-
- # 确保有 base_model 字段
- if 'base_model' not in item:
- item['base_model'] = ""
-
- return web.json_response(result)
- except Exception as e:
- logger.error(f"Error retrieving recipes: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- async def get_recipe_detail(self, request: web.Request) -> web.Response:
- """Get detailed information about a specific recipe"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- recipe_id = request.match_info['recipe_id']
-
- # Use the new get_recipe_by_id method from recipe_scanner
- recipe = await self.recipe_scanner.get_recipe_by_id(recipe_id)
-
- if not recipe:
- return web.json_response({"error": "Recipe not found"}, status=404)
-
- return web.json_response(recipe)
- except Exception as e:
- logger.error(f"Error retrieving recipe details: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- def _format_recipe_file_url(self, file_path: str) -> str:
- """Format file path for recipe image as a URL"""
- try:
- # Return the file URL directly for the first lora root's preview
- recipes_dir = os.path.join(config.loras_roots[0], "recipes").replace(os.sep, '/')
- if file_path.replace(os.sep, '/').startswith(recipes_dir):
- relative_path = os.path.relpath(file_path, config.loras_roots[0]).replace(os.sep, '/')
- return f"/loras_static/root1/preview/{relative_path}"
-
- # If not in recipes dir, try to create a valid URL from the file path
- file_name = os.path.basename(file_path)
- return f"/loras_static/root1/preview/recipes/{file_name}"
- except Exception as e:
- logger.error(f"Error formatting recipe file URL: {e}", exc_info=True)
- return '/loras_static/images/no-preview.png' # Return default image on error
-
- def _format_recipe_data(self, recipe: Dict) -> Dict:
- """Format recipe data for API response"""
- formatted = {**recipe} # Copy all fields
-
- # Format file paths to URLs
- if 'file_path' in formatted:
- formatted['file_url'] = self._format_recipe_file_url(formatted['file_path'])
-
- # Format dates for display
- for date_field in ['created_date', 'modified']:
- if date_field in formatted:
- formatted[f"{date_field}_formatted"] = self._format_timestamp(formatted[date_field])
-
- return formatted
-
- def _format_timestamp(self, timestamp: float) -> str:
- """Format timestamp for display"""
- from datetime import datetime
- return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
-
- async def analyze_recipe_image(self, request: web.Request) -> web.Response:
- """Analyze an uploaded image or URL for recipe metadata"""
- temp_path = None
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Check if request contains multipart data (image) or JSON data (url)
- content_type = request.headers.get('Content-Type', '')
-
- is_url_mode = False
- metadata = None # Initialize metadata variable
-
- if 'multipart/form-data' in content_type:
- # Handle image upload
- reader = await request.multipart()
- field = await reader.next()
-
- if field.name != 'image':
- return web.json_response({
- "error": "No image field found",
- "loras": []
- }, status=400)
-
- # Create a temporary file to store the uploaded image
- with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
- while True:
- chunk = await field.read_chunk()
- if not chunk:
- break
- temp_file.write(chunk)
- temp_path = temp_file.name
-
- elif 'application/json' in content_type:
- # Handle URL input
- data = await request.json()
- url = data.get('url')
- is_url_mode = True
-
- if not url:
- return web.json_response({
- "error": "No URL provided",
- "loras": []
- }, status=400)
-
- # Check if this is a Civitai image URL
- import re
- civitai_image_match = re.match(r'https://civitai\.com/images/(\d+)', url)
-
- if civitai_image_match:
- # Extract image ID and fetch image info using get_image_info
- image_id = civitai_image_match.group(1)
- image_info = await self.civitai_client.get_image_info(image_id)
-
- if not image_info:
- return web.json_response({
- "error": "Failed to fetch image information from Civitai",
- "loras": []
- }, status=400)
-
- # Get image URL from response
- image_url = image_info.get('url')
- if not image_url:
- return web.json_response({
- "error": "No image URL found in Civitai response",
- "loras": []
- }, status=400)
-
- # Download image using unified downloader
- downloader = await get_downloader()
- # Create a temporary file to save the downloaded image
- with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
- temp_path = temp_file.name
-
- success, result = await downloader.download_file(
- image_url,
- temp_path,
- use_auth=False # Image downloads typically don't need auth
- )
-
- if not success:
- return web.json_response({
- "error": f"Failed to download image from URL: {result}",
- "loras": []
- }, status=400)
-
- # Use meta field from image_info as metadata
- if 'meta' in image_info:
- metadata = image_info['meta']
-
- # If metadata wasn't obtained from Civitai API, extract it from the image
- if metadata is None:
- # Extract metadata from the image using ExifUtils
- metadata = ExifUtils.extract_image_metadata(temp_path)
-
- # If no metadata found, return a more specific error
- if not metadata:
- result = {
- "error": "No metadata found in this image",
- "loras": [] # Return empty loras array to prevent client-side errors
- }
-
- # For URL mode, include the image data as base64
- if is_url_mode and temp_path:
- with open(temp_path, "rb") as image_file:
- result["image_base64"] = base64.b64encode(image_file.read()).decode('utf-8')
-
- return web.json_response(result, status=200)
-
- # Use the parser factory to get the appropriate parser
- parser = RecipeParserFactory.create_parser(metadata)
-
- if parser is None:
- result = {
- "error": "No parser found for this image",
- "loras": [] # Return empty loras array to prevent client-side errors
- }
-
- # For URL mode, include the image data as base64
- if is_url_mode and temp_path:
- with open(temp_path, "rb") as image_file:
- result["image_base64"] = base64.b64encode(image_file.read()).decode('utf-8')
-
- return web.json_response(result, status=200)
-
- # Parse the metadata
- result = await parser.parse_metadata(
- metadata,
- recipe_scanner=self.recipe_scanner
- )
-
- # For URL mode, include the image data as base64
- if is_url_mode and temp_path:
- with open(temp_path, "rb") as image_file:
- result["image_base64"] = base64.b64encode(image_file.read()).decode('utf-8')
-
- # Check for errors
- if "error" in result and not result.get("loras"):
- return web.json_response(result, status=200)
-
- # Calculate fingerprint from parsed loras
- from ..utils.utils import calculate_recipe_fingerprint
- fingerprint = calculate_recipe_fingerprint(result.get("loras", []))
-
- # Add fingerprint to result
- result["fingerprint"] = fingerprint
-
- # Find matching recipes with the same fingerprint
- matching_recipes = []
- if fingerprint:
- matching_recipes = await self.recipe_scanner.find_recipes_by_fingerprint(fingerprint)
-
- # Add matching recipes to result
- result["matching_recipes"] = matching_recipes
-
- return web.json_response(result)
-
- except Exception as e:
- logger.error(f"Error analyzing recipe image: {e}", exc_info=True)
- return web.json_response({
- "error": str(e),
- "loras": [] # Return empty loras array to prevent client-side errors
- }, status=500)
- finally:
- # Clean up the temporary file in the finally block
- if temp_path and os.path.exists(temp_path):
- try:
- os.unlink(temp_path)
- except Exception as e:
- logger.error(f"Error deleting temporary file: {e}")
-
- async def analyze_local_image(self, request: web.Request) -> web.Response:
- """Analyze a local image file for recipe metadata"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Get JSON data from request
- data = await request.json()
- file_path = data.get('path')
-
- if not file_path:
- return web.json_response({
- 'error': 'No file path provided',
- 'loras': []
- }, status=400)
-
- # Normalize file path for cross-platform compatibility
- file_path = os.path.normpath(file_path.strip('"').strip("'"))
-
- # Validate that the file exists
- if not os.path.isfile(file_path):
- return web.json_response({
- 'error': 'File not found',
- 'loras': []
- }, status=404)
-
- # Extract metadata from the image using ExifUtils
- metadata = ExifUtils.extract_image_metadata(file_path)
-
- # If no metadata found, return error
- if not metadata:
- # Get base64 image data
- with open(file_path, "rb") as image_file:
- image_base64 = base64.b64encode(image_file.read()).decode('utf-8')
-
- return web.json_response({
- "error": "No metadata found in this image",
- "loras": [], # Return empty loras array to prevent client-side errors
- "image_base64": image_base64
- }, status=200)
-
- # Use the parser factory to get the appropriate parser
- parser = RecipeParserFactory.create_parser(metadata)
-
- if parser is None:
- # Get base64 image data
- with open(file_path, "rb") as image_file:
- image_base64 = base64.b64encode(image_file.read()).decode('utf-8')
-
- return web.json_response({
- "error": "No parser found for this image",
- "loras": [], # Return empty loras array to prevent client-side errors
- "image_base64": image_base64
- }, status=200)
-
- # Parse the metadata
- result = await parser.parse_metadata(
- metadata,
- recipe_scanner=self.recipe_scanner
- )
-
- # Add base64 image data to result
- with open(file_path, "rb") as image_file:
- result["image_base64"] = base64.b64encode(image_file.read()).decode('utf-8')
-
- # Check for errors
- if "error" in result and not result.get("loras"):
- return web.json_response(result, status=200)
-
- # Calculate fingerprint from parsed loras
- from ..utils.utils import calculate_recipe_fingerprint
- fingerprint = calculate_recipe_fingerprint(result.get("loras", []))
-
- # Add fingerprint to result
- result["fingerprint"] = fingerprint
-
- # Find matching recipes with the same fingerprint
- matching_recipes = []
- if fingerprint:
- matching_recipes = await self.recipe_scanner.find_recipes_by_fingerprint(fingerprint)
-
- # Add matching recipes to result
- result["matching_recipes"] = matching_recipes
-
- return web.json_response(result)
-
- except Exception as e:
- logger.error(f"Error analyzing local image: {e}", exc_info=True)
- return web.json_response({
- 'error': str(e),
- 'loras': [] # Return empty loras array to prevent client-side errors
- }, status=500)
-
- async def save_recipe(self, request: web.Request) -> web.Response:
- """Save a recipe to the recipes folder"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- reader = await request.multipart()
-
- # Process form data
- image = None
- image_base64 = None
- image_url = None
- name = None
- tags = []
- metadata = None
-
- while True:
- field = await reader.next()
- if field is None:
- break
-
- if field.name == 'image':
- # Read image data
- image_data = b''
- while True:
- chunk = await field.read_chunk()
- if not chunk:
- break
- image_data += chunk
- image = image_data
-
- elif field.name == 'image_base64':
- # Get base64 image data
- image_base64 = await field.text()
-
- elif field.name == 'image_url':
- # Get image URL
- image_url = await field.text()
-
- elif field.name == 'name':
- name = await field.text()
-
- elif field.name == 'tags':
- tags_text = await field.text()
- try:
- tags = json.loads(tags_text)
- except:
- tags = []
-
- elif field.name == 'metadata':
- metadata_text = await field.text()
- try:
- metadata = json.loads(metadata_text)
- except:
- metadata = {}
-
- missing_fields = []
- if not name:
- missing_fields.append("name")
- if not metadata:
- missing_fields.append("metadata")
- if missing_fields:
- return web.json_response({"error": f"Missing required fields: {', '.join(missing_fields)}"}, status=400)
-
- # Handle different image sources
- if not image:
- if image_base64:
- # Convert base64 to binary
- try:
- # Remove potential data URL prefix
- if ',' in image_base64:
- image_base64 = image_base64.split(',', 1)[1]
- image = base64.b64decode(image_base64)
- except Exception as e:
- return web.json_response({"error": f"Invalid base64 image data: {str(e)}"}, status=400)
- else:
- return web.json_response({"error": "No image data provided"}, status=400)
-
- # Create recipes directory if it doesn't exist
- recipes_dir = self.recipe_scanner.recipes_dir
- os.makedirs(recipes_dir, exist_ok=True)
-
- # Generate UUID for the recipe
- import uuid
- recipe_id = str(uuid.uuid4())
-
- # Optimize the image (resize and convert to WebP)
- optimized_image, extension = ExifUtils.optimize_image(
- image_data=image,
- target_width=CARD_PREVIEW_WIDTH,
- format='webp',
- quality=85,
- preserve_metadata=True
- )
-
- # Save the optimized image
- image_filename = f"{recipe_id}{extension}"
- image_path = os.path.join(recipes_dir, image_filename)
- with open(image_path, 'wb') as f:
- f.write(optimized_image)
-
- # Create the recipe data structure
- current_time = time.time()
-
- # Format loras data according to the recipe.json format
- loras_data = []
- for lora in metadata.get("loras", []):
- # Modified: Always include deleted LoRAs in the recipe metadata
- # Even if they're marked to be excluded, we still keep their identifying information
- # The exclude flag will only be used to determine if they should be included in recipe syntax
-
- # Convert frontend lora format to recipe format
- lora_entry = {
- "file_name": lora.get("file_name", "") or os.path.splitext(os.path.basename(lora.get("localPath", "")))[0] if lora.get("localPath") else "",
- "hash": lora.get("hash", "").lower() if lora.get("hash") else "",
- "strength": float(lora.get("weight", 1.0)),
- "modelVersionId": lora.get("id", 0),
- "modelName": lora.get("name", ""),
- "modelVersionName": lora.get("version", ""),
- "isDeleted": lora.get("isDeleted", False), # Preserve deletion status in saved recipe
- "exclude": lora.get("exclude", False) # Add exclude flag to the recipe
- }
- loras_data.append(lora_entry)
-
- # Format gen_params according to the recipe.json format
- gen_params = metadata.get("gen_params", {})
- if not gen_params and "raw_metadata" in metadata:
- # Extract from raw metadata if available
- raw_metadata = metadata.get("raw_metadata", {})
- gen_params = {
- "prompt": raw_metadata.get("prompt", ""),
- "negative_prompt": raw_metadata.get("negative_prompt", ""),
- "checkpoint": raw_metadata.get("checkpoint", {}),
- "steps": raw_metadata.get("steps", ""),
- "sampler": raw_metadata.get("sampler", ""),
- "cfg_scale": raw_metadata.get("cfg_scale", ""),
- "seed": raw_metadata.get("seed", ""),
- "size": raw_metadata.get("size", ""),
- "clip_skip": raw_metadata.get("clip_skip", "")
- }
-
- # Calculate recipe fingerprint
- from ..utils.utils import calculate_recipe_fingerprint
- fingerprint = calculate_recipe_fingerprint(loras_data)
-
- # Create the recipe data structure
- recipe_data = {
- "id": recipe_id,
- "file_path": image_path,
- "title": name,
- "modified": current_time,
- "created_date": current_time,
- "base_model": metadata.get("base_model", ""),
- "loras": loras_data,
- "gen_params": gen_params,
- "fingerprint": fingerprint
- }
-
- # Add tags if provided
- if tags:
- recipe_data["tags"] = tags
-
- # Add source_path if provided in metadata
- if metadata.get("source_path"):
- recipe_data["source_path"] = metadata.get("source_path")
-
- # Save the recipe JSON
- json_filename = f"{recipe_id}.recipe.json"
- json_path = os.path.join(recipes_dir, json_filename)
- with open(json_path, 'w', encoding='utf-8') as f:
- json.dump(recipe_data, f, indent=4, ensure_ascii=False)
-
- # Add recipe metadata to the image
- ExifUtils.append_recipe_metadata(image_path, recipe_data)
-
- # Check for duplicates
- matching_recipes = []
- if fingerprint:
- matching_recipes = await self.recipe_scanner.find_recipes_by_fingerprint(fingerprint)
- # Remove current recipe from matches
- if recipe_id in matching_recipes:
- matching_recipes.remove(recipe_id)
-
- # Simplified cache update approach
- # Instead of trying to update the cache directly, just set it to None
- # to force a refresh on the next get_cached_data call
- if self.recipe_scanner._cache is not None:
- # Add the recipe to the raw data if the cache exists
- # This is a simple direct update without locks or timeouts
- self.recipe_scanner._cache.raw_data.append(recipe_data)
- # Schedule a background task to resort the cache
- asyncio.create_task(self.recipe_scanner._cache.resort())
- logger.info(f"Added recipe {recipe_id} to cache")
-
- return web.json_response({
- 'success': True,
- 'recipe_id': recipe_id,
- 'image_path': image_path,
- 'json_path': json_path,
- 'matching_recipes': matching_recipes
- })
-
- except Exception as e:
- logger.error(f"Error saving recipe: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- async def delete_recipe(self, request: web.Request) -> web.Response:
- """Delete a recipe by ID"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- recipe_id = request.match_info['recipe_id']
-
- # Get recipes directory
- recipes_dir = self.recipe_scanner.recipes_dir
- if not recipes_dir or not os.path.exists(recipes_dir):
- return web.json_response({"error": "Recipes directory not found"}, status=404)
-
- # Find recipe JSON file
- recipe_json_path = os.path.join(recipes_dir, f"{recipe_id}.recipe.json")
- if not os.path.exists(recipe_json_path):
- return web.json_response({"error": "Recipe not found"}, status=404)
-
- # Load recipe data to get image path
- with open(recipe_json_path, 'r', encoding='utf-8') as f:
- recipe_data = json.load(f)
-
- # Get image path
- image_path = recipe_data.get('file_path')
-
- # Delete recipe JSON file
- os.remove(recipe_json_path)
- logger.info(f"Deleted recipe JSON file: {recipe_json_path}")
-
- # Delete recipe image if it exists
- if image_path and os.path.exists(image_path):
- os.remove(image_path)
- logger.info(f"Deleted recipe image: {image_path}")
-
- # Simplified cache update approach
- if self.recipe_scanner._cache is not None:
- # Remove the recipe from raw_data if it exists
- self.recipe_scanner._cache.raw_data = [
- r for r in self.recipe_scanner._cache.raw_data
- if str(r.get('id', '')) != recipe_id
- ]
- # Schedule a background task to resort the cache
- asyncio.create_task(self.recipe_scanner._cache.resort())
- logger.info(f"Removed recipe {recipe_id} from cache")
-
- return web.json_response({"success": True, "message": "Recipe deleted successfully"})
- except Exception as e:
- logger.error(f"Error deleting recipe: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- async def get_top_tags(self, request: web.Request) -> web.Response:
- """Get top tags used in recipes"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Get limit parameter with default
- limit = int(request.query.get('limit', '20'))
-
- # Get all recipes from cache
- cache = await self.recipe_scanner.get_cached_data()
-
- # Count tag occurrences
- tag_counts = {}
- for recipe in cache.raw_data:
- if 'tags' in recipe and recipe['tags']:
- for tag in recipe['tags']:
- tag_counts[tag] = tag_counts.get(tag, 0) + 1
-
- # Sort tags by count and limit results
- sorted_tags = [{'tag': tag, 'count': count} for tag, count in tag_counts.items()]
- sorted_tags.sort(key=lambda x: x['count'], reverse=True)
- top_tags = sorted_tags[:limit]
-
- return web.json_response({
- 'success': True,
- 'tags': top_tags
- })
- except Exception as e:
- logger.error(f"Error retrieving top tags: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def get_base_models(self, request: web.Request) -> web.Response:
- """Get base models used in recipes"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Get all recipes from cache
- cache = await self.recipe_scanner.get_cached_data()
-
- # Count base model occurrences
- base_model_counts = {}
- for recipe in cache.raw_data:
- if 'base_model' in recipe and recipe['base_model']:
- base_model = recipe['base_model']
- base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1
-
- # Sort base models by count
- sorted_models = [{'name': model, 'count': count} for model, count in base_model_counts.items()]
- sorted_models.sort(key=lambda x: x['count'], reverse=True)
-
- return web.json_response({
- 'success': True,
- 'base_models': sorted_models
- })
- except Exception as e:
- logger.error(f"Error retrieving base models: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)}
- , status=500)
-
- async def share_recipe(self, request: web.Request) -> web.Response:
- """Process a recipe image for sharing by adding metadata to EXIF"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- recipe_id = request.match_info['recipe_id']
-
- # Get all recipes from cache
- cache = await self.recipe_scanner.get_cached_data()
-
- # Find the specific recipe
- recipe = next((r for r in cache.raw_data if str(r.get('id', '')) == recipe_id), None)
-
- if not recipe:
- return web.json_response({"error": "Recipe not found"}, status=404)
-
- # Get the image path
- image_path = recipe.get('file_path')
- if not image_path or not os.path.exists(image_path):
- return web.json_response({"error": "Recipe image not found"}, status=404)
-
- # Create a temporary copy of the image to modify
- import tempfile
- import shutil
-
- # Create temp file with same extension
- ext = os.path.splitext(image_path)[1]
- with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as temp_file:
- temp_path = temp_file.name
-
- # Copy the original image to temp file
- shutil.copy2(image_path, temp_path)
- processed_path = temp_path
-
- # Create a URL for the processed image
- # Use a timestamp to prevent caching
- timestamp = int(time.time())
- url_path = f"/api/recipe/{recipe_id}/share/download?t={timestamp}"
-
- # Store the temp path in a dictionary to serve later
- if not hasattr(self, '_shared_recipes'):
- self._shared_recipes = {}
-
- self._shared_recipes[recipe_id] = {
- 'path': processed_path,
- 'timestamp': timestamp,
- 'expires': time.time() + 300 # Expire after 5 minutes
- }
-
- # Clean up old entries
- self._cleanup_shared_recipes()
-
- return web.json_response({
- 'success': True,
- 'download_url': url_path,
- 'filename': f"recipe_{recipe.get('title', '').replace(' ', '_').lower()}{ext}"
- })
- except Exception as e:
- logger.error(f"Error sharing recipe: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- async def download_shared_recipe(self, request: web.Request) -> web.Response:
- """Serve a processed recipe image for download"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- recipe_id = request.match_info['recipe_id']
-
- # Check if we have this shared recipe
- if not hasattr(self, '_shared_recipes') or recipe_id not in self._shared_recipes:
- return web.json_response({"error": "Shared recipe not found or expired"}, status=404)
-
- shared_info = self._shared_recipes[recipe_id]
- file_path = shared_info['path']
-
- if not os.path.exists(file_path):
- return web.json_response({"error": "Shared recipe file not found"}, status=404)
-
- # Get recipe to determine filename
- cache = await self.recipe_scanner.get_cached_data()
- recipe = next((r for r in cache.raw_data if str(r.get('id', '')) == recipe_id), None)
-
- # Set filename for download
- filename = f"recipe_{recipe.get('title', '').replace(' ', '_').lower() if recipe else recipe_id}"
- ext = os.path.splitext(file_path)[1]
- download_filename = f"{filename}{ext}"
-
- # Serve the file
- return web.FileResponse(
- file_path,
- headers={
- 'Content-Disposition': f'attachment; filename="{download_filename}"'
- }
- )
- except Exception as e:
- logger.error(f"Error downloading shared recipe: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- def _cleanup_shared_recipes(self):
- """Clean up expired shared recipes"""
- if not hasattr(self, '_shared_recipes'):
- return
-
- current_time = time.time()
- expired_ids = [rid for rid, info in self._shared_recipes.items()
- if current_time > info.get('expires', 0)]
-
- for rid in expired_ids:
- try:
- # Delete the temporary file
- file_path = self._shared_recipes[rid]['path']
- if os.path.exists(file_path):
- os.unlink(file_path)
-
- # Remove from dictionary
- del self._shared_recipes[rid]
- except Exception as e:
- logger.error(f"Error cleaning up shared recipe {rid}: {e}")
-
- async def save_recipe_from_widget(self, request: web.Request) -> web.Response:
- """Save a recipe from the LoRAs widget"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Get metadata using the metadata collector instead of workflow parsing
- raw_metadata = get_metadata()
- metadata_dict = MetadataProcessor.to_dict(raw_metadata)
-
- # Check if we have valid metadata
- if not metadata_dict:
- return web.json_response({"error": "No generation metadata found"}, status=400)
-
- # Get the most recent image from metadata registry instead of temp directory
- if not standalone_mode:
- metadata_registry = MetadataRegistry()
- latest_image = metadata_registry.get_first_decoded_image()
- else:
- latest_image = None
-
- if latest_image is None:
- return web.json_response({"error": "No recent images found to use for recipe. Try generating an image first."}, status=400)
-
- # Convert the image data to bytes - handle tuple and tensor cases
- logger.debug(f"Image type: {type(latest_image)}")
-
- try:
- # Handle the tuple case first
- if isinstance(latest_image, tuple):
- # Extract the tensor from the tuple
- if len(latest_image) > 0:
- tensor_image = latest_image[0]
- else:
- return web.json_response({"error": "Empty image tuple received"}, status=400)
- else:
- tensor_image = latest_image
-
- # Get the shape info for debugging
- if hasattr(tensor_image, 'shape'):
- shape_info = tensor_image.shape
- logger.debug(f"Tensor shape: {shape_info}, dtype: {tensor_image.dtype}")
-
- import torch
-
- # Convert tensor to numpy array
- if isinstance(tensor_image, torch.Tensor):
- image_np = tensor_image.cpu().numpy()
- else:
- image_np = np.array(tensor_image)
-
- # Handle different tensor shapes
- # Case: (1, 1, H, W, 3) or (1, H, W, 3) - batch or multi-batch
- if len(image_np.shape) > 3:
- # Remove batch dimensions until we get to (H, W, 3)
- while len(image_np.shape) > 3:
- image_np = image_np[0]
-
- # If values are in [0, 1] range, convert to [0, 255]
- if image_np.dtype == np.float32 or image_np.dtype == np.float64:
- if image_np.max() <= 1.0:
- image_np = (image_np * 255).astype(np.uint8)
-
- # Ensure image is in the right format (HWC with RGB channels)
- if len(image_np.shape) == 3 and image_np.shape[2] == 3:
- pil_image = Image.fromarray(image_np)
- img_byte_arr = io.BytesIO()
- pil_image.save(img_byte_arr, format='PNG')
- image = img_byte_arr.getvalue()
- else:
- return web.json_response({"error": f"Cannot handle this data shape: {image_np.shape}, {image_np.dtype}"}, status=400)
- except Exception as e:
- logger.error(f"Error processing image data: {str(e)}", exc_info=True)
- return web.json_response({"error": f"Error processing image: {str(e)}"}, status=400)
-
- # Get the lora stack from the metadata
- lora_stack = metadata_dict.get("loras", "")
-
- # Parse the lora stack format: "<lora:name:strength> <lora:name:strength> ..."
- import re
- lora_matches = re.findall(r'<lora:([^:]+):([^>]+)>', lora_stack)
-
- # Check if any loras were found
- if not lora_matches:
- return web.json_response({"error": "No LoRAs found in the generation metadata"}, status=400)
-
- # Generate recipe name from the first 3 loras (or less if fewer are available)
- loras_for_name = lora_matches[:3] # Take at most 3 loras for the name
-
- recipe_name_parts = []
- for lora_name, lora_strength in loras_for_name:
- # Get the basename without path or extension
- basename = os.path.basename(lora_name)
- basename = os.path.splitext(basename)[0]
- recipe_name_parts.append(f"{basename}:{lora_strength}")
-
- recipe_name = " ".join(recipe_name_parts)
-
- # Create recipes directory if it doesn't exist
- recipes_dir = self.recipe_scanner.recipes_dir
- os.makedirs(recipes_dir, exist_ok=True)
-
- # Generate UUID for the recipe
- import uuid
- recipe_id = str(uuid.uuid4())
-
- # Optimize the image (resize and convert to WebP)
- optimized_image, extension = ExifUtils.optimize_image(
- image_data=image,
- target_width=CARD_PREVIEW_WIDTH,
- format='webp',
- quality=85,
- preserve_metadata=True
- )
-
- # Save the optimized image
- image_filename = f"{recipe_id}{extension}"
- image_path = os.path.join(recipes_dir, image_filename)
- with open(image_path, 'wb') as f:
- f.write(optimized_image)
-
- # Format loras data from the lora stack
- loras_data = []
-
- for lora_name, lora_strength in lora_matches:
- try:
- # Get lora info from scanner
- lora_info = await self.recipe_scanner._lora_scanner.get_model_info_by_name(lora_name)
-
- # Create lora entry
- lora_entry = {
- "file_name": lora_name,
- "hash": lora_info.get("sha256", "").lower() if lora_info else "",
- "strength": float(lora_strength),
- "modelVersionId": lora_info.get("civitai", {}).get("id", 0) if lora_info else 0,
- "modelName": lora_info.get("civitai", {}).get("model", {}).get("name", "") if lora_info else lora_name,
- "modelVersionName": lora_info.get("civitai", {}).get("name", "") if lora_info else "",
- "isDeleted": False
- }
- loras_data.append(lora_entry)
- except Exception as e:
- logger.warning(f"Error processing LoRA {lora_name}: {e}")
-
- # Get base model from lora scanner for the available loras
- base_model_counts = {}
- for lora in loras_data:
- lora_info = await self.recipe_scanner._lora_scanner.get_model_info_by_name(lora.get("file_name", ""))
- if lora_info and "base_model" in lora_info:
- base_model = lora_info["base_model"]
- base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1
-
- # Get most common base model
- most_common_base_model = ""
- if base_model_counts:
- most_common_base_model = max(base_model_counts.items(), key=lambda x: x[1])[0]
-
- # Create the recipe data structure
- recipe_data = {
- "id": recipe_id,
- "file_path": image_path,
- "title": recipe_name, # Use generated recipe name
- "modified": time.time(),
- "created_date": time.time(),
- "base_model": most_common_base_model,
- "loras": loras_data,
- "checkpoint": metadata_dict.get("checkpoint", ""),
- "gen_params": {key: value for key, value in metadata_dict.items()
- if key not in ['checkpoint', 'loras']},
- "loras_stack": lora_stack # Include the original lora stack string
- }
-
- # Save the recipe JSON
- json_filename = f"{recipe_id}.recipe.json"
- json_path = os.path.join(recipes_dir, json_filename)
- with open(json_path, 'w', encoding='utf-8') as f:
- json.dump(recipe_data, f, indent=4, ensure_ascii=False)
-
- # Add recipe metadata to the image
- ExifUtils.append_recipe_metadata(image_path, recipe_data)
-
- # Update cache
- if self.recipe_scanner._cache is not None:
- # Add the recipe to the raw data if the cache exists
- self.recipe_scanner._cache.raw_data.append(recipe_data)
- # Schedule a background task to resort the cache
- asyncio.create_task(self.recipe_scanner._cache.resort())
- logger.info(f"Added recipe {recipe_id} to cache")
-
- return web.json_response({
- 'success': True,
- 'recipe_id': recipe_id,
- 'image_path': image_path,
- 'json_path': json_path,
- 'recipe_name': recipe_name # Include the generated recipe name in the response
- })
-
- except Exception as e:
- logger.error(f"Error saving recipe from widget: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- async def get_recipe_syntax(self, request: web.Request) -> web.Response:
- """Generate recipe syntax for LoRAs in the recipe, looking up proper file names using hash_index"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- recipe_id = request.match_info['recipe_id']
-
- # Get all recipes from cache
- cache = await self.recipe_scanner.get_cached_data()
-
- # Find the specific recipe
- recipe = next((r for r in cache.raw_data if str(r.get('id', '')) == recipe_id), None)
-
- if not recipe:
- return web.json_response({"error": "Recipe not found"}, status=404)
-
- # Get the loras from the recipe
- loras = recipe.get('loras', [])
-
- if not loras:
- return web.json_response({"error": "No LoRAs found in this recipe"}, status=400)
-
- # Generate recipe syntax for all LoRAs that:
- # 1. Are in the library (not deleted) OR
- # 2. Are deleted but not marked for exclusion
- lora_syntax_parts = []
-
- # Access the hash_index from lora_scanner
- hash_index = self.recipe_scanner._lora_scanner._hash_index
-
- for lora in loras:
- # Skip loras that are deleted AND marked for exclusion
- if lora.get("isDeleted", False):
- continue
-
- if not self.recipe_scanner._lora_scanner.has_hash(lora.get("hash", "")):
- continue
-
- # Get the strength
- strength = lora.get("strength", 1.0)
-
- # Try to find the actual file name for this lora
- file_name = None
- hash_value = lora.get("hash", "").lower()
-
- if hash_value and hasattr(hash_index, "_hash_to_path"):
- # Look up the file path from the hash
- file_path = hash_index._hash_to_path.get(hash_value)
-
- if file_path:
- # Extract the file name without extension from the path
- file_name = os.path.splitext(os.path.basename(file_path))[0]
-
- # If hash lookup failed, fall back to modelVersionId lookup
- if not file_name and lora.get("modelVersionId"):
- # Search for files with matching modelVersionId
- all_loras = await self.recipe_scanner._lora_scanner.get_cached_data()
- for cached_lora in all_loras.raw_data:
- if not cached_lora.get("civitai"):
- continue
- if cached_lora.get("civitai", {}).get("id") == lora.get("modelVersionId"):
- file_name = os.path.splitext(os.path.basename(cached_lora["path"]))[0]
- break
-
- # If all lookups failed, use the file_name from the recipe
- if not file_name:
- file_name = lora.get("file_name", "unknown-lora")
-
- # Add to syntax parts
- lora_syntax_parts.append(f"<lora:{file_name}:{strength}>")
-
- # Join the LoRA syntax parts
- lora_syntax = " ".join(lora_syntax_parts)
-
- return web.json_response({
- 'success': True,
- 'syntax': lora_syntax
- })
- except Exception as e:
- logger.error(f"Error generating recipe syntax: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- async def update_recipe(self, request: web.Request) -> web.Response:
- """Update recipe metadata (name and tags)"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- recipe_id = request.match_info['recipe_id']
- data = await request.json()
-
- # Validate required fields
- if 'title' not in data and 'tags' not in data and 'source_path' not in data and 'preview_nsfw_level' not in data:
- return web.json_response({
- "error": "At least one field to update must be provided (title or tags or source_path or preview_nsfw_level)"
- }, status=400)
-
- # Use the recipe scanner's update method
- success = await self.recipe_scanner.update_recipe_metadata(recipe_id, data)
-
- if not success:
- return web.json_response({"error": "Recipe not found or update failed"}, status=404)
-
- return web.json_response({
- "success": True,
- "recipe_id": recipe_id,
- "updates": data
- })
- except Exception as e:
- logger.error(f"Error updating recipe: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- async def reconnect_lora(self, request: web.Request) -> web.Response:
- """Reconnect a deleted LoRA in a recipe to a local LoRA file"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Parse request data
- data = await request.json()
-
- # Validate required fields
- required_fields = ['recipe_id', 'lora_index', 'target_name']
- for field in required_fields:
- if field not in data:
- return web.json_response({
- "error": f"Missing required field: {field}"
- }, status=400)
-
- recipe_id = data['recipe_id']
- lora_index = int(data['lora_index'])
- target_name = data['target_name']
-
- # Get recipe scanner
- scanner = self.recipe_scanner
- lora_scanner = scanner._lora_scanner
-
- # Check if recipe exists
- recipe_path = os.path.join(scanner.recipes_dir, f"{recipe_id}.recipe.json")
- if not os.path.exists(recipe_path):
- return web.json_response({"error": "Recipe not found"}, status=404)
-
- # Find target LoRA by name
- target_lora = await lora_scanner.get_model_info_by_name(target_name)
- if not target_lora:
- return web.json_response({"error": f"Local LoRA not found with name: {target_name}"}, status=404)
-
- # Load recipe data
- with open(recipe_path, 'r', encoding='utf-8') as f:
- recipe_data = json.load(f)
-
- lora = recipe_data.get("loras", [])[lora_index] if lora_index < len(recipe_data.get('loras', [])) else None
-
- if lora is None:
- return web.json_response({"error": "LoRA index out of range in recipe"}, status=404)
-
- # Update LoRA data
- lora['isDeleted'] = False
- lora['exclude'] = False
- lora['file_name'] = target_name
-
- # Update with information from the target LoRA
- if 'sha256' in target_lora:
- lora['hash'] = target_lora['sha256'].lower()
- if target_lora.get("civitai"):
- lora['modelName'] = target_lora['civitai']['model']['name']
- lora['modelVersionName'] = target_lora['civitai']['name']
- lora['modelVersionId'] = target_lora['civitai']['id']
-
- updated_lora = dict(lora) # Make a copy for response
-
- # Recalculate recipe fingerprint after updating LoRA
- from ..utils.utils import calculate_recipe_fingerprint
- recipe_data['fingerprint'] = calculate_recipe_fingerprint(recipe_data.get('loras', []))
-
- # Save updated recipe
- with open(recipe_path, 'w', encoding='utf-8') as f:
- json.dump(recipe_data, f, indent=4, ensure_ascii=False)
-
- updated_lora['inLibrary'] = True
- updated_lora['preview_url'] = config.get_preview_static_url(target_lora['preview_url'])
- updated_lora['localPath'] = target_lora['file_path']
-
- # Update in cache if it exists
- if scanner._cache is not None:
- for cache_item in scanner._cache.raw_data:
- if cache_item.get('id') == recipe_id:
- # Replace loras array with updated version
- cache_item['loras'] = recipe_data['loras']
- # Update fingerprint in cache
- cache_item['fingerprint'] = recipe_data['fingerprint']
-
- # Resort the cache
- asyncio.create_task(scanner._cache.resort())
- break
-
- # Update EXIF metadata if image exists
- image_path = recipe_data.get('file_path')
- if image_path and os.path.exists(image_path):
- from ..utils.exif_utils import ExifUtils
- ExifUtils.append_recipe_metadata(image_path, recipe_data)
-
- # Find other recipes with the same fingerprint
- matching_recipes = []
- if 'fingerprint' in recipe_data:
- matching_recipes = await scanner.find_recipes_by_fingerprint(recipe_data['fingerprint'])
- # Remove current recipe from matches
- if recipe_id in matching_recipes:
- matching_recipes.remove(recipe_id)
-
- return web.json_response({
- "success": True,
- "recipe_id": recipe_id,
- "updated_lora": updated_lora,
- "matching_recipes": matching_recipes
- })
-
- except Exception as e:
- logger.error(f"Error reconnecting LoRA: {e}", exc_info=True)
- return web.json_response({"error": str(e)}, status=500)
-
- async def get_recipes_for_lora(self, request: web.Request) -> web.Response:
- """Get recipes that use a specific Lora"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- lora_hash = request.query.get('hash')
-
- # Hash is required
- if not lora_hash:
- return web.json_response({'success': False, 'error': 'Lora hash is required'}, status=400)
-
- # Log the search parameters
- logger.debug(f"Getting recipes for Lora by hash: {lora_hash}")
-
- # Get all recipes from cache
- cache = await self.recipe_scanner.get_cached_data()
-
- # Filter recipes that use this Lora by hash
- matching_recipes = []
- for recipe in cache.raw_data:
- # Check if any of the recipe's loras match this hash
- loras = recipe.get('loras', [])
- for lora in loras:
- if lora.get('hash', '').lower() == lora_hash.lower():
- matching_recipes.append(recipe)
- break # No need to check other loras in this recipe
-
- # Process the recipes similar to get_paginated_data to ensure all needed data is available
- for recipe in matching_recipes:
- # Add inLibrary information for each lora
- if 'loras' in recipe:
- for lora in recipe['loras']:
- if 'hash' in lora and lora['hash']:
- lora['inLibrary'] = self.recipe_scanner._lora_scanner.has_hash(lora['hash'].lower())
- lora['preview_url'] = self.recipe_scanner._lora_scanner.get_preview_url_by_hash(lora['hash'].lower())
- lora['localPath'] = self.recipe_scanner._lora_scanner.get_path_by_hash(lora['hash'].lower())
-
- # Ensure file_url is set (needed by frontend)
- if 'file_path' in recipe:
- recipe['file_url'] = self._format_recipe_file_url(recipe['file_path'])
- else:
- recipe['file_url'] = '/loras_static/images/no-preview.png'
-
- return web.json_response({'success': True, 'recipes': matching_recipes})
- except Exception as e:
- logger.error(f"Error getting recipes for Lora: {str(e)}")
- return web.json_response({'success': False, 'error': str(e)}, status=500)
-
- async def scan_recipes(self, request: web.Request) -> web.Response:
- """API endpoint for scanning and rebuilding the recipe cache"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Force refresh the recipe cache
- logger.info("Manually triggering recipe cache rebuild")
- await self.recipe_scanner.get_cached_data(force_refresh=True)
-
- return web.json_response({
- 'success': True,
- 'message': 'Recipe cache refreshed successfully'
- })
- except Exception as e:
- logger.error(f"Error refreshing recipe cache: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def find_duplicates(self, request: web.Request) -> web.Response:
- """Find all duplicate recipes based on fingerprints"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Get all duplicate recipes
- duplicate_groups = await self.recipe_scanner.find_all_duplicate_recipes()
-
- # Create response data with additional recipe information
- response_data = []
-
- for fingerprint, recipe_ids in duplicate_groups.items():
- # Skip groups with only one recipe (not duplicates)
- if len(recipe_ids) <= 1:
- continue
-
- # Get recipe details for each recipe in the group
- recipes = []
- for recipe_id in recipe_ids:
- recipe = await self.recipe_scanner.get_recipe_by_id(recipe_id)
- if recipe:
- # Add only needed fields to keep response size manageable
- recipes.append({
- 'id': recipe.get('id'),
- 'title': recipe.get('title'),
- 'file_url': recipe.get('file_url') or self._format_recipe_file_url(recipe.get('file_path', '')),
- 'modified': recipe.get('modified'),
- 'created_date': recipe.get('created_date'),
- 'lora_count': len(recipe.get('loras', [])),
- })
-
- # Only include groups with at least 2 valid recipes
- if len(recipes) >= 2:
- # Sort recipes by modified date (newest first)
- recipes.sort(key=lambda x: x.get('modified', 0), reverse=True)
-
- response_data.append({
- 'fingerprint': fingerprint,
- 'count': len(recipes),
- 'recipes': recipes
- })
-
- # Sort groups by count (highest first)
- response_data.sort(key=lambda x: x['count'], reverse=True)
-
- return web.json_response({
- 'success': True,
- 'duplicate_groups': response_data
- })
-
- except Exception as e:
- logger.error(f"Error finding duplicate recipes: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- async def bulk_delete(self, request: web.Request) -> web.Response:
- """Delete multiple recipes by ID"""
- try:
- # Ensure services are initialized
- await self.init_services()
-
- # Parse request data
- data = await request.json()
- recipe_ids = data.get('recipe_ids', [])
-
- if not recipe_ids:
- return web.json_response({
- 'success': False,
- 'error': 'No recipe IDs provided'
- }, status=400)
-
- # Get recipes directory
- recipes_dir = self.recipe_scanner.recipes_dir
- if not recipes_dir or not os.path.exists(recipes_dir):
- return web.json_response({
- 'success': False,
- 'error': 'Recipes directory not found'
- }, status=404)
-
- # Track deleted and failed recipes
- deleted_recipes = []
- failed_recipes = []
-
- # Process each recipe ID
- for recipe_id in recipe_ids:
- # Find recipe JSON file
- recipe_json_path = os.path.join(recipes_dir, f"{recipe_id}.recipe.json")
-
- if not os.path.exists(recipe_json_path):
- failed_recipes.append({
- 'id': recipe_id,
- 'reason': 'Recipe not found'
- })
- continue
-
- try:
- # Load recipe data to get image path
- with open(recipe_json_path, 'r', encoding='utf-8') as f:
- recipe_data = json.load(f)
-
- # Get image path
- image_path = recipe_data.get('file_path')
-
- # Delete recipe JSON file
- os.remove(recipe_json_path)
-
- # Delete recipe image if it exists
- if image_path and os.path.exists(image_path):
- os.remove(image_path)
-
- deleted_recipes.append(recipe_id)
-
- except Exception as e:
- failed_recipes.append({
- 'id': recipe_id,
- 'reason': str(e)
- })
-
- # Update cache if any recipes were deleted
- if deleted_recipes and self.recipe_scanner._cache is not None:
- # Remove deleted recipes from raw_data
- self.recipe_scanner._cache.raw_data = [
- r for r in self.recipe_scanner._cache.raw_data
- if r.get('id') not in deleted_recipes
- ]
- # Resort the cache
- asyncio.create_task(self.recipe_scanner._cache.resort())
- logger.info(f"Removed {len(deleted_recipes)} recipes from cache")
-
- return web.json_response({
- 'success': True,
- 'deleted': deleted_recipes,
- 'failed': failed_recipes,
- 'total_deleted': len(deleted_recipes),
- 'total_failed': len(failed_recipes)
- })
-
- except Exception as e:
- logger.error(f"Error performing bulk delete: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
+ registrar = RecipeRouteRegistrar(app)
+ registrar.register_routes(routes.to_route_mapping())
+ routes.register_startup_hooks(app)
diff --git a/py/routes/stats_routes.py b/py/routes/stats_routes.py
index b61762d1..f8c0aaa4 100644
--- a/py/routes/stats_routes.py
+++ b/py/routes/stats_routes.py
@@ -507,12 +507,12 @@ class StatsRoutes:
app.router.add_get('/statistics', self.handle_stats_page)
# Register API routes
- app.router.add_get('/api/stats/collection-overview', self.get_collection_overview)
- app.router.add_get('/api/stats/usage-analytics', self.get_usage_analytics)
- app.router.add_get('/api/stats/base-model-distribution', self.get_base_model_distribution)
- app.router.add_get('/api/stats/tag-analytics', self.get_tag_analytics)
- app.router.add_get('/api/stats/storage-analytics', self.get_storage_analytics)
- app.router.add_get('/api/stats/insights', self.get_insights)
+ app.router.add_get('/api/lm/stats/collection-overview', self.get_collection_overview)
+ app.router.add_get('/api/lm/stats/usage-analytics', self.get_usage_analytics)
+ app.router.add_get('/api/lm/stats/base-model-distribution', self.get_base_model_distribution)
+ app.router.add_get('/api/lm/stats/tag-analytics', self.get_tag_analytics)
+ app.router.add_get('/api/lm/stats/storage-analytics', self.get_storage_analytics)
+ app.router.add_get('/api/lm/stats/insights', self.get_insights)
async def _on_startup(self, app):
"""Initialize services when the app starts"""
diff --git a/py/routes/update_routes.py b/py/routes/update_routes.py
index d139ce77..a25e085e 100644
--- a/py/routes/update_routes.py
+++ b/py/routes/update_routes.py
@@ -7,7 +7,7 @@ import shutil
import tempfile
from aiohttp import web
from typing import Dict, List
-from ..services.downloader import get_downloader, Downloader
+from ..services.downloader import get_downloader
logger = logging.getLogger(__name__)
@@ -17,9 +17,9 @@ class UpdateRoutes:
@staticmethod
def setup_routes(app):
"""Register update check routes"""
- app.router.add_get('/api/check-updates', UpdateRoutes.check_updates)
- app.router.add_get('/api/version-info', UpdateRoutes.get_version_info)
- app.router.add_post('/api/perform-update', UpdateRoutes.perform_update)
+ app.router.add_get('/api/lm/check-updates', UpdateRoutes.check_updates)
+ app.router.add_get('/api/lm/version-info', UpdateRoutes.get_version_info)
+ app.router.add_post('/api/lm/perform-update', UpdateRoutes.perform_update)
@staticmethod
async def check_updates(request):
@@ -154,7 +154,7 @@ class UpdateRoutes:
async def _download_and_replace_zip(plugin_root: str) -> tuple[bool, str]:
"""
Download latest release ZIP from GitHub and replace plugin files.
- Skips settings.json. Writes extracted file list to .tracking.
+ Skips settings.json and civitai folder. Writes extracted file list to .tracking.
"""
repo_owner = "willmiao"
repo_name = "ComfyUI-Lora-Manager"
@@ -193,7 +193,8 @@ class UpdateRoutes:
zip_path = tmp_zip_path
- UpdateRoutes._clean_plugin_folder(plugin_root, skip_files=['settings.json'])
+ # Skip both settings.json and civitai folder
+ UpdateRoutes._clean_plugin_folder(plugin_root, skip_files=['settings.json', 'civitai'])
# Extract ZIP to temp dir
with tempfile.TemporaryDirectory() as tmp_dir:
@@ -202,17 +203,17 @@ class UpdateRoutes:
# Find extracted folder (GitHub ZIP contains a root folder)
extracted_root = next(os.scandir(tmp_dir)).path
- # Copy files, skipping settings.json
+ # Copy files, skipping settings.json and civitai folder
for item in os.listdir(extracted_root):
+ if item == 'settings.json' or item == 'civitai':
+ continue
src = os.path.join(extracted_root, item)
dst = os.path.join(plugin_root, item)
if os.path.isdir(src):
if os.path.exists(dst):
shutil.rmtree(dst)
- shutil.copytree(src, dst, ignore=shutil.ignore_patterns('settings.json'))
+ shutil.copytree(src, dst, ignore=shutil.ignore_patterns('settings.json', 'civitai'))
else:
- if item == 'settings.json':
- continue
shutil.copy2(src, dst)
# Write .tracking file: list all files under extracted_root, relative to extracted_root
@@ -220,8 +221,15 @@ class UpdateRoutes:
tracking_info_file = os.path.join(plugin_root, '.tracking')
tracking_files = []
for root, dirs, files in os.walk(extracted_root):
+ # Skip civitai folder and its contents
+ rel_root = os.path.relpath(root, extracted_root)
+ if rel_root == 'civitai' or rel_root.startswith('civitai' + os.sep):
+ continue
for file in files:
rel_path = os.path.relpath(os.path.join(root, file), extracted_root)
+ # Skip settings.json and any file under civitai
+ if rel_path == 'settings.json' or rel_path.startswith('civitai' + os.sep):
+ continue
tracking_files.append(rel_path.replace("\\", "/"))
with open(tracking_info_file, "w", encoding='utf-8') as file:
file.write('\n'.join(tracking_files))
@@ -257,7 +265,7 @@ class UpdateRoutes:
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/commits/main"
try:
- downloader = await Downloader.get_instance()
+ downloader = await get_downloader()
success, data = await downloader.make_request('GET', github_url, custom_headers={'Accept': 'application/vnd.github+json'})
if not success:
@@ -423,7 +431,7 @@ class UpdateRoutes:
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
try:
- downloader = await Downloader.get_instance()
+ downloader = await get_downloader()
success, data = await downloader.make_request('GET', github_url, custom_headers={'Accept': 'application/vnd.github+json'})
if not success:
diff --git a/py/services/base_model_service.py b/py/services/base_model_service.py
index 14a89a52..2c2c0ad8 100644
--- a/py/services/base_model_service.py
+++ b/py/services/base_model_service.py
@@ -4,99 +4,88 @@ import logging
import os
from ..utils.models import BaseModelMetadata
-from ..utils.routes_common import ModelRouteUtils
-from ..utils.constants import NSFW_LEVELS
-from .settings_manager import settings
-from ..utils.utils import fuzzy_match
+from .model_query import FilterCriteria, ModelCacheRepository, ModelFilterSet, SearchStrategy, SettingsProvider
+from .settings_manager import settings as default_settings
logger = logging.getLogger(__name__)
class BaseModelService(ABC):
"""Base service class for all model types"""
- def __init__(self, model_type: str, scanner, metadata_class: Type[BaseModelMetadata]):
- """Initialize the service
-
+ def __init__(
+ self,
+ model_type: str,
+ scanner,
+ metadata_class: Type[BaseModelMetadata],
+ *,
+ cache_repository: Optional[ModelCacheRepository] = None,
+ filter_set: Optional[ModelFilterSet] = None,
+ search_strategy: Optional[SearchStrategy] = None,
+ settings_provider: Optional[SettingsProvider] = None,
+ ):
+ """Initialize the service.
+
Args:
- model_type: Type of model (lora, checkpoint, etc.)
- scanner: Model scanner instance
- metadata_class: Metadata class for this model type
+ model_type: Type of model (lora, checkpoint, etc.).
+ scanner: Model scanner instance.
+ metadata_class: Metadata class for this model type.
+ cache_repository: Custom repository for cache access (primarily for tests).
+ filter_set: Filter component controlling folder/tag/favorites logic.
+ search_strategy: Search component for fuzzy/text matching.
+ settings_provider: Settings object; defaults to the global settings manager.
"""
self.model_type = model_type
self.scanner = scanner
self.metadata_class = metadata_class
+ self.settings = settings_provider or default_settings
+ self.cache_repository = cache_repository or ModelCacheRepository(scanner)
+ self.filter_set = filter_set or ModelFilterSet(self.settings)
+ self.search_strategy = search_strategy or SearchStrategy()
- async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'name',
- folder: str = None, search: str = None, fuzzy_search: bool = False,
- base_models: list = None, tags: list = None,
- search_options: dict = None, hash_filters: dict = None,
- favorites_only: bool = False, **kwargs) -> Dict:
- """Get paginated and filtered model data
-
- Args:
- page: Page number (1-based)
- page_size: Number of items per page
- sort_by: Sort criteria, e.g. 'name', 'name:asc', 'name:desc', 'date', 'date:asc', 'date:desc'
- folder: Folder filter
- search: Search term
- fuzzy_search: Whether to use fuzzy search
- base_models: List of base models to filter by
- tags: List of tags to filter by
- search_options: Search options dict
- hash_filters: Hash filtering options
- favorites_only: Filter for favorites only
- **kwargs: Additional model-specific filters
-
- Returns:
- Dict containing paginated results
- """
- cache = await self.scanner.get_cached_data()
+ async def get_paginated_data(
+ self,
+ page: int,
+ page_size: int,
+ sort_by: str = 'name',
+ folder: str = None,
+ search: str = None,
+ fuzzy_search: bool = False,
+ base_models: list = None,
+ tags: list = None,
+ search_options: dict = None,
+ hash_filters: dict = None,
+ favorites_only: bool = False,
+ **kwargs,
+ ) -> Dict:
+ """Get paginated and filtered model data"""
+ sort_params = self.cache_repository.parse_sort(sort_by)
+ sorted_data = await self.cache_repository.fetch_sorted(sort_params)
- # Parse sort_by into sort_key and order
- if ':' in sort_by:
- sort_key, order = sort_by.split(':', 1)
- sort_key = sort_key.strip()
- order = order.strip().lower()
- if order not in ('asc', 'desc'):
- order = 'asc'
- else:
- sort_key = sort_by.strip()
- order = 'asc'
-
- # Get default search options if not provided
- if search_options is None:
- search_options = {
- 'filename': True,
- 'modelname': True,
- 'tags': False,
- 'recursive': True,
- }
-
- # Get the base data set using new sort logic
- filtered_data = await cache.get_sorted_data(sort_key, order)
-
- # Apply hash filtering if provided (highest priority)
if hash_filters:
- filtered_data = await self._apply_hash_filters(filtered_data, hash_filters)
-
- # Jump to pagination for hash filters
+ filtered_data = await self._apply_hash_filters(sorted_data, hash_filters)
return self._paginate(filtered_data, page, page_size)
-
- # Apply common filters
+
filtered_data = await self._apply_common_filters(
- filtered_data, folder, base_models, tags, favorites_only, search_options
+ sorted_data,
+ folder=folder,
+ base_models=base_models,
+ tags=tags,
+ favorites_only=favorites_only,
+ search_options=search_options,
)
-
- # Apply search filtering
+
if search:
filtered_data = await self._apply_search_filters(
- filtered_data, search, fuzzy_search, search_options
+ filtered_data,
+ search,
+ fuzzy_search,
+ search_options,
)
-
- # Apply model-specific filters
+
filtered_data = await self._apply_specific_filters(filtered_data, **kwargs)
-
+
return self._paginate(filtered_data, page, page_size)
+
async def _apply_hash_filters(self, data: List[Dict], hash_filters: Dict) -> List[Dict]:
"""Apply hash-based filtering"""
@@ -120,113 +109,36 @@ class BaseModelService(ABC):
return data
- async def _apply_common_filters(self, data: List[Dict], folder: str = None,
- base_models: list = None, tags: list = None,
- favorites_only: bool = False, search_options: dict = None) -> List[Dict]:
+ async def _apply_common_filters(
+ self,
+ data: List[Dict],
+ folder: str = None,
+ base_models: list = None,
+ tags: list = None,
+ favorites_only: bool = False,
+ search_options: dict = None,
+ ) -> List[Dict]:
"""Apply common filters that work across all model types"""
- # Apply SFW filtering if enabled in settings
- if settings.get('show_only_sfw', False):
- data = [
- item for item in data
- if not item.get('preview_nsfw_level') or item.get('preview_nsfw_level') < NSFW_LEVELS['R']
- ]
-
- # Apply favorites filtering if enabled
- if favorites_only:
- data = [
- item for item in data
- if item.get('favorite', False) is True
- ]
-
- # Apply folder filtering
- if folder is not None:
- if search_options and search_options.get('recursive', True):
- # Recursive folder filtering - include all subfolders
- # Ensure we match exact folder or its subfolders by checking path boundaries
- if folder == "":
- # Empty folder means root - include all items
- pass # Don't filter anything
- else:
- # Add trailing slash to ensure we match folder boundaries correctly
- folder_with_separator = folder + "/"
- data = [
- item for item in data
- if (item['folder'] == folder or
- item['folder'].startswith(folder_with_separator))
- ]
- else:
- # Exact folder filtering
- data = [
- item for item in data
- if item['folder'] == folder
- ]
-
- # Apply base model filtering
- if base_models and len(base_models) > 0:
- data = [
- item for item in data
- if item.get('base_model') in base_models
- ]
-
- # Apply tag filtering
- if tags and len(tags) > 0:
- data = [
- item for item in data
- if any(tag in item.get('tags', []) for tag in tags)
- ]
-
- return data
+ normalized_options = self.search_strategy.normalize_options(search_options)
+ criteria = FilterCriteria(
+ folder=folder,
+ base_models=base_models,
+ tags=tags,
+ favorites_only=favorites_only,
+ search_options=normalized_options,
+ )
+ return self.filter_set.apply(data, criteria)
- async def _apply_search_filters(self, data: List[Dict], search: str,
- fuzzy_search: bool, search_options: dict) -> List[Dict]:
+ async def _apply_search_filters(
+ self,
+ data: List[Dict],
+ search: str,
+ fuzzy_search: bool,
+ search_options: dict,
+ ) -> List[Dict]:
"""Apply search filtering"""
- search_results = []
-
- for item in data:
- # Search by file name
- if search_options.get('filename', True):
- if fuzzy_search:
- if fuzzy_match(item.get('file_name', ''), search):
- search_results.append(item)
- continue
- elif search.lower() in item.get('file_name', '').lower():
- search_results.append(item)
- continue
-
- # Search by model name
- if search_options.get('modelname', True):
- if fuzzy_search:
- if fuzzy_match(item.get('model_name', ''), search):
- search_results.append(item)
- continue
- elif search.lower() in item.get('model_name', '').lower():
- search_results.append(item)
- continue
-
- # Search by tags
- if search_options.get('tags', False) and 'tags' in item:
- if any((fuzzy_match(tag, search) if fuzzy_search else search.lower() in tag.lower())
- for tag in item['tags']):
- search_results.append(item)
- continue
-
- # Search by creator
- civitai = item.get('civitai')
- creator_username = ''
- if civitai and isinstance(civitai, dict):
- creator = civitai.get('creator')
- if creator and isinstance(creator, dict):
- creator_username = creator.get('username', '')
- if search_options.get('creator', False) and creator_username:
- if fuzzy_search:
- if fuzzy_match(creator_username, search):
- search_results.append(item)
- continue
- elif search.lower() in creator_username.lower():
- search_results.append(item)
- continue
-
- return search_results
+ normalized_options = self.search_strategy.normalize_options(search_options)
+ return self.search_strategy.apply(data, search, normalized_options, fuzzy_search)
async def _apply_specific_filters(self, data: List[Dict], **kwargs) -> List[Dict]:
"""Apply model-specific filters - to be overridden by subclasses if needed"""
@@ -284,6 +196,18 @@ class BaseModelService(ABC):
"""Get model root directories"""
return self.scanner.get_model_roots()
+ def filter_civitai_data(self, data: Dict, minimal: bool = False) -> Dict:
+ """Filter relevant fields from CivitAI data"""
+ if not data:
+ return {}
+
+ fields = ["id", "modelId", "name", "trainedWords"] if minimal else [
+ "id", "modelId", "name", "createdAt", "updatedAt",
+ "publishedAt", "trainedWords", "baseModel", "description",
+ "model", "images", "customImages", "creator"
+ ]
+ return {k: data[k] for k in fields if k in data}
+
async def get_folder_tree(self, model_root: str) -> Dict:
"""Get hierarchical folder tree for a specific model root"""
cache = await self.scanner.get_cached_data()
@@ -363,7 +287,7 @@ class BaseModelService(ABC):
from ..config import config
return config.get_preview_static_url(preview_url)
- return None
+ return '/loras_static/images/no-preview.png'
async def get_model_civitai_url(self, model_name: str) -> Dict[str, Optional[str]]:
"""Get the Civitai URL for a model file"""
@@ -394,7 +318,7 @@ class BaseModelService(ABC):
for model in cache.raw_data:
if model.get('file_path') == file_path:
- return ModelRouteUtils.filter_civitai_data(model.get("civitai", {}))
+ return self.filter_civitai_data(model.get("civitai", {}))
return None
diff --git a/py/services/checkpoint_service.py b/py/services/checkpoint_service.py
index ef3dc4a8..2f7b8a96 100644
--- a/py/services/checkpoint_service.py
+++ b/py/services/checkpoint_service.py
@@ -1,11 +1,10 @@
import os
import logging
-from typing import Dict, List, Optional
+from typing import Dict
from .base_model_service import BaseModelService
from ..utils.models import CheckpointMetadata
from ..config import config
-from ..utils.routes_common import ModelRouteUtils
logger = logging.getLogger(__name__)
@@ -38,7 +37,7 @@ class CheckpointService(BaseModelService):
"notes": checkpoint_data.get("notes", ""),
"model_type": checkpoint_data.get("model_type", "checkpoint"),
"favorite": checkpoint_data.get("favorite", False),
- "civitai": ModelRouteUtils.filter_civitai_data(checkpoint_data.get("civitai", {}), minimal=True)
+ "civitai": self.filter_civitai_data(checkpoint_data.get("civitai", {}), minimal=True)
}
def find_duplicate_hashes(self) -> Dict:
diff --git a/py/services/civitai_client.py b/py/services/civitai_client.py
index 04d56d29..bb6004ed 100644
--- a/py/services/civitai_client.py
+++ b/py/services/civitai_client.py
@@ -1,5 +1,5 @@
-from datetime import datetime
import os
+import copy
import logging
import asyncio
from typing import Optional, Dict, Tuple, List
@@ -59,17 +59,17 @@ class CivitaiClient:
return success, result
- async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
+ async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
try:
downloader = await get_downloader()
- success, version = await downloader.make_request(
+ success, result = await downloader.make_request(
'GET',
f"{self.base_url}/model-versions/by-hash/{model_hash}",
use_auth=True
)
if success:
# Get model ID from version data
- model_id = version.get('modelId')
+ model_id = result.get('modelId')
if model_id:
# Fetch additional model metadata
success_model, data = await downloader.make_request(
@@ -79,22 +79,29 @@ class CivitaiClient:
)
if success_model:
# Enrich version_info with model data
- version['model']['description'] = data.get("description")
- version['model']['tags'] = data.get("tags", [])
+ result['model']['description'] = data.get("description")
+ result['model']['tags'] = data.get("tags", [])
# Add creator from model data
- version['creator'] = data.get("creator")
+ result['creator'] = data.get("creator")
- return version
- return None
+ return result, None
+
+ # Handle specific error cases
+ if "not found" in str(result):
+ return None, "Model not found"
+
+ # Other error cases
+ logger.error(f"Failed to fetch model info for {model_hash[:10]}: {result}")
+ return None, str(result)
except Exception as e:
logger.error(f"API Error: {str(e)}")
- return None
+ return None, str(e)
async def download_preview_image(self, image_url: str, save_path: str):
try:
downloader = await get_downloader()
- success, content = await downloader.download_to_memory(
+ success, content, headers = await downloader.download_to_memory(
image_url,
use_auth=False # Preview images don't need auth
)
@@ -122,7 +129,8 @@ class CivitaiClient:
# Also return model type along with versions
return {
'modelVersions': result.get('modelVersions', []),
- 'type': result.get('type', '')
+ 'type': result.get('type', ''),
+ 'name': result.get('name', '')
}
return None
except Exception as e:
@@ -182,31 +190,76 @@ class CivitaiClient:
)
if not success:
return None
-
+
model_versions = data.get('modelVersions', [])
-
- # Step 2: Determine the version_id to use
- target_version_id = version_id
- if target_version_id is None:
- target_version_id = model_versions[0].get('id')
-
- # Step 3: Get detailed version info using the version_id
- success, version = await downloader.make_request(
- 'GET',
- f"{self.base_url}/model-versions/{target_version_id}",
- use_auth=True
- )
- if not success:
+ if not model_versions:
+ logger.warning(f"No model versions found for model {model_id}")
return None
-
+
+ # Step 2: Determine the target version entry to use
+ target_version = None
+ if version_id is not None:
+ target_version = next(
+ (item for item in model_versions if item.get('id') == version_id),
+ None
+ )
+ if target_version is None:
+ logger.warning(
+ f"Version {version_id} not found for model {model_id}, defaulting to first version"
+ )
+ if target_version is None:
+ target_version = model_versions[0]
+
+ target_version_id = target_version.get('id')
+
+ # Step 3: Get detailed version info using the SHA256 hash
+ model_hash = None
+ for file_info in target_version.get('files', []):
+ if file_info.get('type') == 'Model' and file_info.get('primary'):
+ model_hash = file_info.get('hashes', {}).get('SHA256')
+ if model_hash:
+ break
+
+ version = None
+ if model_hash:
+ success, version = await downloader.make_request(
+ 'GET',
+ f"{self.base_url}/model-versions/by-hash/{model_hash}",
+ use_auth=True
+ )
+ if not success:
+ logger.warning(
+ f"Failed to fetch version by hash for model {model_id} version {target_version_id}: {version}"
+ )
+ version = None
+ else:
+ logger.warning(
+ f"No primary model hash found for model {model_id} version {target_version_id}"
+ )
+
+ if version is None:
+ version = copy.deepcopy(target_version)
+ version.pop('index', None)
+ version['modelId'] = model_id
+ version['model'] = {
+ 'name': data.get('name'),
+ 'type': data.get('type'),
+ 'nsfw': data.get('nsfw'),
+ 'poi': data.get('poi')
+ }
+
# Step 4: Enrich version_info with model data
# Add description and tags from model data
- version['model']['description'] = data.get("description")
- version['model']['tags'] = data.get("tags", [])
-
+ model_info = version.get('model')
+ if not isinstance(model_info, dict):
+ model_info = {}
+ version['model'] = model_info
+ model_info['description'] = data.get("description")
+ model_info['tags'] = data.get("tags", [])
+
# Add creator from model data
version['creator'] = data.get("creator")
-
+
return version
# Case 3: Neither model_id nor version_id provided
@@ -245,8 +298,8 @@ class CivitaiClient:
return result, None
# Handle specific error cases
- if "404" in str(result):
- error_msg = f"Model not found (status 404)"
+ if "not found" in str(result):
+ error_msg = f"Model not found"
logger.warning(f"Model version not found: {version_id} - {error_msg}")
return None, error_msg
@@ -258,59 +311,6 @@ class CivitaiClient:
logger.error(error_msg)
return None, error_msg
- async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
- """Fetch model metadata (description, tags, and creator info) from Civitai API
-
- Args:
- model_id: The Civitai model ID
-
- Returns:
- Tuple[Optional[Dict], int]: A tuple containing:
- - A dictionary with model metadata or None if not found
- - The HTTP status code from the request (0 for exceptions)
- """
- try:
- downloader = await get_downloader()
- url = f"{self.base_url}/models/{model_id}"
-
- success, result = await downloader.make_request(
- 'GET',
- url,
- use_auth=True
- )
-
- if not success:
- # Try to extract status code from error message
- status_code = 0
- if "404" in str(result):
- status_code = 404
- elif "401" in str(result):
- status_code = 401
- elif "403" in str(result):
- status_code = 403
- logger.warning(f"Failed to fetch model metadata: {result}")
- return None, status_code
-
- # Extract relevant metadata
- metadata = {
- "description": result.get("description") or "No model description available",
- "tags": result.get("tags", []),
- "creator": {
- "username": result.get("creator", {}).get("username"),
- "image": result.get("creator", {}).get("image")
- }
- }
-
- if metadata["description"] or metadata["tags"] or metadata["creator"]["username"]:
- return metadata, 200
- else:
- logger.warning(f"No metadata found for model {model_id}")
- return None, 200
-
- except Exception as e:
- logger.error(f"Error fetching model metadata: {e}", exc_info=True)
- return None, 0
-
async def get_image_info(self, image_id: str) -> Optional[Dict]:
"""Fetch image information from Civitai API
diff --git a/py/services/download_coordinator.py b/py/services/download_coordinator.py
new file mode 100644
index 00000000..4cf866e5
--- /dev/null
+++ b/py/services/download_coordinator.py
@@ -0,0 +1,100 @@
+"""Service wrapper for coordinating download lifecycle events."""
+
+from __future__ import annotations
+
+import logging
+from typing import Any, Awaitable, Callable, Dict, Optional
+
+
+logger = logging.getLogger(__name__)
+
+
+class DownloadCoordinator:
+ """Manage download scheduling, cancellation and introspection."""
+
+ def __init__(
+ self,
+ *,
+ ws_manager,
+ download_manager_factory: Callable[[], Awaitable],
+ ) -> None:
+ self._ws_manager = ws_manager
+ self._download_manager_factory = download_manager_factory
+
+ async def schedule_download(self, payload: Dict[str, Any]) -> Dict[str, Any]:
+ """Schedule a download using the provided payload."""
+
+ download_manager = await self._download_manager_factory()
+
+ download_id = payload.get("download_id") or self._ws_manager.generate_download_id()
+ payload.setdefault("download_id", download_id)
+
+ async def progress_callback(progress: Any) -> None:
+ await self._ws_manager.broadcast_download_progress(
+ download_id,
+ {
+ "status": "progress",
+ "progress": progress,
+ "download_id": download_id,
+ },
+ )
+
+ model_id = self._parse_optional_int(payload.get("model_id"), "model_id")
+ model_version_id = self._parse_optional_int(
+ payload.get("model_version_id"), "model_version_id"
+ )
+
+ if model_id is None and model_version_id is None:
+ raise ValueError(
+ "Missing required parameter: Please provide either 'model_id' or 'model_version_id'"
+ )
+
+ result = await download_manager.download_from_civitai(
+ model_id=model_id,
+ model_version_id=model_version_id,
+ save_dir=payload.get("model_root"),
+ relative_path=payload.get("relative_path", ""),
+ use_default_paths=payload.get("use_default_paths", False),
+ progress_callback=progress_callback,
+ download_id=download_id,
+ source=payload.get("source"),
+ )
+
+ result["download_id"] = download_id
+ return result
+
+ async def cancel_download(self, download_id: str) -> Dict[str, Any]:
+ """Cancel an active download and emit a broadcast event."""
+
+ download_manager = await self._download_manager_factory()
+ result = await download_manager.cancel_download(download_id)
+
+ await self._ws_manager.broadcast_download_progress(
+ download_id,
+ {
+ "status": "cancelled",
+ "progress": 0,
+ "download_id": download_id,
+ "message": "Download cancelled by user",
+ },
+ )
+
+ return result
+
+ async def list_active_downloads(self) -> Dict[str, Any]:
+ """Return the active download map from the underlying manager."""
+
+ download_manager = await self._download_manager_factory()
+ return await download_manager.get_active_downloads()
+
+ def _parse_optional_int(self, value: Any, field: str) -> Optional[int]:
+ """Parse an optional integer from user input."""
+
+ if value is None or value == "":
+ return None
+
+ try:
+ return int(value)
+ except (TypeError, ValueError) as exc:
+ raise ValueError(f"Invalid {field}: Must be an integer") from exc
+
diff --git a/py/services/download_manager.py b/py/services/download_manager.py
index 9f090b20..76b974fb 100644
--- a/py/services/download_manager.py
+++ b/py/services/download_manager.py
@@ -36,17 +36,10 @@ class DownloadManager:
return
self._initialized = True
- self._civitai_client = None # Will be lazily initialized
# Add download management
self._active_downloads = OrderedDict() # download_id -> download_info
self._download_semaphore = asyncio.Semaphore(5) # Limit concurrent downloads
self._download_tasks = {} # download_id -> asyncio.Task
-
- async def _get_civitai_client(self):
- """Lazily initialize CivitaiClient from registry"""
- if self._civitai_client is None:
- self._civitai_client = await ServiceRegistry.get_civitai_client()
- return self._civitai_client
async def _get_lora_scanner(self):
"""Get the lora scanner from registry"""
@@ -59,7 +52,7 @@ class DownloadManager:
async def download_from_civitai(self, model_id: int = None, model_version_id: int = None,
save_dir: str = None, relative_path: str = '',
progress_callback=None, use_default_paths: bool = False,
- download_id: str = None) -> Dict:
+ download_id: str = None, source: str = None) -> Dict:
"""Download model from Civitai with task tracking and concurrency control
Args:
@@ -70,6 +63,7 @@ class DownloadManager:
progress_callback: Callback function for progress updates
use_default_paths: Flag to use default paths
download_id: Unique identifier for this download task
+ source: Optional source parameter to specify metadata provider
Returns:
Dict with download result
@@ -93,7 +87,7 @@ class DownloadManager:
download_task = asyncio.create_task(
self._download_with_semaphore(
task_id, model_id, model_version_id, save_dir,
- relative_path, progress_callback, use_default_paths
+ relative_path, progress_callback, use_default_paths, source
)
)
@@ -114,7 +108,8 @@ class DownloadManager:
async def _download_with_semaphore(self, task_id: str, model_id: int, model_version_id: int,
save_dir: str, relative_path: str,
- progress_callback=None, use_default_paths: bool = False):
+ progress_callback=None, use_default_paths: bool = False,
+ source: str = None):
"""Execute download with semaphore to limit concurrency"""
# Update status to waiting
if task_id in self._active_downloads:
@@ -144,7 +139,7 @@ class DownloadManager:
result = await self._execute_original_download(
model_id, model_version_id, save_dir,
relative_path, tracking_callback, use_default_paths,
- task_id
+ task_id, source
)
# Update status based on result
@@ -179,7 +174,7 @@ class DownloadManager:
async def _execute_original_download(self, model_id, model_version_id, save_dir,
relative_path, progress_callback, use_default_paths,
- download_id=None):
+ download_id=None, source=None):
"""Wrapper for original download_from_civitai implementation"""
try:
# Check if model version already exists in library
@@ -201,8 +196,12 @@ class DownloadManager:
if await embedding_scanner.check_model_version_exists(model_version_id):
return {'success': False, 'error': 'Model version already exists in embedding library'}
- # Get metadata provider instead of civitai client directly
- metadata_provider = await get_default_metadata_provider()
+ # Get metadata provider based on source parameter
+ if source == 'civarchive':
+ from .metadata_service import get_metadata_provider
+ metadata_provider = await get_metadata_provider('civarchive')
+ else:
+ metadata_provider = await get_default_metadata_provider()
# Get version info based on the provided identifier
version_info = await metadata_provider.get_model_version(model_id, model_version_id)
@@ -295,6 +294,8 @@ class DownloadManager:
file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
if not file_info:
return {'success': False, 'error': 'No primary file found in metadata'}
+ if not file_info.get('downloadUrl'):
+ return {'success': False, 'error': 'No download URL found for primary file'}
# 3. Prepare download
file_name = file_info['name']
@@ -396,8 +397,6 @@ class DownloadManager:
model_type: str = "lora", download_id: str = None) -> Dict:
"""Execute the actual download process including preview images and model files"""
try:
- civitai_client = await self._get_civitai_client()
-
# Extract original filename details
original_filename = os.path.basename(metadata.file_path)
base_name, extension = os.path.splitext(original_filename)
@@ -464,7 +463,7 @@ class DownloadManager:
# Download the original image to temp path using downloader
downloader = await get_downloader()
- success, content = await downloader.download_to_memory(
+ success, content, headers = await downloader.download_to_memory(
images[0]['url'],
use_auth=False
)
@@ -504,11 +503,13 @@ class DownloadManager:
# Download model file with progress tracking using downloader
downloader = await get_downloader()
+ # Determine if the download URL is from Civitai
+ use_auth = download_url.startswith("https://civitai.com/api/download/")
success, result = await downloader.download_file(
download_url,
save_path, # Use full path instead of separate dir and filename
progress_callback=lambda p: self._handle_download_progress(p, progress_callback),
- use_auth=True # Model downloads need authentication
+ use_auth=use_auth # Only use authentication for Civitai downloads
)
if not success:
diff --git a/py/services/downloader.py b/py/services/downloader.py
index dc38c0d1..4f6b5f97 100644
--- a/py/services/downloader.py
+++ b/py/services/downloader.py
@@ -45,6 +45,7 @@ class Downloader:
# Session management
self._session = None
self._session_created_at = None
+ self._proxy_url = None # Store proxy URL for current session
# Configuration
self.chunk_size = 4 * 1024 * 1024 # 4MB chunks for better throughput
@@ -64,6 +65,13 @@ class Downloader:
await self._create_session()
return self._session
+ @property
+ def proxy_url(self) -> Optional[str]:
+ """Get the current proxy URL (initialize if needed)"""
+ if not hasattr(self, '_proxy_url'):
+ self._proxy_url = None
+ return self._proxy_url
+
def _should_refresh_session(self) -> bool:
"""Check if session should be refreshed"""
if self._session is None:
@@ -84,6 +92,26 @@ class Downloader:
if self._session is not None:
await self._session.close()
+ # Check for app-level proxy settings
+ proxy_url = None
+ if settings.get('proxy_enabled', False):
+ proxy_host = settings.get('proxy_host', '').strip()
+ proxy_port = settings.get('proxy_port', '').strip()
+ proxy_type = settings.get('proxy_type', 'http').lower()
+ proxy_username = settings.get('proxy_username', '').strip()
+ proxy_password = settings.get('proxy_password', '').strip()
+
+ if proxy_host and proxy_port:
+ # Build proxy URL
+ if proxy_username and proxy_password:
+ proxy_url = f"{proxy_type}://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}"
+ else:
+ proxy_url = f"{proxy_type}://{proxy_host}:{proxy_port}"
+
+ logger.debug(f"Using app-level proxy: {proxy_type}://{proxy_host}:{proxy_port}")
+ logger.debug("Proxy mode: app-level proxy is active.")
+ else:
+ logger.debug("Proxy mode: system-level proxy (trust_env) will be used if configured in environment.")
# Optimize TCP connection parameters
connector = aiohttp.TCPConnector(
ssl=True,
@@ -102,12 +130,15 @@ class Downloader:
self._session = aiohttp.ClientSession(
connector=connector,
- trust_env=True, # Use system proxy settings
+ trust_env=proxy_url is None, # Only use system proxy if no app-level proxy is set
timeout=timeout
)
+
+ # Store proxy URL for use in requests
+ self._proxy_url = proxy_url
self._session_created_at = datetime.now()
- logger.debug("Created new HTTP session")
+ logger.debug("Created new HTTP session with proxy settings. App-level proxy: %s, System-level proxy (trust_env): %s", bool(proxy_url), proxy_url is None)
def _get_auth_headers(self, use_auth: bool = False) -> Dict[str, str]:
"""Get headers with optional authentication"""
@@ -164,6 +195,11 @@ class Downloader:
while retry_count <= self.max_retries:
try:
session = await self.session
+ # Debug log for proxy mode at request time
+ if self.proxy_url:
+ logger.debug(f"[download_file] Using app-level proxy: {self.proxy_url}")
+ else:
+ logger.debug("[download_file] Using system-level proxy (trust_env) if configured.")
# Add Range header for resume if we have partial data
request_headers = headers.copy()
@@ -177,7 +213,7 @@ class Downloader:
if resume_offset > 0:
logger.debug(f"Requesting range from byte {resume_offset}")
- async with session.get(url, headers=request_headers, allow_redirects=True) as response:
+ async with session.get(url, headers=request_headers, allow_redirects=True, proxy=self.proxy_url) as response:
# Handle different response codes
if response.status == 200:
# Full content response
@@ -202,7 +238,7 @@ class Downloader:
part_size = os.path.getsize(part_path)
logger.warning(f"Range not satisfiable. Part file size: {part_size}")
# Try to get actual file size
- head_response = await session.head(url, headers=headers)
+ head_response = await session.head(url, headers=headers, proxy=self.proxy_url)
if head_response.status == 200:
actual_size = int(head_response.headers.get('content-length', 0))
if part_size == actual_size:
@@ -330,8 +366,9 @@ class Downloader:
self,
url: str,
use_auth: bool = False,
- custom_headers: Optional[Dict[str, str]] = None
- ) -> Tuple[bool, Union[bytes, str]]:
+ custom_headers: Optional[Dict[str, str]] = None,
+ return_headers: bool = False
+ ) -> Tuple[bool, Union[bytes, str], Optional[Dict]]:
"""
Download a file to memory (for small files like preview images)
@@ -339,34 +376,47 @@ class Downloader:
url: Download URL
use_auth: Whether to include authentication headers
custom_headers: Additional headers to include in request
+ return_headers: Whether to return response headers along with content
Returns:
- Tuple[bool, Union[bytes, str]]: (success, content or error message)
+ Tuple[bool, Union[bytes, str], Optional[Dict]]: (success, content or error message, response headers if requested)
"""
try:
session = await self.session
+ # Debug log for proxy mode at request time
+ if self.proxy_url:
+ logger.debug(f"[download_to_memory] Using app-level proxy: {self.proxy_url}")
+ else:
+ logger.debug("[download_to_memory] Using system-level proxy (trust_env) if configured.")
# Prepare headers
headers = self._get_auth_headers(use_auth)
if custom_headers:
headers.update(custom_headers)
- async with session.get(url, headers=headers) as response:
+ async with session.get(url, headers=headers, proxy=self.proxy_url) as response:
if response.status == 200:
content = await response.read()
- return True, content
+ if return_headers:
+ return True, content, dict(response.headers)
+ else:
+ return True, content, None
elif response.status == 401:
- return False, "Unauthorized access - invalid or missing API key"
+ error_msg = "Unauthorized access - invalid or missing API key"
+ return False, error_msg, None
elif response.status == 403:
- return False, "Access forbidden"
+ error_msg = "Access forbidden"
+ return False, error_msg, None
elif response.status == 404:
- return False, "File not found"
+ error_msg = "File not found"
+ return False, error_msg, None
else:
- return False, f"Download failed with status {response.status}"
+ error_msg = f"Download failed with status {response.status}"
+ return False, error_msg, None
except Exception as e:
logger.error(f"Error downloading to memory from {url}: {e}")
- return False, str(e)
+ return False, str(e), None
async def get_response_headers(
self,
@@ -387,13 +437,18 @@ class Downloader:
"""
try:
session = await self.session
+ # Debug log for proxy mode at request time
+ if self.proxy_url:
+ logger.debug(f"[get_response_headers] Using app-level proxy: {self.proxy_url}")
+ else:
+ logger.debug("[get_response_headers] Using system-level proxy (trust_env) if configured.")
# Prepare headers
headers = self._get_auth_headers(use_auth)
if custom_headers:
headers.update(custom_headers)
- async with session.head(url, headers=headers) as response:
+ async with session.head(url, headers=headers, proxy=self.proxy_url) as response:
if response.status == 200:
return True, dict(response.headers)
else:
@@ -426,12 +481,21 @@ class Downloader:
"""
try:
session = await self.session
+ # Debug log for proxy mode at request time
+ if self.proxy_url:
+ logger.debug(f"[make_request] Using app-level proxy: {self.proxy_url}")
+ else:
+ logger.debug("[make_request] Using system-level proxy (trust_env) if configured.")
# Prepare headers
headers = self._get_auth_headers(use_auth)
if custom_headers:
headers.update(custom_headers)
+ # Add proxy to kwargs if not already present
+ if 'proxy' not in kwargs:
+ kwargs['proxy'] = self.proxy_url
+
async with session.request(method, url, headers=headers, **kwargs) as response:
if response.status == 200:
# Try to parse as JSON, fall back to text
@@ -460,7 +524,13 @@ class Downloader:
await self._session.close()
self._session = None
self._session_created_at = None
+ self._proxy_url = None
logger.debug("Closed HTTP session")
+
+ async def refresh_session(self):
+ """Force refresh the HTTP session (useful when proxy settings change)"""
+ await self._create_session()
+ logger.info("HTTP session refreshed due to settings change")
# Global instance accessor
diff --git a/py/services/embedding_service.py b/py/services/embedding_service.py
index bab067d9..46396fc5 100644
--- a/py/services/embedding_service.py
+++ b/py/services/embedding_service.py
@@ -1,11 +1,10 @@
import os
import logging
-from typing import Dict, List, Optional
+from typing import Dict
from .base_model_service import BaseModelService
from ..utils.models import EmbeddingMetadata
from ..config import config
-from ..utils.routes_common import ModelRouteUtils
logger = logging.getLogger(__name__)
@@ -38,7 +37,7 @@ class EmbeddingService(BaseModelService):
"notes": embedding_data.get("notes", ""),
"model_type": embedding_data.get("model_type", "embedding"),
"favorite": embedding_data.get("favorite", False),
- "civitai": ModelRouteUtils.filter_civitai_data(embedding_data.get("civitai", {}), minimal=True)
+ "civitai": self.filter_civitai_data(embedding_data.get("civitai", {}), minimal=True)
}
def find_duplicate_hashes(self) -> Dict:
diff --git a/py/services/example_images_cleanup_service.py b/py/services/example_images_cleanup_service.py
new file mode 100644
index 00000000..671f3d8a
--- /dev/null
+++ b/py/services/example_images_cleanup_service.py
@@ -0,0 +1,246 @@
+"""Service for cleaning up example image folders."""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import shutil
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Dict, List
+
+from .service_registry import ServiceRegistry
+from .settings_manager import settings
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass(slots=True)
+class CleanupResult:
+ """Structured result returned from cleanup operations."""
+
+ success: bool
+ checked_folders: int
+ moved_empty_folders: int
+ moved_orphaned_folders: int
+ skipped_non_hash: int
+ move_failures: int
+ errors: List[str]
+ deleted_root: str | None
+ partial_success: bool
+
+ def to_dict(self) -> Dict[str, object]:
+ """Convert the dataclass to a serialisable dictionary."""
+
+ data = {
+ "success": self.success,
+ "checked_folders": self.checked_folders,
+ "moved_empty_folders": self.moved_empty_folders,
+ "moved_orphaned_folders": self.moved_orphaned_folders,
+ "moved_total": self.moved_empty_folders + self.moved_orphaned_folders,
+ "skipped_non_hash": self.skipped_non_hash,
+ "move_failures": self.move_failures,
+ "errors": self.errors,
+ "deleted_root": self.deleted_root,
+ "partial_success": self.partial_success,
+ }
+
+ return data
+
+
+class ExampleImagesCleanupService:
+ """Encapsulates logic for cleaning example image folders."""
+
+ DELETED_FOLDER_NAME = "_deleted"
+
+ def __init__(self, deleted_folder_name: str | None = None) -> None:
+ self._deleted_folder_name = deleted_folder_name or self.DELETED_FOLDER_NAME
+
+ async def cleanup_example_image_folders(self) -> Dict[str, object]:
+        """Clean example image folders: delete empty ones and move orphaned hash folders into a deleted bucket."""
+
+ example_images_path = settings.get("example_images_path")
+ if not example_images_path:
+ logger.debug("Cleanup skipped: example images path not configured")
+ return {
+ "success": False,
+ "error": "Example images path is not configured.",
+ "error_code": "path_not_configured",
+ }
+
+ example_root = Path(example_images_path)
+ if not example_root.exists():
+ logger.debug("Cleanup skipped: example images path missing -> %s", example_root)
+ return {
+ "success": False,
+ "error": "Example images path does not exist.",
+ "error_code": "path_not_found",
+ }
+
+ try:
+ lora_scanner = await ServiceRegistry.get_lora_scanner()
+ checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
+ embedding_scanner = await ServiceRegistry.get_embedding_scanner()
+ except Exception as exc: # pragma: no cover - defensive guard
+ logger.error("Failed to acquire scanners for cleanup: %s", exc, exc_info=True)
+ return {
+ "success": False,
+ "error": f"Failed to load model scanners: {exc}",
+ "error_code": "scanner_initialization_failed",
+ }
+
+ deleted_bucket = example_root / self._deleted_folder_name
+ deleted_bucket.mkdir(exist_ok=True)
+
+ checked_folders = 0
+ moved_empty = 0
+ moved_orphaned = 0
+ skipped_non_hash = 0
+ move_failures = 0
+ errors: List[str] = []
+
+ for entry in os.scandir(example_root):
+ if not entry.is_dir(follow_symlinks=False):
+ continue
+
+ if entry.name == self._deleted_folder_name:
+ continue
+
+ checked_folders += 1
+ folder_path = Path(entry.path)
+
+ try:
+ if self._is_folder_empty(folder_path):
+ if await self._remove_empty_folder(folder_path):
+ moved_empty += 1
+ else:
+ move_failures += 1
+ continue
+
+ if not self._is_hash_folder(entry.name):
+ skipped_non_hash += 1
+ continue
+
+ hash_exists = (
+ lora_scanner.has_hash(entry.name)
+ or checkpoint_scanner.has_hash(entry.name)
+ or embedding_scanner.has_hash(entry.name)
+ )
+
+ if not hash_exists:
+ if await self._move_folder(folder_path, deleted_bucket):
+ moved_orphaned += 1
+ else:
+ move_failures += 1
+
+ except Exception as exc: # pragma: no cover - filesystem guard
+ move_failures += 1
+ error_message = f"{entry.name}: {exc}"
+ errors.append(error_message)
+ logger.error("Error processing example images folder %s: %s", folder_path, exc, exc_info=True)
+
+ partial_success = move_failures > 0 and (moved_empty > 0 or moved_orphaned > 0)
+ success = move_failures == 0 and not errors
+
+ result = CleanupResult(
+ success=success,
+ checked_folders=checked_folders,
+ moved_empty_folders=moved_empty,
+ moved_orphaned_folders=moved_orphaned,
+ skipped_non_hash=skipped_non_hash,
+ move_failures=move_failures,
+ errors=errors,
+ deleted_root=str(deleted_bucket),
+ partial_success=partial_success,
+ )
+
+ summary = result.to_dict()
+ if success:
+ logger.info(
+ "Example images cleanup complete: checked=%s, moved_empty=%s, moved_orphaned=%s",
+ checked_folders,
+ moved_empty,
+ moved_orphaned,
+ )
+ elif partial_success:
+ logger.warning(
+ "Example images cleanup partially complete: moved=%s, failures=%s",
+ summary["moved_total"],
+ move_failures,
+ )
+ else:
+ logger.error(
+ "Example images cleanup failed: move_failures=%s, errors=%s",
+ move_failures,
+ errors,
+ )
+
+ return summary
+
+ @staticmethod
+ def _is_folder_empty(folder_path: Path) -> bool:
+ try:
+ with os.scandir(folder_path) as iterator:
+ return not any(iterator)
+ except FileNotFoundError:
+ return True
+ except OSError as exc: # pragma: no cover - defensive guard
+ logger.debug("Failed to inspect folder %s: %s", folder_path, exc)
+ return False
+
+ @staticmethod
+ def _is_hash_folder(name: str) -> bool:
+ if len(name) != 64:
+ return False
+ hex_chars = set("0123456789abcdefABCDEF")
+ return all(char in hex_chars for char in name)
+
+ async def _remove_empty_folder(self, folder_path: Path) -> bool:
+ loop = asyncio.get_running_loop()
+
+ try:
+ await loop.run_in_executor(
+ None,
+ shutil.rmtree,
+ str(folder_path),
+ )
+ logger.debug("Removed empty example images folder %s", folder_path)
+ return True
+ except Exception as exc: # pragma: no cover - filesystem guard
+ logger.error("Failed to remove empty example images folder %s: %s", folder_path, exc, exc_info=True)
+ return False
+
+ async def _move_folder(self, folder_path: Path, deleted_bucket: Path) -> bool:
+ destination = self._build_destination(folder_path.name, deleted_bucket)
+ loop = asyncio.get_running_loop()
+
+ try:
+ await loop.run_in_executor(
+ None,
+ shutil.move,
+ str(folder_path),
+ str(destination),
+ )
+ logger.debug("Moved example images folder %s -> %s", folder_path, destination)
+ return True
+ except Exception as exc: # pragma: no cover - filesystem guard
+ logger.error(
+ "Failed to move example images folder %s to %s: %s",
+ folder_path,
+ destination,
+ exc,
+ exc_info=True,
+ )
+ return False
+
+ def _build_destination(self, folder_name: str, deleted_bucket: Path) -> Path:
+ destination = deleted_bucket / folder_name
+ suffix = 1
+
+ while destination.exists():
+ destination = deleted_bucket / f"{folder_name}_{suffix}"
+ suffix += 1
+
+ return destination
diff --git a/py/services/lora_service.py b/py/services/lora_service.py
index d1e522a3..551c4d3c 100644
--- a/py/services/lora_service.py
+++ b/py/services/lora_service.py
@@ -5,7 +5,6 @@ from typing import Dict, List, Optional
from .base_model_service import BaseModelService
from ..utils.models import LoraMetadata
from ..config import config
-from ..utils.routes_common import ModelRouteUtils
logger = logging.getLogger(__name__)
@@ -38,7 +37,7 @@ class LoraService(BaseModelService):
"usage_tips": lora_data.get("usage_tips", ""),
"notes": lora_data.get("notes", ""),
"favorite": lora_data.get("favorite", False),
- "civitai": ModelRouteUtils.filter_civitai_data(lora_data.get("civitai", {}), minimal=True)
+ "civitai": self.filter_civitai_data(lora_data.get("civitai", {}), minimal=True)
}
async def _apply_specific_filters(self, data: List[Dict], **kwargs) -> List[Dict]:
diff --git a/py/services/metadata_service.py b/py/services/metadata_service.py
index 86a94eaf..6a4f9dd8 100644
--- a/py/services/metadata_service.py
+++ b/py/services/metadata_service.py
@@ -49,10 +49,19 @@ async def initialize_metadata_providers():
civitai_provider = CivitaiModelMetadataProvider(civitai_client)
provider_manager.register_provider('civitai_api', civitai_provider)
providers.append(('civitai_api', civitai_provider))
- logger.info("Civitai API metadata provider registered")
+ logger.debug("Civitai API metadata provider registered")
except Exception as e:
logger.error(f"Failed to initialize Civitai API metadata provider: {e}")
-
+
+ # Register CivArchive provider, but do NOT add to fallback providers
+ try:
+ from .model_metadata_provider import CivArchiveModelMetadataProvider
+ civarchive_provider = CivArchiveModelMetadataProvider()
+ provider_manager.register_provider('civarchive', civarchive_provider)
+ logger.debug("CivArchive metadata provider registered (not included in fallback)")
+ except Exception as e:
+ logger.error(f"Failed to initialize CivArchive metadata provider: {e}")
+
# Set up fallback provider based on available providers
if len(providers) > 1:
# Always use Civitai API first, then Archive DB
@@ -68,7 +77,7 @@ async def initialize_metadata_providers():
# Only one provider available, set it as default
provider_name, provider = providers[0]
provider_manager.register_provider(provider_name, provider, is_default=True)
- logger.info(f"Single metadata provider registered as default: {provider_name}")
+ logger.debug(f"Single metadata provider registered as default: {provider_name}")
else:
logger.warning("No metadata providers available - this may cause metadata lookup failures")
diff --git a/py/services/metadata_sync_service.py b/py/services/metadata_sync_service.py
new file mode 100644
index 00000000..aaf2f248
--- /dev/null
+++ b/py/services/metadata_sync_service.py
@@ -0,0 +1,355 @@
+"""Services for synchronising metadata with remote providers."""
+
+from __future__ import annotations
+
+import json
+import logging
+import os
+from datetime import datetime
+from typing import Any, Awaitable, Callable, Dict, Iterable, Optional
+
+from ..services.settings_manager import SettingsManager
+from ..utils.model_utils import determine_base_model
+
+logger = logging.getLogger(__name__)
+
+
+class MetadataProviderProtocol:
+ """Subset of metadata provider interface consumed by the sync service."""
+
+ async def get_model_by_hash(self, sha256: str) -> tuple[Optional[Dict[str, Any]], Optional[str]]:
+ ...
+
+ async def get_model_version(
+ self, model_id: int, model_version_id: Optional[int]
+ ) -> Optional[Dict[str, Any]]:
+ ...
+
+
+class MetadataSyncService:
+ """High level orchestration for metadata synchronisation flows."""
+
+ def __init__(
+ self,
+ *,
+ metadata_manager,
+ preview_service,
+ settings: SettingsManager,
+ default_metadata_provider_factory: Callable[[], Awaitable[MetadataProviderProtocol]],
+ metadata_provider_selector: Callable[[str], Awaitable[MetadataProviderProtocol]],
+ ) -> None:
+ self._metadata_manager = metadata_manager
+ self._preview_service = preview_service
+ self._settings = settings
+ self._get_default_provider = default_metadata_provider_factory
+ self._get_provider = metadata_provider_selector
+
+ async def load_local_metadata(self, metadata_path: str) -> Dict[str, Any]:
+ """Load metadata JSON from disk, returning an empty structure when missing."""
+
+ if not os.path.exists(metadata_path):
+ return {}
+
+ try:
+ with open(metadata_path, "r", encoding="utf-8") as handle:
+ return json.load(handle)
+ except Exception as exc: # pragma: no cover - defensive logging
+ logger.error("Error loading metadata from %s: %s", metadata_path, exc)
+ return {}
+
+ async def mark_not_found_on_civitai(
+ self, metadata_path: str, local_metadata: Dict[str, Any]
+ ) -> None:
+ """Persist the not-found flag for a metadata payload."""
+
+ local_metadata["from_civitai"] = False
+ await self._metadata_manager.save_metadata(metadata_path, local_metadata)
+
+ @staticmethod
+ def is_civitai_api_metadata(meta: Dict[str, Any]) -> bool:
+ """Determine if the metadata originated from the CivitAI public API."""
+
+ if not isinstance(meta, dict):
+ return False
+ files = meta.get("files")
+ images = meta.get("images")
+ source = meta.get("source")
+ return bool(files) and bool(images) and source != "archive_db"
+
+ async def update_model_metadata(
+ self,
+ metadata_path: str,
+ local_metadata: Dict[str, Any],
+ civitai_metadata: Dict[str, Any],
+ metadata_provider: Optional[MetadataProviderProtocol] = None,
+ ) -> Dict[str, Any]:
+ """Merge remote metadata into the local record and persist the result."""
+
+ existing_civitai = local_metadata.get("civitai") or {}
+
+ if (
+ civitai_metadata.get("source") == "archive_db"
+ and self.is_civitai_api_metadata(existing_civitai)
+ ):
+ logger.info(
+ "Skip civitai update for %s (%s)",
+ local_metadata.get("model_name", ""),
+ existing_civitai.get("name", ""),
+ )
+ else:
+ merged_civitai = existing_civitai.copy()
+ merged_civitai.update(civitai_metadata)
+
+ if civitai_metadata.get("source") == "archive_db":
+ model_name = civitai_metadata.get("model", {}).get("name", "")
+ version_name = civitai_metadata.get("name", "")
+ logger.info(
+ "Recovered metadata from archive_db for deleted model: %s (%s)",
+ model_name,
+ version_name,
+ )
+
+ if "trainedWords" in existing_civitai:
+ existing_trained = existing_civitai.get("trainedWords", [])
+ new_trained = civitai_metadata.get("trainedWords", [])
+ merged_trained = list(set(existing_trained + new_trained))
+ merged_civitai["trainedWords"] = merged_trained
+
+ local_metadata["civitai"] = merged_civitai
+
+ if "model" in civitai_metadata and civitai_metadata["model"]:
+ model_data = civitai_metadata["model"]
+
+ if model_data.get("name"):
+ local_metadata["model_name"] = model_data["name"]
+
+ if not local_metadata.get("modelDescription") and model_data.get("description"):
+ local_metadata["modelDescription"] = model_data["description"]
+
+ if not local_metadata.get("tags") and model_data.get("tags"):
+ local_metadata["tags"] = model_data["tags"]
+
+ if model_data.get("creator") and not local_metadata.get("civitai", {}).get(
+ "creator"
+ ):
+ local_metadata.setdefault("civitai", {})["creator"] = model_data["creator"]
+
+ local_metadata["base_model"] = determine_base_model(
+ civitai_metadata.get("baseModel")
+ )
+
+ await self._preview_service.ensure_preview_for_metadata(
+ metadata_path, local_metadata, civitai_metadata.get("images", [])
+ )
+
+ await self._metadata_manager.save_metadata(metadata_path, local_metadata)
+ return local_metadata
+
+ async def fetch_and_update_model(
+ self,
+ *,
+ sha256: str,
+ file_path: str,
+ model_data: Dict[str, Any],
+ update_cache_func: Callable[[str, str, Dict[str, Any]], Awaitable[bool]],
+ ) -> tuple[bool, Optional[str]]:
+ """Fetch metadata for a model and update both disk and cache state."""
+
+ if not isinstance(model_data, dict):
+ error = f"Invalid model_data type: {type(model_data)}"
+ logger.error(error)
+ return False, error
+
+ metadata_path = os.path.splitext(file_path)[0] + ".metadata.json"
+ enable_archive = self._settings.get("enable_metadata_archive_db", False)
+
+ try:
+ if model_data.get("civitai_deleted") is True:
+ if not enable_archive or model_data.get("db_checked") is True:
+ return (
+ False,
+ "CivitAI model is deleted and metadata archive DB is not enabled",
+ )
+ metadata_provider = await self._get_provider("sqlite")
+ else:
+ metadata_provider = await self._get_default_provider()
+
+ civitai_metadata, error = await metadata_provider.get_model_by_hash(sha256)
+ if not civitai_metadata:
+ if error == "Model not found":
+ model_data["from_civitai"] = False
+ model_data["civitai_deleted"] = True
+ model_data["db_checked"] = enable_archive
+ model_data["last_checked_at"] = datetime.now().timestamp()
+
+ data_to_save = model_data.copy()
+ data_to_save.pop("folder", None)
+ await self._metadata_manager.save_metadata(file_path, data_to_save)
+
+ error_msg = (
+ f"Error fetching metadata: {error} (model_name={model_data.get('model_name', '')})"
+ )
+ logger.error(error_msg)
+ return False, error_msg
+
+ model_data["from_civitai"] = True
+ model_data["civitai_deleted"] = civitai_metadata.get("source") == "archive_db"
+ model_data["db_checked"] = enable_archive
+ model_data["last_checked_at"] = datetime.now().timestamp()
+
+ local_metadata = model_data.copy()
+ local_metadata.pop("folder", None)
+
+ await self.update_model_metadata(
+ metadata_path,
+ local_metadata,
+ civitai_metadata,
+ metadata_provider,
+ )
+
+ update_payload = {
+ "model_name": local_metadata.get("model_name"),
+ "preview_url": local_metadata.get("preview_url"),
+ "civitai": local_metadata.get("civitai"),
+ }
+ model_data.update(update_payload)
+
+ await update_cache_func(file_path, file_path, local_metadata)
+ return True, None
+ except KeyError as exc:
+ error_msg = f"Error fetching metadata - Missing key: {exc} in model_data={model_data}"
+ logger.error(error_msg)
+ return False, error_msg
+ except Exception as exc: # pragma: no cover - error path
+ error_msg = f"Error fetching metadata: {exc}"
+ logger.error(error_msg, exc_info=True)
+ return False, error_msg
+
+ async def fetch_metadata_by_sha(
+ self, sha256: str, metadata_provider: Optional[MetadataProviderProtocol] = None
+ ) -> tuple[Optional[Dict[str, Any]], Optional[str]]:
+ """Fetch metadata for a SHA256 hash from the configured provider."""
+
+ provider = metadata_provider or await self._get_default_provider()
+ return await provider.get_model_by_hash(sha256)
+
+ async def relink_metadata(
+ self,
+ *,
+ file_path: str,
+ metadata: Dict[str, Any],
+ model_id: int,
+ model_version_id: Optional[int],
+ ) -> Dict[str, Any]:
+ """Relink a local metadata record to a specific CivitAI model version."""
+
+ provider = await self._get_default_provider()
+ civitai_metadata = await provider.get_model_version(model_id, model_version_id)
+ if not civitai_metadata:
+ raise ValueError(
+ f"Model version not found on CivitAI for ID: {model_id}"
+ + (f" with version: {model_version_id}" if model_version_id else "")
+ )
+
+ primary_model_file: Optional[Dict[str, Any]] = None
+ for file_info in civitai_metadata.get("files", []):
+ if file_info.get("primary", False) and file_info.get("type") == "Model":
+ primary_model_file = file_info
+ break
+
+ if primary_model_file and primary_model_file.get("hashes", {}).get("SHA256"):
+ metadata["sha256"] = primary_model_file["hashes"]["SHA256"].lower()
+
+ metadata_path = os.path.splitext(file_path)[0] + ".metadata.json"
+ await self.update_model_metadata(
+ metadata_path,
+ metadata,
+ civitai_metadata,
+ provider,
+ )
+
+ return metadata
+
+ async def save_metadata_updates(
+ self,
+ *,
+ file_path: str,
+ updates: Dict[str, Any],
+ metadata_loader: Callable[[str], Awaitable[Dict[str, Any]]],
+ update_cache: Callable[[str, str, Dict[str, Any]], Awaitable[bool]],
+ ) -> Dict[str, Any]:
+ """Apply metadata updates and persist to disk and cache."""
+
+ metadata_path = os.path.splitext(file_path)[0] + ".metadata.json"
+ metadata = await metadata_loader(metadata_path)
+
+ for key, value in updates.items():
+ if isinstance(value, dict) and isinstance(metadata.get(key), dict):
+ metadata[key].update(value)
+ else:
+ metadata[key] = value
+
+ await self._metadata_manager.save_metadata(file_path, metadata)
+ await update_cache(file_path, file_path, metadata)
+
+ if "model_name" in updates:
+ logger.debug("Metadata update touched model_name; cache resort required")
+
+ return metadata
+
+ async def verify_duplicate_hashes(
+ self,
+ *,
+ file_paths: Iterable[str],
+ metadata_loader: Callable[[str], Awaitable[Dict[str, Any]]],
+ hash_calculator: Callable[[str], Awaitable[str]],
+ update_cache: Callable[[str, str, Dict[str, Any]], Awaitable[bool]],
+ ) -> Dict[str, Any]:
+ """Verify a collection of files share the same SHA256 hash."""
+
+ file_paths = list(file_paths)
+ if not file_paths:
+ raise ValueError("No file paths provided for verification")
+
+ results = {
+ "verified_as_duplicates": True,
+ "mismatched_files": [],
+ "new_hash_map": {},
+ }
+
+ expected_hash: Optional[str] = None
+ first_metadata_path = os.path.splitext(file_paths[0])[0] + ".metadata.json"
+ first_metadata = await metadata_loader(first_metadata_path)
+ if first_metadata and "sha256" in first_metadata:
+ expected_hash = first_metadata["sha256"].lower()
+
+ for path in file_paths:
+ if not os.path.exists(path):
+ continue
+
+ try:
+ actual_hash = await hash_calculator(path)
+ metadata_path = os.path.splitext(path)[0] + ".metadata.json"
+ metadata = await metadata_loader(metadata_path)
+ stored_hash = metadata.get("sha256", "").lower()
+
+ if not expected_hash:
+ expected_hash = stored_hash
+
+ if actual_hash != expected_hash:
+ results["verified_as_duplicates"] = False
+ results["mismatched_files"].append(path)
+ results["new_hash_map"][path] = actual_hash
+
+ if actual_hash != stored_hash:
+ metadata["sha256"] = actual_hash
+ await self._metadata_manager.save_metadata(path, metadata)
+ await update_cache(path, path, metadata)
+ except Exception as exc: # pragma: no cover - defensive path
+ logger.error("Error verifying hash for %s: %s", path, exc)
+ results["mismatched_files"].append(path)
+ results["new_hash_map"][path] = "error_calculating_hash"
+ results["verified_as_duplicates"] = False
+
+ return results
+
diff --git a/py/services/model_lifecycle_service.py b/py/services/model_lifecycle_service.py
new file mode 100644
index 00000000..9aa87b04
--- /dev/null
+++ b/py/services/model_lifecycle_service.py
@@ -0,0 +1,245 @@
+"""Service routines for model lifecycle mutations."""
+
+from __future__ import annotations
+
+import logging
+import os
+from typing import Awaitable, Callable, Dict, Iterable, List, Optional
+
+from ..services.service_registry import ServiceRegistry
+from ..utils.constants import PREVIEW_EXTENSIONS
+
+logger = logging.getLogger(__name__)
+
+
+async def delete_model_artifacts(target_dir: str, file_name: str) -> List[str]:
+ """Delete the primary model artefacts within ``target_dir``."""
+
+ patterns = [
+ f"{file_name}.safetensors",
+ f"{file_name}.metadata.json",
+ ]
+ for ext in PREVIEW_EXTENSIONS:
+ patterns.append(f"{file_name}{ext}")
+
+ deleted: List[str] = []
+ main_file = patterns[0]
+ main_path = os.path.join(target_dir, main_file).replace(os.sep, "/")
+
+ if os.path.exists(main_path):
+ os.remove(main_path)
+ deleted.append(main_path)
+ else:
+ logger.warning("Model file not found: %s", main_file)
+
+ for pattern in patterns[1:]:
+ path = os.path.join(target_dir, pattern)
+ if os.path.exists(path):
+ try:
+ os.remove(path)
+ deleted.append(pattern)
+ except Exception as exc: # pragma: no cover - defensive path
+ logger.warning("Failed to delete %s: %s", pattern, exc)
+
+ return deleted
+
+
+class ModelLifecycleService:
+ """Co-ordinate destructive and mutating model operations."""
+
+ def __init__(
+ self,
+ *,
+ scanner,
+ metadata_manager,
+ metadata_loader: Callable[[str], Awaitable[Dict[str, object]]],
+ recipe_scanner_factory: Callable[[], Awaitable] | None = None,
+ ) -> None:
+ self._scanner = scanner
+ self._metadata_manager = metadata_manager
+ self._metadata_loader = metadata_loader
+ self._recipe_scanner_factory = (
+ recipe_scanner_factory or ServiceRegistry.get_recipe_scanner
+ )
+
+ async def delete_model(self, file_path: str) -> Dict[str, object]:
+ """Delete a model file and associated artefacts."""
+
+ if not file_path:
+ raise ValueError("Model path is required")
+
+ target_dir = os.path.dirname(file_path)
+ file_name = os.path.splitext(os.path.basename(file_path))[0]
+
+ deleted_files = await delete_model_artifacts(target_dir, file_name)
+
+ cache = await self._scanner.get_cached_data()
+ cache.raw_data = [item for item in cache.raw_data if item["file_path"] != file_path]
+ await cache.resort()
+
+ if hasattr(self._scanner, "_hash_index") and self._scanner._hash_index:
+ self._scanner._hash_index.remove_by_path(file_path)
+
+ return {"success": True, "deleted_files": deleted_files}
+
+ async def exclude_model(self, file_path: str) -> Dict[str, object]:
+ """Mark a model as excluded and prune cache references."""
+
+ if not file_path:
+ raise ValueError("Model path is required")
+
+ metadata_path = os.path.splitext(file_path)[0] + ".metadata.json"
+ metadata = await self._metadata_loader(metadata_path)
+ metadata["exclude"] = True
+
+ await self._metadata_manager.save_metadata(file_path, metadata)
+
+ cache = await self._scanner.get_cached_data()
+ model_to_remove = next(
+ (item for item in cache.raw_data if item["file_path"] == file_path),
+ None,
+ )
+
+ if model_to_remove:
+ for tag in model_to_remove.get("tags", []):
+ if tag in getattr(self._scanner, "_tags_count", {}):
+ self._scanner._tags_count[tag] = max(
+ 0, self._scanner._tags_count[tag] - 1
+ )
+ if self._scanner._tags_count[tag] == 0:
+ del self._scanner._tags_count[tag]
+
+ if hasattr(self._scanner, "_hash_index") and self._scanner._hash_index:
+ self._scanner._hash_index.remove_by_path(file_path)
+
+ cache.raw_data = [
+ item for item in cache.raw_data if item["file_path"] != file_path
+ ]
+ await cache.resort()
+
+ excluded = getattr(self._scanner, "_excluded_models", None)
+ if isinstance(excluded, list):
+ excluded.append(file_path)
+
+ message = f"Model {os.path.basename(file_path)} excluded"
+ return {"success": True, "message": message}
+
+ async def bulk_delete_models(self, file_paths: Iterable[str]) -> Dict[str, object]:
+ """Delete a collection of models via the scanner bulk operation."""
+
+ file_paths = list(file_paths)
+ if not file_paths:
+ raise ValueError("No file paths provided for deletion")
+
+ return await self._scanner.bulk_delete_models(file_paths)
+
+ async def rename_model(
+ self, *, file_path: str, new_file_name: str
+ ) -> Dict[str, object]:
+ """Rename a model and its companion artefacts."""
+
+ if not file_path or not new_file_name:
+ raise ValueError("File path and new file name are required")
+
+ invalid_chars = {"/", "\\", ":", "*", "?", '"', "<", ">", "|"}
+ if any(char in new_file_name for char in invalid_chars):
+ raise ValueError("Invalid characters in file name")
+
+ target_dir = os.path.dirname(file_path)
+ old_file_name = os.path.splitext(os.path.basename(file_path))[0]
+ new_file_path = os.path.join(target_dir, f"{new_file_name}.safetensors").replace(
+ os.sep, "/"
+ )
+
+ if os.path.exists(new_file_path):
+ raise ValueError("A file with this name already exists")
+
+ patterns = [
+ f"{old_file_name}.safetensors",
+ f"{old_file_name}.metadata.json",
+ f"{old_file_name}.metadata.json.bak",
+ ]
+ for ext in PREVIEW_EXTENSIONS:
+ patterns.append(f"{old_file_name}{ext}")
+
+ existing_files: List[tuple[str, str]] = []
+ for pattern in patterns:
+ path = os.path.join(target_dir, pattern)
+ if os.path.exists(path):
+ existing_files.append((path, pattern))
+
+ metadata_path = os.path.join(target_dir, f"{old_file_name}.metadata.json")
+ metadata: Optional[Dict[str, object]] = None
+ hash_value: Optional[str] = None
+
+ if os.path.exists(metadata_path):
+ metadata = await self._metadata_loader(metadata_path)
+ hash_value = metadata.get("sha256") if isinstance(metadata, dict) else None
+
+ renamed_files: List[str] = []
+ new_metadata_path: Optional[str] = None
+ new_preview: Optional[str] = None
+
+ for old_path, pattern in existing_files:
+ ext = self._get_multipart_ext(pattern)
+ new_path = os.path.join(target_dir, f"{new_file_name}{ext}").replace(
+ os.sep, "/"
+ )
+ os.rename(old_path, new_path)
+ renamed_files.append(new_path)
+
+ if ext == ".metadata.json":
+ new_metadata_path = new_path
+
+ if metadata and new_metadata_path:
+ metadata["file_name"] = new_file_name
+ metadata["file_path"] = new_file_path
+
+ if metadata.get("preview_url"):
+ old_preview = str(metadata["preview_url"])
+ ext = self._get_multipart_ext(old_preview)
+ new_preview = os.path.join(target_dir, f"{new_file_name}{ext}").replace(
+ os.sep, "/"
+ )
+ metadata["preview_url"] = new_preview
+
+ await self._metadata_manager.save_metadata(new_file_path, metadata)
+
+ if metadata:
+ await self._scanner.update_single_model_cache(
+ file_path, new_file_path, metadata
+ )
+
+ if hash_value and getattr(self._scanner, "model_type", "") == "lora":
+ recipe_scanner = await self._recipe_scanner_factory()
+ if recipe_scanner:
+ try:
+ await recipe_scanner.update_lora_filename_by_hash(
+ hash_value, new_file_name
+ )
+ except Exception as exc: # pragma: no cover - defensive logging
+ logger.error(
+ "Error updating recipe references for %s: %s",
+ file_path,
+ exc,
+ )
+
+ return {
+ "success": True,
+ "new_file_path": new_file_path,
+ "new_preview_path": new_preview,
+ "renamed_files": renamed_files,
+ "reload_required": False,
+ }
+
+ @staticmethod
+ def _get_multipart_ext(filename: str) -> str:
+ """Return the extension for files with compound suffixes."""
+
+ parts = filename.split(".")
+ if len(parts) == 3:
+ return "." + ".".join(parts[-2:])
+ if len(parts) >= 4:
+ return "." + ".".join(parts[-3:])
+ return os.path.splitext(filename)[1]
+
diff --git a/py/services/model_metadata_provider.py b/py/services/model_metadata_provider.py
index 9f54a2e7..3099b5fc 100644
--- a/py/services/model_metadata_provider.py
+++ b/py/services/model_metadata_provider.py
@@ -1,8 +1,40 @@
from abc import ABC, abstractmethod
import json
-import aiosqlite
import logging
-from typing import Optional, Dict, List, Tuple, Any
+from typing import Optional, Dict, Tuple, Any
+from .downloader import get_downloader
+
+try:
+ from bs4 import BeautifulSoup
+except ImportError as exc:
+ BeautifulSoup = None # type: ignore[assignment]
+ _BS4_IMPORT_ERROR = exc
+else:
+ _BS4_IMPORT_ERROR = None
+
+try:
+ import aiosqlite
+except ImportError as exc:
+ aiosqlite = None # type: ignore[assignment]
+ _AIOSQLITE_IMPORT_ERROR = exc
+else:
+ _AIOSQLITE_IMPORT_ERROR = None
+
+def _require_beautifulsoup() -> Any:
+ if BeautifulSoup is None:
+ raise RuntimeError(
+ "BeautifulSoup (bs4) is required for CivArchiveModelMetadataProvider. "
+ "Install it with 'pip install beautifulsoup4'."
+ ) from _BS4_IMPORT_ERROR
+ return BeautifulSoup
+
+def _require_aiosqlite() -> Any:
+ if aiosqlite is None:
+ raise RuntimeError(
+ "aiosqlite is required for SQLiteModelMetadataProvider. "
+ "Install it with 'pip install aiosqlite'."
+ ) from _AIOSQLITE_IMPORT_ERROR
+ return aiosqlite
logger = logging.getLogger(__name__)
@@ -10,7 +42,7 @@ class ModelMetadataProvider(ABC):
"""Base abstract class for all model metadata providers"""
@abstractmethod
- async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
+ async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Find model by hash value"""
pass
@@ -28,11 +60,6 @@ class ModelMetadataProvider(ABC):
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Fetch model version metadata"""
pass
-
- @abstractmethod
- async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
- """Fetch model metadata (description, tags, and creator info)"""
- pass
class CivitaiModelMetadataProvider(ModelMetadataProvider):
"""Provider that uses Civitai API for metadata"""
@@ -40,7 +67,7 @@ class CivitaiModelMetadataProvider(ModelMetadataProvider):
def __init__(self, civitai_client):
self.client = civitai_client
- async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
+ async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
return await self.client.get_model_by_hash(model_hash)
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
@@ -51,19 +78,135 @@ class CivitaiModelMetadataProvider(ModelMetadataProvider):
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
return await self.client.get_model_version_info(version_id)
+
+class CivArchiveModelMetadataProvider(ModelMetadataProvider):
+ """Provider that uses CivArchive HTML page parsing for metadata"""
+
+ async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
+ """Not supported by CivArchive provider"""
+ return None, "CivArchive provider does not support hash lookup"
- async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
- return await self.client.get_model_metadata(model_id)
+ async def get_model_versions(self, model_id: str) -> Optional[Dict]:
+ """Not supported by CivArchive provider"""
+ return None
+
+ async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
+ """Get specific model version by parsing CivArchive HTML page"""
+ if model_id is None or version_id is None:
+ return None
+
+ try:
+ # Construct CivArchive URL
+ url = f"https://civarchive.com/models/{model_id}?modelVersionId={version_id}"
+
+ downloader = await get_downloader()
+ session = await downloader.session
+ async with session.get(url) as response:
+ if response.status != 200:
+ return None
+
+ html_content = await response.text()
+
+ # Parse HTML to extract JSON data
+ soup_parser = _require_beautifulsoup()
+ soup = soup_parser(html_content, 'html.parser')
+ script_tag = soup.find('script', {'id': '__NEXT_DATA__', 'type': 'application/json'})
+
+ if not script_tag:
+ return None
+
+ # Parse JSON content
+ json_data = json.loads(script_tag.string)
+ model_data = json_data.get('props', {}).get('pageProps', {}).get('model')
+
+ if not model_data or 'version' not in model_data:
+ return None
+
+ # Extract version data as base
+ version = model_data['version'].copy()
+
+ # Restructure stats
+ if 'downloadCount' in version and 'ratingCount' in version and 'rating' in version:
+ version['stats'] = {
+ 'downloadCount': version.pop('downloadCount'),
+ 'ratingCount': version.pop('ratingCount'),
+ 'rating': version.pop('rating')
+ }
+
+ # Rename trigger to trainedWords
+ if 'trigger' in version:
+ version['trainedWords'] = version.pop('trigger')
+
+ # Transform files data to expected format
+ if 'files' in version:
+ transformed_files = []
+ for file_data in version['files']:
+ # Find first available mirror (deletedAt is null)
+ available_mirror = None
+ for mirror in file_data.get('mirrors', []):
+ if mirror.get('deletedAt') is None:
+ available_mirror = mirror
+ break
+
+ # Create transformed file entry
+ transformed_file = {
+ 'id': file_data.get('id'),
+ 'sizeKB': file_data.get('sizeKB'),
+ 'name': available_mirror.get('filename', file_data.get('name')) if available_mirror else file_data.get('name'),
+ 'type': file_data.get('type'),
+ 'downloadUrl': available_mirror.get('url') if available_mirror else None,
+ 'primary': True,
+ 'mirrors': file_data.get('mirrors', [])
+ }
+
+ # Transform hash format
+ if 'sha256' in file_data:
+ transformed_file['hashes'] = {
+ 'SHA256': file_data['sha256'].upper()
+ }
+
+ transformed_files.append(transformed_file)
+
+ version['files'] = transformed_files
+
+ # Add model information
+ version['model'] = {
+ 'name': model_data.get('name'),
+ 'type': model_data.get('type'),
+ 'nsfw': model_data.get('is_nsfw', False),
+ 'description': model_data.get('description'),
+ 'tags': model_data.get('tags', [])
+ }
+
+ version['creator'] = {
+ 'username': model_data.get('username'),
+ 'image': ''
+ }
+
+ # Add source identifier
+ version['source'] = 'civarchive'
+ version['is_deleted'] = json_data.get('query', {}).get('is_deleted', False)
+
+ return version
+
+ except Exception as e:
+ logger.error(f"Error fetching CivArchive model version {model_id}/{version_id}: {e}")
+ return None
+
+ async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
+ """Not supported by CivArchive provider - requires both model_id and version_id"""
+ return None, "CivArchive provider requires both model_id and version_id"
class SQLiteModelMetadataProvider(ModelMetadataProvider):
"""Provider that uses SQLite database for metadata"""
def __init__(self, db_path: str):
self.db_path = db_path
+ self._aiosqlite = _require_aiosqlite()
- async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
+ async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Find model by hash value from SQLite database"""
- async with aiosqlite.connect(self.db_path) as db:
+ async with self._aiosqlite.connect(self.db_path) as db:
# Look up in model_files table to get model_id and version_id
query = """
SELECT model_id, version_id
@@ -71,24 +214,25 @@ class SQLiteModelMetadataProvider(ModelMetadataProvider):
WHERE sha256 = ?
LIMIT 1
"""
- db.row_factory = aiosqlite.Row
+ db.row_factory = self._aiosqlite.Row
cursor = await db.execute(query, (model_hash.upper(),))
file_row = await cursor.fetchone()
if not file_row:
- return None
+ return None, "Model not found"
# Get version details
model_id = file_row['model_id']
version_id = file_row['version_id']
# Build response in the same format as Civitai API
- return await self._get_version_with_model_data(db, model_id, version_id)
+ result = await self._get_version_with_model_data(db, model_id, version_id)
+ return result, None if result else "Error retrieving model data"
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
"""Get all versions of a model from SQLite database"""
- async with aiosqlite.connect(self.db_path) as db:
- db.row_factory = aiosqlite.Row
+ async with self._aiosqlite.connect(self.db_path) as db:
+ db.row_factory = self._aiosqlite.Row
# First check if model exists
model_query = "SELECT * FROM models WHERE id = ?"
@@ -100,6 +244,7 @@ class SQLiteModelMetadataProvider(ModelMetadataProvider):
model_data = json.loads(model_row['data'])
model_type = model_row['type']
+ model_name = model_row['name']
# Get all versions for this model
versions_query = """
@@ -136,7 +281,8 @@ class SQLiteModelMetadataProvider(ModelMetadataProvider):
return {
'modelVersions': model_versions,
- 'type': model_type
+ 'type': model_type,
+ 'name': model_name
}
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
@@ -144,8 +290,8 @@ class SQLiteModelMetadataProvider(ModelMetadataProvider):
if not model_id and not version_id:
return None
- async with aiosqlite.connect(self.db_path) as db:
- db.row_factory = aiosqlite.Row
+ async with self._aiosqlite.connect(self.db_path) as db:
+ db.row_factory = self._aiosqlite.Row
# Case 1: Only version_id is provided
if model_id is None and version_id is not None:
@@ -181,8 +327,8 @@ class SQLiteModelMetadataProvider(ModelMetadataProvider):
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Fetch model version metadata from SQLite database"""
- async with aiosqlite.connect(self.db_path) as db:
- db.row_factory = aiosqlite.Row
+ async with self._aiosqlite.connect(self.db_path) as db:
+ db.row_factory = self._aiosqlite.Row
# Get version details
version_query = "SELECT model_id FROM model_versions WHERE id = ?"
@@ -198,37 +344,6 @@ class SQLiteModelMetadataProvider(ModelMetadataProvider):
version_data = await self._get_version_with_model_data(db, model_id, version_id)
return version_data, None
- async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
- """Fetch model metadata from SQLite database"""
- async with aiosqlite.connect(self.db_path) as db:
- db.row_factory = aiosqlite.Row
-
- # Get model details
- model_query = "SELECT name, type, data, username FROM models WHERE id = ?"
- cursor = await db.execute(model_query, (model_id,))
- model_row = await cursor.fetchone()
-
- if not model_row:
- return None, 404
-
- # Parse data JSON
- try:
- model_data = json.loads(model_row['data'])
-
- # Extract relevant metadata
- metadata = {
- "description": model_data.get("description", "No model description available"),
- "tags": model_data.get("tags", []),
- "creator": {
- "username": model_row['username'] or model_data.get("creator", {}).get("username"),
- "image": model_data.get("creator", {}).get("image")
- }
- }
-
- return metadata, 200
- except json.JSONDecodeError:
- return None, 500
-
async def _get_version_with_model_data(self, db, model_id, version_id) -> Optional[Dict]:
"""Helper to build version data with model information"""
# Get version details
@@ -274,6 +389,45 @@ class SQLiteModelMetadataProvider(ModelMetadataProvider):
# Add any additional fields from version data
result.update(version_data)
+ # Attach files associated with this version from model_files table
+ files_query = """
+ SELECT data
+ FROM model_files
+ WHERE version_id = ? AND type = 'Model'
+ ORDER BY id ASC
+ """
+ cursor = await db.execute(files_query, (version_id,))
+ file_rows = await cursor.fetchall()
+
+ files = []
+ for file_row in file_rows:
+ try:
+ file_data = json.loads(file_row['data'])
+ except json.JSONDecodeError:
+ logger.warning(
+ "Skipping model_files entry with invalid JSON for version_id %s", version_id
+ )
+ continue
+ # Remove 'modelId' and 'modelVersionId' fields if present
+ file_data.pop('modelId', None)
+ file_data.pop('modelVersionId', None)
+ files.append(file_data)
+
+ if 'files' in result:
+ existing_files = result['files']
+ if isinstance(existing_files, list):
+ existing_files.extend(files)
+ result['files'] = existing_files
+ else:
+ merged_files = files.copy()
+ if existing_files:
+ merged_files.insert(0, existing_files)
+ result['files'] = merged_files
+ elif files:
+ result['files'] = files
+ else:
+ result['files'] = []
+
return result
except json.JSONDecodeError:
return None
@@ -283,15 +437,16 @@ class FallbackMetadataProvider(ModelMetadataProvider):
def __init__(self, providers: list):
self.providers = providers
- async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
+ async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
for provider in self.providers:
try:
- result = await provider.get_model_by_hash(model_hash)
+ result, error = await provider.get_model_by_hash(model_hash)
if result:
- return result
- except Exception:
+ return result, error
+ except Exception as e:
+ logger.debug(f"Provider failed for get_model_by_hash: {e}")
continue
- return None
+ return None, "Model not found"
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
for provider in self.providers:
@@ -326,17 +481,6 @@ class FallbackMetadataProvider(ModelMetadataProvider):
continue
return None, "No provider could retrieve the data"
- async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
- for provider in self.providers:
- try:
- result, status = await provider.get_model_metadata(model_id)
- if result:
- return result, status
- except Exception as e:
- logger.debug(f"Provider failed for get_model_metadata: {e}")
- continue
- return None, 404
-
class ModelMetadataProviderManager:
"""Manager for selecting and using model metadata providers"""
@@ -359,7 +503,7 @@ class ModelMetadataProviderManager:
if is_default or self.default_provider is None:
self.default_provider = name
- async def get_model_by_hash(self, model_hash: str, provider_name: str = None) -> Optional[Dict]:
+ async def get_model_by_hash(self, model_hash: str, provider_name: str = None) -> Tuple[Optional[Dict], Optional[str]]:
"""Find model by hash using specified or default provider"""
provider = self._get_provider(provider_name)
return await provider.get_model_by_hash(model_hash)
@@ -379,11 +523,6 @@ class ModelMetadataProviderManager:
provider = self._get_provider(provider_name)
return await provider.get_model_version_info(version_id)
- async def get_model_metadata(self, model_id: str, provider_name: str = None) -> Tuple[Optional[Dict], int]:
- """Fetch model metadata using specified or default provider"""
- provider = self._get_provider(provider_name)
- return await provider.get_model_metadata(model_id)
-
def _get_provider(self, provider_name: str = None) -> ModelMetadataProvider:
"""Get provider by name or default provider"""
if provider_name and provider_name in self.providers:
diff --git a/py/services/model_query.py b/py/services/model_query.py
new file mode 100644
index 00000000..df7bb67a
--- /dev/null
+++ b/py/services/model_query.py
@@ -0,0 +1,196 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Protocol, Callable
+
+from ..utils.constants import NSFW_LEVELS
+from ..utils.utils import fuzzy_match as default_fuzzy_match
+
+
+class SettingsProvider(Protocol):
+ """Protocol describing the SettingsManager contract used by query helpers."""
+
+ def get(self, key: str, default: Any = None) -> Any:
+ ...
+
+
+@dataclass(frozen=True)
+class SortParams:
+ """Normalized representation of sorting instructions."""
+
+ key: str
+ order: str
+
+
+@dataclass(frozen=True)
+class FilterCriteria:
+ """Container for model list filtering options."""
+
+ folder: Optional[str] = None
+ base_models: Optional[Sequence[str]] = None
+ tags: Optional[Sequence[str]] = None
+ favorites_only: bool = False
+ search_options: Optional[Dict[str, Any]] = None
+
+
+class ModelCacheRepository:
+ """Adapter around scanner cache access and sort normalisation."""
+
+ def __init__(self, scanner) -> None:
+ self._scanner = scanner
+
+ async def get_cache(self):
+ """Return the underlying cache instance from the scanner."""
+ return await self._scanner.get_cached_data()
+
+ async def fetch_sorted(self, params: SortParams) -> List[Dict[str, Any]]:
+ """Fetch cached data pre-sorted according to ``params``."""
+ cache = await self.get_cache()
+ return await cache.get_sorted_data(params.key, params.order)
+
+ @staticmethod
+ def parse_sort(sort_by: str) -> SortParams:
+ """Parse an incoming sort string into key/order primitives."""
+ if not sort_by:
+ return SortParams(key="name", order="asc")
+
+ if ":" in sort_by:
+ raw_key, raw_order = sort_by.split(":", 1)
+ sort_key = raw_key.strip().lower() or "name"
+ order = raw_order.strip().lower()
+ else:
+ sort_key = sort_by.strip().lower() or "name"
+ order = "asc"
+
+ if order not in ("asc", "desc"):
+ order = "asc"
+
+ return SortParams(key=sort_key, order=order)
+
+
+class ModelFilterSet:
+ """Applies common filtering rules to the model collection."""
+
+ def __init__(self, settings: SettingsProvider, nsfw_levels: Optional[Dict[str, int]] = None) -> None:
+ self._settings = settings
+ self._nsfw_levels = nsfw_levels or NSFW_LEVELS
+
+ def apply(self, data: Iterable[Dict[str, Any]], criteria: FilterCriteria) -> List[Dict[str, Any]]:
+ """Return items that satisfy the provided criteria."""
+ items = list(data)
+
+ if self._settings.get("show_only_sfw", False):
+ threshold = self._nsfw_levels.get("R", 0)
+ items = [
+ item for item in items
+ if not item.get("preview_nsfw_level") or item.get("preview_nsfw_level") < threshold
+ ]
+
+ if criteria.favorites_only:
+ items = [item for item in items if item.get("favorite", False)]
+
+ folder = criteria.folder
+ options = criteria.search_options or {}
+ recursive = bool(options.get("recursive", True))
+ if folder is not None:
+ if recursive:
+ if folder:
+ folder_with_sep = f"{folder}/"
+ items = [
+ item for item in items
+ if item.get("folder") == folder or item.get("folder", "").startswith(folder_with_sep)
+ ]
+ else:
+ items = [item for item in items if item.get("folder") == folder]
+
+ base_models = criteria.base_models or []
+ if base_models:
+ base_model_set = set(base_models)
+ items = [item for item in items if item.get("base_model") in base_model_set]
+
+ tags = criteria.tags or []
+ if tags:
+ tag_set = set(tags)
+ items = [
+ item for item in items
+ if any(tag in tag_set for tag in item.get("tags", []))
+ ]
+
+ return items
+
+
+class SearchStrategy:
+ """Encapsulates text and fuzzy matching behaviour for model queries."""
+
+ DEFAULT_OPTIONS: Dict[str, Any] = {
+ "filename": True,
+ "modelname": True,
+ "tags": False,
+ "recursive": True,
+ "creator": False,
+ }
+
+ def __init__(self, fuzzy_matcher: Optional[Callable[[str, str], bool]] = None) -> None:
+ self._fuzzy_match = fuzzy_matcher or default_fuzzy_match
+
+ def normalize_options(self, options: Optional[Dict[str, Any]]) -> Dict[str, Any]:
+ """Merge provided options with defaults without mutating input."""
+ normalized = dict(self.DEFAULT_OPTIONS)
+ if options:
+ normalized.update(options)
+ return normalized
+
+ def apply(
+ self,
+ data: Iterable[Dict[str, Any]],
+ search_term: str,
+ options: Dict[str, Any],
+ fuzzy: bool = False,
+ ) -> List[Dict[str, Any]]:
+ """Return items matching the search term using the configured strategy."""
+ if not search_term:
+ return list(data)
+
+ search_lower = search_term.lower()
+ results: List[Dict[str, Any]] = []
+
+ for item in data:
+ if options.get("filename", True):
+ candidate = item.get("file_name", "")
+ if self._matches(candidate, search_term, search_lower, fuzzy):
+ results.append(item)
+ continue
+
+ if options.get("modelname", True):
+ candidate = item.get("model_name", "")
+ if self._matches(candidate, search_term, search_lower, fuzzy):
+ results.append(item)
+ continue
+
+ if options.get("tags", False):
+ tags = item.get("tags", []) or []
+ if any(self._matches(tag, search_term, search_lower, fuzzy) for tag in tags):
+ results.append(item)
+ continue
+
+ if options.get("creator", False):
+ creator_username = ""
+ civitai = item.get("civitai")
+ if isinstance(civitai, dict):
+ creator = civitai.get("creator")
+ if isinstance(creator, dict):
+ creator_username = creator.get("username", "")
+ if creator_username and self._matches(creator_username, search_term, search_lower, fuzzy):
+ results.append(item)
+ continue
+
+ return results
+
+ def _matches(self, candidate: str, search_term: str, search_lower: str, fuzzy: bool) -> bool:
+ if not candidate:
+ return False
+
+ candidate_lower = candidate.lower()
+ if fuzzy:
+ return self._fuzzy_match(candidate, search_term)
+ return search_lower in candidate_lower
diff --git a/py/services/model_scanner.py b/py/services/model_scanner.py
index f0ae3177..51aa4507 100644
--- a/py/services/model_scanner.py
+++ b/py/services/model_scanner.py
@@ -13,6 +13,7 @@ from ..utils.metadata_manager import MetadataManager
from .model_cache import ModelCache
from .model_hash_index import ModelHashIndex
from ..utils.constants import PREVIEW_EXTENSIONS
+from .model_lifecycle_service import delete_model_artifacts
from .service_registry import ServiceRegistry
from .websocket_manager import ws_manager
@@ -1040,10 +1041,8 @@ class ModelScanner:
target_dir = os.path.dirname(file_path)
file_name = os.path.splitext(os.path.basename(file_path))[0]
- # Delete all associated files for the model
- from ..utils.routes_common import ModelRouteUtils
- deleted_files = await ModelRouteUtils.delete_model_files(
- target_dir,
+ deleted_files = await delete_model_artifacts(
+ target_dir,
file_name
)
diff --git a/py/services/preview_asset_service.py b/py/services/preview_asset_service.py
new file mode 100644
index 00000000..42baadac
--- /dev/null
+++ b/py/services/preview_asset_service.py
@@ -0,0 +1,168 @@
+"""Service for processing preview assets for models."""
+
+from __future__ import annotations
+
+import logging
+import os
+from typing import Awaitable, Callable, Dict, Optional, Sequence
+
+from ..utils.constants import CARD_PREVIEW_WIDTH, PREVIEW_EXTENSIONS
+
+logger = logging.getLogger(__name__)
+
+
+class PreviewAssetService:
+ """Manage fetching and persisting preview assets."""
+
+ def __init__(
+ self,
+ *,
+ metadata_manager,
+ downloader_factory: Callable[[], Awaitable],
+ exif_utils,
+ ) -> None:
+ self._metadata_manager = metadata_manager
+ self._downloader_factory = downloader_factory
+ self._exif_utils = exif_utils
+
+ async def ensure_preview_for_metadata(
+ self,
+ metadata_path: str,
+ local_metadata: Dict[str, object],
+ images: Sequence[Dict[str, object]] | None,
+ ) -> None:
+ """Ensure preview assets exist for the supplied metadata entry."""
+
+ if local_metadata.get("preview_url") and os.path.exists(
+ str(local_metadata["preview_url"])
+ ):
+ return
+
+ if not images:
+ return
+
+ first_preview = images[0]
+ base_name = os.path.splitext(os.path.splitext(os.path.basename(metadata_path))[0])[0]
+ preview_dir = os.path.dirname(metadata_path)
+ is_video = first_preview.get("type") == "video"
+
+ if is_video:
+ extension = ".mp4"
+ preview_path = os.path.join(preview_dir, base_name + extension)
+ downloader = await self._downloader_factory()
+ success, result = await downloader.download_file(
+ first_preview["url"], preview_path, use_auth=False
+ )
+ if success:
+ local_metadata["preview_url"] = preview_path.replace(os.sep, "/")
+ local_metadata["preview_nsfw_level"] = first_preview.get("nsfwLevel", 0)
+ else:
+ extension = ".webp"
+ preview_path = os.path.join(preview_dir, base_name + extension)
+ downloader = await self._downloader_factory()
+ success, content, _headers = await downloader.download_to_memory(
+ first_preview["url"], use_auth=False
+ )
+ if not success:
+ return
+
+ try:
+ optimized_data, _ = self._exif_utils.optimize_image(
+ image_data=content,
+ target_width=CARD_PREVIEW_WIDTH,
+ format="webp",
+ quality=85,
+ preserve_metadata=False,
+ )
+ with open(preview_path, "wb") as handle:
+ handle.write(optimized_data)
+ except Exception as exc: # pragma: no cover - defensive path
+ logger.error("Error optimizing preview image: %s", exc)
+ try:
+ with open(preview_path, "wb") as handle:
+ handle.write(content)
+ except Exception as save_exc:
+ logger.error("Error saving preview image: %s", save_exc)
+ return
+
+ local_metadata["preview_url"] = preview_path.replace(os.sep, "/")
+ local_metadata["preview_nsfw_level"] = first_preview.get("nsfwLevel", 0)
+
+ async def replace_preview(
+ self,
+ *,
+ model_path: str,
+ preview_data: bytes,
+ content_type: str,
+ original_filename: Optional[str],
+ nsfw_level: int,
+ update_preview_in_cache: Callable[[str, str, int], Awaitable[bool]],
+ metadata_loader: Callable[[str], Awaitable[Dict[str, object]]],
+ ) -> Dict[str, object]:
+ """Replace an existing preview asset for a model."""
+
+ base_name = os.path.splitext(os.path.basename(model_path))[0]
+ folder = os.path.dirname(model_path)
+
+ extension, optimized_data = await self._convert_preview(
+ preview_data, content_type, original_filename
+ )
+
+ for ext in PREVIEW_EXTENSIONS:
+ existing_preview = os.path.join(folder, base_name + ext)
+ if os.path.exists(existing_preview):
+ try:
+ os.remove(existing_preview)
+ except Exception as exc: # pragma: no cover - defensive path
+ logger.warning(
+ "Failed to delete existing preview %s: %s", existing_preview, exc
+ )
+
+ preview_path = os.path.join(folder, base_name + extension).replace(os.sep, "/")
+ with open(preview_path, "wb") as handle:
+ handle.write(optimized_data)
+
+ metadata_path = os.path.splitext(model_path)[0] + ".metadata.json"
+ metadata = await metadata_loader(metadata_path)
+ metadata["preview_url"] = preview_path
+ metadata["preview_nsfw_level"] = nsfw_level
+ await self._metadata_manager.save_metadata(model_path, metadata)
+
+ await update_preview_in_cache(model_path, preview_path, nsfw_level)
+
+ return {"preview_path": preview_path, "preview_nsfw_level": nsfw_level}
+
+ async def _convert_preview(
+ self, data: bytes, content_type: str, original_filename: Optional[str]
+ ) -> tuple[str, bytes]:
+ """Convert preview bytes to the persisted representation."""
+
+ if content_type.startswith("video/"):
+ extension = self._resolve_video_extension(content_type, original_filename)
+ return extension, data
+
+ original_ext = (original_filename or "").lower()
+ if original_ext.endswith(".gif") or content_type.lower() == "image/gif":
+ return ".gif", data
+
+ optimized_data, _ = self._exif_utils.optimize_image(
+ image_data=data,
+ target_width=CARD_PREVIEW_WIDTH,
+ format="webp",
+ quality=85,
+ preserve_metadata=False,
+ )
+ return ".webp", optimized_data
+
+ def _resolve_video_extension(self, content_type: str, original_filename: Optional[str]) -> str:
+ """Infer the best extension for a video preview."""
+
+ if original_filename:
+ extension = os.path.splitext(original_filename)[1].lower()
+ if extension in {".mp4", ".webm", ".mov", ".avi"}:
+ return extension
+
+ if "webm" in content_type:
+ return ".webm"
+ return ".mp4"
+
diff --git a/py/services/recipe_cache.py b/py/services/recipe_cache.py
index b1f52246..ac28b3aa 100644
--- a/py/services/recipe_cache.py
+++ b/py/services/recipe_cache.py
@@ -1,5 +1,5 @@
import asyncio
-from typing import List, Dict
+from typing import Iterable, List, Dict, Optional
from dataclasses import dataclass
from operator import itemgetter
from natsort import natsorted
@@ -10,77 +10,115 @@ class RecipeCache:
raw_data: List[Dict]
sorted_by_name: List[Dict]
sorted_by_date: List[Dict]
-
+
def __post_init__(self):
self._lock = asyncio.Lock()
async def resort(self, name_only: bool = False):
"""Resort all cached data views"""
async with self._lock:
- self.sorted_by_name = natsorted(
- self.raw_data,
- key=lambda x: x.get('title', '').lower() # Case-insensitive sort
- )
- if not name_only:
- self.sorted_by_date = sorted(
- self.raw_data,
- key=itemgetter('created_date', 'file_path'),
- reverse=True
- )
-
- async def update_recipe_metadata(self, recipe_id: str, metadata: Dict) -> bool:
+ self._resort_locked(name_only=name_only)
+
+ async def update_recipe_metadata(self, recipe_id: str, metadata: Dict, *, resort: bool = True) -> bool:
"""Update metadata for a specific recipe in all cached data
-
+
Args:
recipe_id: The ID of the recipe to update
metadata: The new metadata
-
+
Returns:
bool: True if the update was successful, False if the recipe wasn't found
"""
+ async with self._lock:
+ for item in self.raw_data:
+ if str(item.get('id')) == str(recipe_id):
+ item.update(metadata)
+ if resort:
+ self._resort_locked()
+ return True
+ return False # Recipe not found
+
+ async def add_recipe(self, recipe_data: Dict, *, resort: bool = False) -> None:
+ """Add a new recipe to the cache."""
- # Update in raw_data
- for item in self.raw_data:
- if item.get('id') == recipe_id:
- item.update(metadata)
- break
- else:
- return False # Recipe not found
-
- # Resort to reflect changes
- await self.resort()
- return True
-
- async def add_recipe(self, recipe_data: Dict) -> None:
- """Add a new recipe to the cache
-
- Args:
- recipe_data: The recipe data to add
- """
async with self._lock:
self.raw_data.append(recipe_data)
- await self.resort()
+ if resort:
+ self._resort_locked()
+
+ async def remove_recipe(self, recipe_id: str, *, resort: bool = False) -> Optional[Dict]:
+ """Remove a recipe from the cache by ID.
- async def remove_recipe(self, recipe_id: str) -> bool:
- """Remove a recipe from the cache by ID
-
Args:
recipe_id: The ID of the recipe to remove
-
+
Returns:
- bool: True if the recipe was found and removed, False otherwise
+ The removed recipe data if found, otherwise ``None``.
"""
- # Find the recipe in raw_data
- recipe_index = next((i for i, recipe in enumerate(self.raw_data)
- if recipe.get('id') == recipe_id), None)
-
- if recipe_index is None:
- return False
-
- # Remove from raw_data
- self.raw_data.pop(recipe_index)
-
- # Resort to update sorted lists
- await self.resort()
-
- return True
\ No newline at end of file
+
+ async with self._lock:
+ for index, recipe in enumerate(self.raw_data):
+ if str(recipe.get('id')) == str(recipe_id):
+ removed = self.raw_data.pop(index)
+ if resort:
+ self._resort_locked()
+ return removed
+ return None
+
+ async def bulk_remove(self, recipe_ids: Iterable[str], *, resort: bool = False) -> List[Dict]:
+ """Remove multiple recipes from the cache."""
+
+ id_set = {str(recipe_id) for recipe_id in recipe_ids}
+ if not id_set:
+ return []
+
+ async with self._lock:
+ removed = [item for item in self.raw_data if str(item.get('id')) in id_set]
+ if not removed:
+ return []
+
+ self.raw_data = [item for item in self.raw_data if str(item.get('id')) not in id_set]
+ if resort:
+ self._resort_locked()
+ return removed
+
+ async def replace_recipe(self, recipe_id: str, new_data: Dict, *, resort: bool = False) -> bool:
+ """Replace cached data for a recipe."""
+
+ async with self._lock:
+ for index, recipe in enumerate(self.raw_data):
+ if str(recipe.get('id')) == str(recipe_id):
+ self.raw_data[index] = new_data
+ if resort:
+ self._resort_locked()
+ return True
+ return False
+
+ async def get_recipe(self, recipe_id: str) -> Optional[Dict]:
+ """Return a shallow copy of a cached recipe."""
+
+ async with self._lock:
+ for recipe in self.raw_data:
+ if str(recipe.get('id')) == str(recipe_id):
+ return dict(recipe)
+ return None
+
+ async def snapshot(self) -> List[Dict]:
+ """Return a copy of all cached recipes."""
+
+ async with self._lock:
+ return [dict(item) for item in self.raw_data]
+
+ def _resort_locked(self, *, name_only: bool = False) -> None:
+ """Sort cached views. Caller must hold ``_lock``."""
+
+ self.sorted_by_name = natsorted(
+ self.raw_data,
+ key=lambda x: x.get('title', '').lower()
+ )
+ if not name_only:
+ self.sorted_by_date = sorted(
+ self.raw_data,
+ key=itemgetter('created_date', 'file_path'),
+ reverse=True
+ )
\ No newline at end of file
diff --git a/py/services/recipe_scanner.py b/py/services/recipe_scanner.py
index ca5a20ac..9a82b237 100644
--- a/py/services/recipe_scanner.py
+++ b/py/services/recipe_scanner.py
@@ -3,13 +3,14 @@ import logging
import asyncio
import json
import time
-from typing import List, Dict, Optional, Any, Tuple
+from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
from ..config import config
from .recipe_cache import RecipeCache
from .service_registry import ServiceRegistry
from .lora_scanner import LoraScanner
from .metadata_service import get_default_metadata_provider
-from ..utils.utils import fuzzy_match
+from .recipes.errors import RecipeNotFoundError
+from ..utils.utils import calculate_recipe_fingerprint, fuzzy_match
from natsort import natsorted
import sys
@@ -46,6 +47,8 @@ class RecipeScanner:
self._initialization_lock = asyncio.Lock()
self._initialization_task: Optional[asyncio.Task] = None
self._is_initializing = False
+ self._mutation_lock = asyncio.Lock()
+ self._resort_tasks: Set[asyncio.Task] = set()
if lora_scanner:
self._lora_scanner = lora_scanner
self._initialized = True
@@ -191,6 +194,22 @@ class RecipeScanner:
# Clean up the event loop
loop.close()
+ def _schedule_resort(self, *, name_only: bool = False) -> None:
+ """Schedule a background resort of the recipe cache."""
+
+ if not self._cache:
+ return
+
+ async def _resort_wrapper() -> None:
+ try:
+ await self._cache.resort(name_only=name_only)
+ except Exception as exc: # pragma: no cover - defensive logging
+ logger.error("Recipe Scanner: error resorting cache: %s", exc, exc_info=True)
+
+ task = asyncio.create_task(_resort_wrapper())
+ self._resort_tasks.add(task)
+ task.add_done_callback(lambda finished: self._resort_tasks.discard(finished))
+
@property
def recipes_dir(self) -> str:
"""Get path to recipes directory"""
@@ -255,7 +274,45 @@ class RecipeScanner:
# Return the cache (may be empty or partially initialized)
return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
-
+
+ async def refresh_cache(self, force: bool = False) -> RecipeCache:
+ """Public helper to refresh or return the recipe cache."""
+
+ return await self.get_cached_data(force_refresh=force)
+
+ async def add_recipe(self, recipe_data: Dict[str, Any]) -> None:
+ """Add a recipe to the in-memory cache."""
+
+ if not recipe_data:
+ return
+
+ cache = await self.get_cached_data()
+ await cache.add_recipe(recipe_data, resort=False)
+ self._schedule_resort()
+
+ async def remove_recipe(self, recipe_id: str) -> bool:
+ """Remove a recipe from the cache by ID."""
+
+ if not recipe_id:
+ return False
+
+ cache = await self.get_cached_data()
+ removed = await cache.remove_recipe(recipe_id, resort=False)
+ if removed is None:
+ return False
+
+ self._schedule_resort()
+ return True
+
+ async def bulk_remove(self, recipe_ids: Iterable[str]) -> int:
+ """Remove multiple recipes from the cache."""
+
+ cache = await self.get_cached_data()
+ removed = await cache.bulk_remove(recipe_ids, resort=False)
+ if removed:
+ self._schedule_resort()
+ return len(removed)
+
async def scan_all_recipes(self) -> List[Dict]:
"""Scan all recipe JSON files and return metadata"""
recipes = []
@@ -326,7 +383,6 @@ class RecipeScanner:
# Calculate and update fingerprint if missing
if 'loras' in recipe_data and 'fingerprint' not in recipe_data:
- from ..utils.utils import calculate_recipe_fingerprint
fingerprint = calculate_recipe_fingerprint(recipe_data['loras'])
recipe_data['fingerprint'] = fingerprint
@@ -497,9 +553,36 @@ class RecipeScanner:
logger.error(f"Error getting base model for lora: {e}")
return None
+ def _enrich_lora_entry(self, lora: Dict[str, Any]) -> Dict[str, Any]:
+ """Populate convenience fields for a LoRA entry."""
+
+ if not lora or not self._lora_scanner:
+ return lora
+
+ hash_value = (lora.get('hash') or '').lower()
+ if not hash_value:
+ return lora
+
+ try:
+ lora['inLibrary'] = self._lora_scanner.has_hash(hash_value)
+ lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(hash_value)
+ lora['localPath'] = self._lora_scanner.get_path_by_hash(hash_value)
+ except Exception as exc: # pragma: no cover - defensive logging
+ logger.debug("Error enriching lora entry %s: %s", hash_value, exc)
+
+ return lora
+
+ async def get_local_lora(self, name: str) -> Optional[Dict[str, Any]]:
+ """Lookup a local LoRA model by name."""
+
+ if not self._lora_scanner or not name:
+ return None
+
+ return await self._lora_scanner.get_model_info_by_name(name)
+
async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'date', search: str = None, filters: dict = None, search_options: dict = None, lora_hash: str = None, bypass_filters: bool = True):
"""Get paginated and filtered recipe data
-
+
Args:
page: Current page number (1-based)
page_size: Number of items per page
@@ -598,16 +681,12 @@ class RecipeScanner:
# Get paginated items
paginated_items = filtered_data[start_idx:end_idx]
-
+
# Add inLibrary information for each lora
for item in paginated_items:
if 'loras' in item:
- for lora in item['loras']:
- if 'hash' in lora and lora['hash']:
- lora['inLibrary'] = self._lora_scanner.has_hash(lora['hash'].lower())
- lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(lora['hash'].lower())
- lora['localPath'] = self._lora_scanner.get_path_by_hash(lora['hash'].lower())
-
+ item['loras'] = [self._enrich_lora_entry(dict(lora)) for lora in item['loras']]
+
result = {
'items': paginated_items,
'total': total_items,
@@ -653,13 +732,8 @@ class RecipeScanner:
# Add lora metadata
if 'loras' in formatted_recipe:
- for lora in formatted_recipe['loras']:
- if 'hash' in lora and lora['hash']:
- lora_hash = lora['hash'].lower()
- lora['inLibrary'] = self._lora_scanner.has_hash(lora_hash)
- lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(lora_hash)
- lora['localPath'] = self._lora_scanner.get_path_by_hash(lora_hash)
-
+ formatted_recipe['loras'] = [self._enrich_lora_entry(dict(lora)) for lora in formatted_recipe['loras']]
+
return formatted_recipe
def _format_file_url(self, file_path: str) -> str:
@@ -717,26 +791,159 @@ class RecipeScanner:
# Save updated recipe
with open(recipe_json_path, 'w', encoding='utf-8') as f:
json.dump(recipe_data, f, indent=4, ensure_ascii=False)
-
+
# Update the cache if it exists
if self._cache is not None:
- await self._cache.update_recipe_metadata(recipe_id, metadata)
-
+ await self._cache.update_recipe_metadata(recipe_id, metadata, resort=False)
+ self._schedule_resort()
+
# If the recipe has an image, update its EXIF metadata
from ..utils.exif_utils import ExifUtils
image_path = recipe_data.get('file_path')
if image_path and os.path.exists(image_path):
ExifUtils.append_recipe_metadata(image_path, recipe_data)
-
+
return True
except Exception as e:
import logging
logging.getLogger(__name__).error(f"Error updating recipe metadata: {e}", exc_info=True)
return False
+ async def update_lora_entry(
+ self,
+ recipe_id: str,
+ lora_index: int,
+ *,
+ target_name: str,
+ target_lora: Optional[Dict[str, Any]] = None,
+ ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
+ """Update a specific LoRA entry within a recipe.
+
+ Returns the updated recipe data and the refreshed LoRA metadata.
+ """
+
+ if target_name is None:
+ raise ValueError("target_name must be provided")
+
+ recipe_json_path = os.path.join(self.recipes_dir, f"{recipe_id}.recipe.json")
+ if not os.path.exists(recipe_json_path):
+ raise RecipeNotFoundError("Recipe not found")
+
+ async with self._mutation_lock:
+ with open(recipe_json_path, 'r', encoding='utf-8') as file_obj:
+ recipe_data = json.load(file_obj)
+
+ loras = recipe_data.get('loras', [])
+ if lora_index >= len(loras):
+ raise RecipeNotFoundError("LoRA index out of range in recipe")
+
+ lora_entry = loras[lora_index]
+ lora_entry['isDeleted'] = False
+ lora_entry['exclude'] = False
+ lora_entry['file_name'] = target_name
+
+ if target_lora is not None:
+ sha_value = target_lora.get('sha256') or target_lora.get('sha')
+ if sha_value:
+ lora_entry['hash'] = sha_value.lower()
+
+ civitai_info = target_lora.get('civitai') or {}
+ if civitai_info:
+ lora_entry['modelName'] = civitai_info.get('model', {}).get('name', '')
+ lora_entry['modelVersionName'] = civitai_info.get('name', '')
+ lora_entry['modelVersionId'] = civitai_info.get('id')
+
+ recipe_data['fingerprint'] = calculate_recipe_fingerprint(recipe_data.get('loras', []))
+ recipe_data['modified'] = time.time()
+
+ with open(recipe_json_path, 'w', encoding='utf-8') as file_obj:
+ json.dump(recipe_data, file_obj, indent=4, ensure_ascii=False)
+
+ cache = await self.get_cached_data()
+ replaced = await cache.replace_recipe(recipe_id, recipe_data, resort=False)
+ if not replaced:
+ await cache.add_recipe(recipe_data, resort=False)
+ self._schedule_resort()
+
+ updated_lora = dict(lora_entry)
+ if target_lora is not None:
+ preview_url = target_lora.get('preview_url')
+ if preview_url:
+ updated_lora['preview_url'] = config.get_preview_static_url(preview_url)
+ if target_lora.get('file_path'):
+ updated_lora['localPath'] = target_lora['file_path']
+
+ updated_lora = self._enrich_lora_entry(updated_lora)
+ return recipe_data, updated_lora
+
+ async def get_recipes_for_lora(self, lora_hash: str) -> List[Dict[str, Any]]:
+ """Return recipes that reference a given LoRA hash."""
+
+ if not lora_hash:
+ return []
+
+ normalized_hash = lora_hash.lower()
+ cache = await self.get_cached_data()
+ matching_recipes: List[Dict[str, Any]] = []
+
+ for recipe in cache.raw_data:
+ loras = recipe.get('loras', [])
+ if any((entry.get('hash') or '').lower() == normalized_hash for entry in loras):
+ recipe_copy = {**recipe}
+ recipe_copy['loras'] = [self._enrich_lora_entry(dict(entry)) for entry in loras]
+ recipe_copy['file_url'] = self._format_file_url(recipe.get('file_path'))
+ matching_recipes.append(recipe_copy)
+
+ return matching_recipes
+
+ async def get_recipe_syntax_tokens(self, recipe_id: str) -> List[str]:
+ """Build LoRA syntax tokens for a recipe."""
+
+ cache = await self.get_cached_data()
+ recipe = await cache.get_recipe(recipe_id)
+ if recipe is None:
+ raise RecipeNotFoundError("Recipe not found")
+
+ loras = recipe.get('loras', [])
+ if not loras:
+ return []
+
+ lora_cache = None
+ if self._lora_scanner is not None:
+ lora_cache = await self._lora_scanner.get_cached_data()
+
+ syntax_parts: List[str] = []
+ for lora in loras:
+ if lora.get('isDeleted', False):
+ continue
+
+ file_name = None
+ hash_value = (lora.get('hash') or '').lower()
+ if hash_value and self._lora_scanner is not None and hasattr(self._lora_scanner, '_hash_index'):
+ file_path = self._lora_scanner._hash_index.get_path(hash_value)
+ if file_path:
+ file_name = os.path.splitext(os.path.basename(file_path))[0]
+
+ if not file_name and lora.get('modelVersionId') and lora_cache is not None:
+ for cached_lora in getattr(lora_cache, 'raw_data', []):
+ civitai_info = cached_lora.get('civitai')
+ if civitai_info and civitai_info.get('id') == lora.get('modelVersionId'):
+ cached_path = cached_lora.get('path') or cached_lora.get('file_path')
+ if cached_path:
+ file_name = os.path.splitext(os.path.basename(cached_path))[0]
+ break
+
+ if not file_name:
+ file_name = lora.get('file_name', 'unknown-lora')
+
+ strength = lora.get('strength', 1.0)
+            syntax_parts.append(f"<lora:{file_name}:{strength}>")
+
+ return syntax_parts
+
async def update_lora_filename_by_hash(self, hash_value: str, new_file_name: str) -> Tuple[int, int]:
"""Update file_name in all recipes that contain a LoRA with the specified hash.
-
+
Args:
hash_value: The SHA256 hash value of the LoRA
new_file_name: The new file_name to set
diff --git a/py/services/recipes/__init__.py b/py/services/recipes/__init__.py
new file mode 100644
index 00000000..8009b7c3
--- /dev/null
+++ b/py/services/recipes/__init__.py
@@ -0,0 +1,23 @@
+"""Recipe service layer implementations."""
+
+from .analysis_service import RecipeAnalysisService
+from .persistence_service import RecipePersistenceService
+from .sharing_service import RecipeSharingService
+from .errors import (
+ RecipeServiceError,
+ RecipeValidationError,
+ RecipeNotFoundError,
+ RecipeDownloadError,
+ RecipeConflictError,
+)
+
+__all__ = [
+ "RecipeAnalysisService",
+ "RecipePersistenceService",
+ "RecipeSharingService",
+ "RecipeServiceError",
+ "RecipeValidationError",
+ "RecipeNotFoundError",
+ "RecipeDownloadError",
+ "RecipeConflictError",
+]
diff --git a/py/services/recipes/analysis_service.py b/py/services/recipes/analysis_service.py
new file mode 100644
index 00000000..77d80e34
--- /dev/null
+++ b/py/services/recipes/analysis_service.py
@@ -0,0 +1,289 @@
+"""Services responsible for recipe metadata analysis."""
+from __future__ import annotations
+
+import base64
+import io
+import os
+import re
+import tempfile
+from dataclasses import dataclass
+from typing import Any, Callable, Optional
+
+import numpy as np
+from PIL import Image
+
+from ...utils.utils import calculate_recipe_fingerprint
+from .errors import (
+ RecipeDownloadError,
+ RecipeNotFoundError,
+ RecipeServiceError,
+ RecipeValidationError,
+)
+
+
+@dataclass(frozen=True)
+class AnalysisResult:
+ """Return payload from analysis operations."""
+
+ payload: dict[str, Any]
+ status: int = 200
+
+
+class RecipeAnalysisService:
+ """Extract recipe metadata from various image sources."""
+
+ def __init__(
+ self,
+ *,
+ exif_utils,
+ recipe_parser_factory,
+ downloader_factory: Callable[[], Any],
+ metadata_collector: Optional[Callable[[], Any]] = None,
+ metadata_processor_cls: Optional[type] = None,
+ metadata_registry_cls: Optional[type] = None,
+ standalone_mode: bool = False,
+ logger,
+ ) -> None:
+ self._exif_utils = exif_utils
+ self._recipe_parser_factory = recipe_parser_factory
+ self._downloader_factory = downloader_factory
+ self._metadata_collector = metadata_collector
+ self._metadata_processor_cls = metadata_processor_cls
+ self._metadata_registry_cls = metadata_registry_cls
+ self._standalone_mode = standalone_mode
+ self._logger = logger
+
+ async def analyze_uploaded_image(
+ self,
+ *,
+ image_bytes: bytes | None,
+ recipe_scanner,
+ ) -> AnalysisResult:
+ """Analyze an uploaded image payload."""
+
+ if not image_bytes:
+ raise RecipeValidationError("No image data provided")
+
+ temp_path = self._write_temp_file(image_bytes)
+ try:
+ metadata = self._exif_utils.extract_image_metadata(temp_path)
+ if not metadata:
+ return AnalysisResult({"error": "No metadata found in this image", "loras": []})
+
+ return await self._parse_metadata(
+ metadata,
+ recipe_scanner=recipe_scanner,
+ image_path=None,
+ include_image_base64=False,
+ )
+ finally:
+ self._safe_cleanup(temp_path)
+
+ async def analyze_remote_image(
+ self,
+ *,
+ url: str | None,
+ recipe_scanner,
+ civitai_client,
+ ) -> AnalysisResult:
+ """Analyze an image accessible via URL, including Civitai integration."""
+
+ if not url:
+ raise RecipeValidationError("No URL provided")
+
+ if civitai_client is None:
+ raise RecipeServiceError("Civitai client unavailable")
+
+ temp_path = self._create_temp_path()
+ metadata: Optional[dict[str, Any]] = None
+ try:
+ civitai_match = re.match(r"https://civitai\.com/images/(\d+)", url)
+ if civitai_match:
+ image_info = await civitai_client.get_image_info(civitai_match.group(1))
+ if not image_info:
+ raise RecipeDownloadError("Failed to fetch image information from Civitai")
+ image_url = image_info.get("url")
+ if not image_url:
+ raise RecipeDownloadError("No image URL found in Civitai response")
+ await self._download_image(image_url, temp_path)
+ metadata = image_info.get("meta") if "meta" in image_info else None
+ else:
+ await self._download_image(url, temp_path)
+
+ if metadata is None:
+ metadata = self._exif_utils.extract_image_metadata(temp_path)
+
+ if not metadata:
+ return self._metadata_not_found_response(temp_path)
+
+ return await self._parse_metadata(
+ metadata,
+ recipe_scanner=recipe_scanner,
+ image_path=temp_path,
+ include_image_base64=True,
+ )
+ finally:
+ self._safe_cleanup(temp_path)
+
+ async def analyze_local_image(
+ self,
+ *,
+ file_path: str | None,
+ recipe_scanner,
+ ) -> AnalysisResult:
+ """Analyze a file already present on disk."""
+
+ if not file_path:
+ raise RecipeValidationError("No file path provided")
+
+ normalized_path = os.path.normpath(file_path.strip('"').strip("'"))
+ if not os.path.isfile(normalized_path):
+ raise RecipeNotFoundError("File not found")
+
+ metadata = self._exif_utils.extract_image_metadata(normalized_path)
+ if not metadata:
+ return self._metadata_not_found_response(normalized_path)
+
+ return await self._parse_metadata(
+ metadata,
+ recipe_scanner=recipe_scanner,
+ image_path=normalized_path,
+ include_image_base64=True,
+ )
+
+ async def analyze_widget_metadata(self, *, recipe_scanner) -> AnalysisResult:
+ """Analyse the most recent generation metadata for widget saves."""
+
+ if self._metadata_collector is None or self._metadata_processor_cls is None:
+ raise RecipeValidationError("Metadata collection not available")
+
+ raw_metadata = self._metadata_collector()
+ metadata_dict = self._metadata_processor_cls.to_dict(raw_metadata)
+ if not metadata_dict:
+ raise RecipeValidationError("No generation metadata found")
+
+ latest_image = None
+ if not self._standalone_mode and self._metadata_registry_cls is not None:
+ metadata_registry = self._metadata_registry_cls()
+ latest_image = metadata_registry.get_first_decoded_image()
+
+ if latest_image is None:
+ raise RecipeValidationError(
+ "No recent images found to use for recipe. Try generating an image first."
+ )
+
+ image_bytes = self._convert_tensor_to_png_bytes(latest_image)
+ if image_bytes is None:
+ raise RecipeValidationError("Cannot handle this data shape from metadata registry")
+
+ return AnalysisResult(
+ {
+ "metadata": metadata_dict,
+ "image_bytes": image_bytes,
+ }
+ )
+
+ # Internal helpers -------------------------------------------------
+
+ async def _parse_metadata(
+ self,
+ metadata: dict[str, Any],
+ *,
+ recipe_scanner,
+ image_path: Optional[str],
+ include_image_base64: bool,
+ ) -> AnalysisResult:
+ parser = self._recipe_parser_factory.create_parser(metadata)
+ if parser is None:
+ payload = {"error": "No parser found for this image", "loras": []}
+ if include_image_base64 and image_path:
+ payload["image_base64"] = self._encode_file(image_path)
+ return AnalysisResult(payload)
+
+ result = await parser.parse_metadata(metadata, recipe_scanner=recipe_scanner)
+
+ if include_image_base64 and image_path:
+ result["image_base64"] = self._encode_file(image_path)
+
+ if "error" in result and not result.get("loras"):
+ return AnalysisResult(result)
+
+ fingerprint = calculate_recipe_fingerprint(result.get("loras", []))
+ result["fingerprint"] = fingerprint
+
+ matching_recipes: list[str] = []
+ if fingerprint:
+ matching_recipes = await recipe_scanner.find_recipes_by_fingerprint(fingerprint)
+ result["matching_recipes"] = matching_recipes
+
+ return AnalysisResult(result)
+
+ async def _download_image(self, url: str, temp_path: str) -> None:
+ downloader = await self._downloader_factory()
+ success, result = await downloader.download_file(url, temp_path, use_auth=False)
+ if not success:
+ raise RecipeDownloadError(f"Failed to download image from URL: {result}")
+
+ def _metadata_not_found_response(self, path: str) -> AnalysisResult:
+ payload: dict[str, Any] = {"error": "No metadata found in this image", "loras": []}
+ if os.path.exists(path):
+ payload["image_base64"] = self._encode_file(path)
+ return AnalysisResult(payload)
+
+ def _write_temp_file(self, data: bytes) -> str:
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
+ temp_file.write(data)
+ return temp_file.name
+
+ def _create_temp_path(self) -> str:
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
+ return temp_file.name
+
+ def _safe_cleanup(self, path: Optional[str]) -> None:
+ if path and os.path.exists(path):
+ try:
+ os.unlink(path)
+ except Exception as exc: # pragma: no cover - defensive logging
+ self._logger.error("Error deleting temporary file: %s", exc)
+
+ def _encode_file(self, path: str) -> str:
+ with open(path, "rb") as image_file:
+ return base64.b64encode(image_file.read()).decode("utf-8")
+
+ def _convert_tensor_to_png_bytes(self, latest_image: Any) -> Optional[bytes]:
+ try:
+ if isinstance(latest_image, tuple):
+ tensor_image = latest_image[0] if latest_image else None
+ if tensor_image is None:
+ return None
+ else:
+ tensor_image = latest_image
+
+ if hasattr(tensor_image, "shape"):
+ self._logger.debug(
+ "Tensor shape: %s, dtype: %s", tensor_image.shape, getattr(tensor_image, "dtype", None)
+ )
+
+ import torch # type: ignore[import-not-found]
+
+ if isinstance(tensor_image, torch.Tensor):
+ image_np = tensor_image.cpu().numpy()
+ else:
+ image_np = np.array(tensor_image)
+
+ while len(image_np.shape) > 3:
+ image_np = image_np[0]
+
+ if image_np.dtype in (np.float32, np.float64) and image_np.max() <= 1.0:
+ image_np = (image_np * 255).astype(np.uint8)
+
+ if len(image_np.shape) == 3 and image_np.shape[2] == 3:
+ pil_image = Image.fromarray(image_np)
+ img_byte_arr = io.BytesIO()
+ pil_image.save(img_byte_arr, format="PNG")
+ return img_byte_arr.getvalue()
+ except Exception as exc: # pragma: no cover - defensive logging path
+ self._logger.error("Error processing image data: %s", exc, exc_info=True)
+ return None
+
+ return None
diff --git a/py/services/recipes/errors.py b/py/services/recipes/errors.py
new file mode 100644
index 00000000..9e5d9720
--- /dev/null
+++ b/py/services/recipes/errors.py
@@ -0,0 +1,22 @@
+"""Shared exceptions for recipe services."""
+from __future__ import annotations
+
+
+class RecipeServiceError(Exception):
+ """Base exception for recipe service failures."""
+
+
+class RecipeValidationError(RecipeServiceError):
+ """Raised when a request payload fails validation."""
+
+
+class RecipeNotFoundError(RecipeServiceError):
+ """Raised when a recipe resource cannot be located."""
+
+
+class RecipeDownloadError(RecipeServiceError):
+ """Raised when remote recipe assets cannot be downloaded."""
+
+
+class RecipeConflictError(RecipeServiceError):
+ """Raised when a conflicting recipe state is detected."""
diff --git a/py/services/recipes/persistence_service.py b/py/services/recipes/persistence_service.py
new file mode 100644
index 00000000..175f046c
--- /dev/null
+++ b/py/services/recipes/persistence_service.py
@@ -0,0 +1,407 @@
+"""Services encapsulating recipe persistence workflows."""
+from __future__ import annotations
+
+import base64
+import json
+import os
+import re
+import time
+import uuid
+from dataclasses import dataclass
+from typing import Any, Dict, Iterable, Optional
+
+from ...config import config
+from ...utils.utils import calculate_recipe_fingerprint
+from .errors import RecipeNotFoundError, RecipeValidationError
+
+
+@dataclass(frozen=True)
+class PersistenceResult:
+ """Return payload from persistence operations."""
+
+ payload: dict[str, Any]
+ status: int = 200
+
+
+class RecipePersistenceService:
+ """Coordinate recipe persistence tasks across storage and caches."""
+
+ def __init__(
+ self,
+ *,
+ exif_utils,
+ card_preview_width: int,
+ logger,
+ ) -> None:
+ self._exif_utils = exif_utils
+ self._card_preview_width = card_preview_width
+ self._logger = logger
+
+ async def save_recipe(
+ self,
+ *,
+ recipe_scanner,
+ image_bytes: bytes | None,
+ image_base64: str | None,
+ name: str | None,
+ tags: Iterable[str],
+ metadata: Optional[dict[str, Any]],
+ ) -> PersistenceResult:
+ """Persist a user uploaded recipe."""
+
+ missing_fields = []
+ if not name:
+ missing_fields.append("name")
+ if metadata is None:
+ missing_fields.append("metadata")
+ if missing_fields:
+ raise RecipeValidationError(
+ f"Missing required fields: {', '.join(missing_fields)}"
+ )
+
+ resolved_image_bytes = self._resolve_image_bytes(image_bytes, image_base64)
+ recipes_dir = recipe_scanner.recipes_dir
+ os.makedirs(recipes_dir, exist_ok=True)
+
+ recipe_id = str(uuid.uuid4())
+ optimized_image, extension = self._exif_utils.optimize_image(
+ image_data=resolved_image_bytes,
+ target_width=self._card_preview_width,
+ format="webp",
+ quality=85,
+ preserve_metadata=True,
+ )
+ image_filename = f"{recipe_id}{extension}"
+ image_path = os.path.join(recipes_dir, image_filename)
+ with open(image_path, "wb") as file_obj:
+ file_obj.write(optimized_image)
+
+ current_time = time.time()
+ loras_data = [self._normalise_lora_entry(lora) for lora in metadata.get("loras", [])]
+
+ gen_params = metadata.get("gen_params", {})
+ if not gen_params and "raw_metadata" in metadata:
+ raw_metadata = metadata.get("raw_metadata", {})
+ gen_params = {
+ "prompt": raw_metadata.get("prompt", ""),
+ "negative_prompt": raw_metadata.get("negative_prompt", ""),
+ "checkpoint": raw_metadata.get("checkpoint", {}),
+ "steps": raw_metadata.get("steps", ""),
+ "sampler": raw_metadata.get("sampler", ""),
+ "cfg_scale": raw_metadata.get("cfg_scale", ""),
+ "seed": raw_metadata.get("seed", ""),
+ "size": raw_metadata.get("size", ""),
+ "clip_skip": raw_metadata.get("clip_skip", ""),
+ }
+
+ fingerprint = calculate_recipe_fingerprint(loras_data)
+ recipe_data: Dict[str, Any] = {
+ "id": recipe_id,
+ "file_path": image_path,
+ "title": name,
+ "modified": current_time,
+ "created_date": current_time,
+ "base_model": metadata.get("base_model", ""),
+ "loras": loras_data,
+ "gen_params": gen_params,
+ "fingerprint": fingerprint,
+ }
+
+ tags_list = list(tags)
+ if tags_list:
+ recipe_data["tags"] = tags_list
+
+ if metadata.get("source_path"):
+ recipe_data["source_path"] = metadata.get("source_path")
+
+ json_filename = f"{recipe_id}.recipe.json"
+ json_path = os.path.join(recipes_dir, json_filename)
+ with open(json_path, "w", encoding="utf-8") as file_obj:
+ json.dump(recipe_data, file_obj, indent=4, ensure_ascii=False)
+
+ self._exif_utils.append_recipe_metadata(image_path, recipe_data)
+
+ matching_recipes = await self._find_matching_recipes(recipe_scanner, fingerprint, exclude_id=recipe_id)
+ await recipe_scanner.add_recipe(recipe_data)
+
+ return PersistenceResult(
+ {
+ "success": True,
+ "recipe_id": recipe_id,
+ "image_path": image_path,
+ "json_path": json_path,
+ "matching_recipes": matching_recipes,
+ }
+ )
+
+ async def delete_recipe(self, *, recipe_scanner, recipe_id: str) -> PersistenceResult:
+ """Delete an existing recipe."""
+
+ recipes_dir = recipe_scanner.recipes_dir
+ if not recipes_dir or not os.path.exists(recipes_dir):
+ raise RecipeNotFoundError("Recipes directory not found")
+
+ recipe_json_path = os.path.join(recipes_dir, f"{recipe_id}.recipe.json")
+ if not os.path.exists(recipe_json_path):
+ raise RecipeNotFoundError("Recipe not found")
+
+ with open(recipe_json_path, "r", encoding="utf-8") as file_obj:
+ recipe_data = json.load(file_obj)
+
+ image_path = recipe_data.get("file_path")
+ os.remove(recipe_json_path)
+ if image_path and os.path.exists(image_path):
+ os.remove(image_path)
+
+ await recipe_scanner.remove_recipe(recipe_id)
+ return PersistenceResult({"success": True, "message": "Recipe deleted successfully"})
+
+ async def update_recipe(self, *, recipe_scanner, recipe_id: str, updates: dict[str, Any]) -> PersistenceResult:
+ """Update persisted metadata for a recipe."""
+
+ if not any(key in updates for key in ("title", "tags", "source_path", "preview_nsfw_level")):
+ raise RecipeValidationError(
+ "At least one field to update must be provided (title or tags or source_path or preview_nsfw_level)"
+ )
+
+ success = await recipe_scanner.update_recipe_metadata(recipe_id, updates)
+ if not success:
+ raise RecipeNotFoundError("Recipe not found or update failed")
+
+ return PersistenceResult({"success": True, "recipe_id": recipe_id, "updates": updates})
+
+ async def reconnect_lora(
+ self,
+ *,
+ recipe_scanner,
+ recipe_id: str,
+ lora_index: int,
+ target_name: str,
+ ) -> PersistenceResult:
+ """Reconnect a LoRA entry within an existing recipe."""
+
+ recipe_path = os.path.join(recipe_scanner.recipes_dir, f"{recipe_id}.recipe.json")
+ if not os.path.exists(recipe_path):
+ raise RecipeNotFoundError("Recipe not found")
+
+ target_lora = await recipe_scanner.get_local_lora(target_name)
+ if not target_lora:
+ raise RecipeNotFoundError(f"Local LoRA not found with name: {target_name}")
+
+ recipe_data, updated_lora = await recipe_scanner.update_lora_entry(
+ recipe_id,
+ lora_index,
+ target_name=target_name,
+ target_lora=target_lora,
+ )
+
+ image_path = recipe_data.get("file_path")
+ if image_path and os.path.exists(image_path):
+ self._exif_utils.append_recipe_metadata(image_path, recipe_data)
+
+ matching_recipes = []
+ if "fingerprint" in recipe_data:
+ matching_recipes = await recipe_scanner.find_recipes_by_fingerprint(recipe_data["fingerprint"])
+ if recipe_id in matching_recipes:
+ matching_recipes.remove(recipe_id)
+
+ return PersistenceResult(
+ {
+ "success": True,
+ "recipe_id": recipe_id,
+ "updated_lora": updated_lora,
+ "matching_recipes": matching_recipes,
+ }
+ )
+
+ async def bulk_delete(
+ self,
+ *,
+ recipe_scanner,
+ recipe_ids: Iterable[str],
+ ) -> PersistenceResult:
+ """Delete multiple recipes in a single request."""
+
+ recipe_ids = list(recipe_ids)
+ if not recipe_ids:
+ raise RecipeValidationError("No recipe IDs provided")
+
+ recipes_dir = recipe_scanner.recipes_dir
+ if not recipes_dir or not os.path.exists(recipes_dir):
+ raise RecipeNotFoundError("Recipes directory not found")
+
+ deleted_recipes: list[str] = []
+ failed_recipes: list[dict[str, Any]] = []
+
+ for recipe_id in recipe_ids:
+ recipe_json_path = os.path.join(recipes_dir, f"{recipe_id}.recipe.json")
+ if not os.path.exists(recipe_json_path):
+ failed_recipes.append({"id": recipe_id, "reason": "Recipe not found"})
+ continue
+
+ try:
+ with open(recipe_json_path, "r", encoding="utf-8") as file_obj:
+ recipe_data = json.load(file_obj)
+ image_path = recipe_data.get("file_path")
+ os.remove(recipe_json_path)
+ if image_path and os.path.exists(image_path):
+ os.remove(image_path)
+ deleted_recipes.append(recipe_id)
+ except Exception as exc:
+ failed_recipes.append({"id": recipe_id, "reason": str(exc)})
+
+ if deleted_recipes:
+ await recipe_scanner.bulk_remove(deleted_recipes)
+
+ return PersistenceResult(
+ {
+ "success": True,
+ "deleted": deleted_recipes,
+ "failed": failed_recipes,
+ "total_deleted": len(deleted_recipes),
+ "total_failed": len(failed_recipes),
+ }
+ )
+
+ async def save_recipe_from_widget(
+ self,
+ *,
+ recipe_scanner,
+ metadata: dict[str, Any],
+ image_bytes: bytes,
+ ) -> PersistenceResult:
+ """Save a recipe constructed from widget metadata."""
+
+ if not metadata:
+ raise RecipeValidationError("No generation metadata found")
+
+ recipes_dir = recipe_scanner.recipes_dir
+ os.makedirs(recipes_dir, exist_ok=True)
+
+ recipe_id = str(uuid.uuid4())
+ optimized_image, extension = self._exif_utils.optimize_image(
+ image_data=image_bytes,
+ target_width=self._card_preview_width,
+ format="webp",
+ quality=85,
+ preserve_metadata=True,
+ )
+ image_filename = f"{recipe_id}{extension}"
+ image_path = os.path.join(recipes_dir, image_filename)
+ with open(image_path, "wb") as file_obj:
+ file_obj.write(optimized_image)
+
+ lora_stack = metadata.get("loras", "")
+        lora_matches = re.findall(r"<lora:([^:]+):([^>]+)>", lora_stack)
+ if not lora_matches:
+ raise RecipeValidationError("No LoRAs found in the generation metadata")
+
+ loras_data = []
+ base_model_counts: Dict[str, int] = {}
+
+ for name, strength in lora_matches:
+ lora_info = await recipe_scanner.get_local_lora(name)
+ lora_data = {
+ "file_name": name,
+ "strength": float(strength),
+ "hash": (lora_info.get("sha256") or "").lower() if lora_info else "",
+ "modelVersionId": lora_info.get("civitai", {}).get("id") if lora_info else 0,
+ "modelName": lora_info.get("civitai", {}).get("model", {}).get("name") if lora_info else "",
+ "modelVersionName": lora_info.get("civitai", {}).get("name") if lora_info else "",
+ "isDeleted": False,
+ "exclude": False,
+ }
+ loras_data.append(lora_data)
+
+ if lora_info and "base_model" in lora_info:
+ base_model = lora_info["base_model"]
+ base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1
+
+ recipe_name = self._derive_recipe_name(lora_matches)
+ most_common_base_model = (
+ max(base_model_counts.items(), key=lambda item: item[1])[0] if base_model_counts else ""
+ )
+
+ recipe_data = {
+ "id": recipe_id,
+ "file_path": image_path,
+ "title": recipe_name,
+ "modified": time.time(),
+ "created_date": time.time(),
+ "base_model": most_common_base_model,
+ "loras": loras_data,
+ "checkpoint": metadata.get("checkpoint", ""),
+ "gen_params": {
+ key: value
+ for key, value in metadata.items()
+ if key not in ["checkpoint", "loras"]
+ },
+ "loras_stack": lora_stack,
+ }
+
+ json_filename = f"{recipe_id}.recipe.json"
+ json_path = os.path.join(recipes_dir, json_filename)
+ with open(json_path, "w", encoding="utf-8") as file_obj:
+ json.dump(recipe_data, file_obj, indent=4, ensure_ascii=False)
+
+ self._exif_utils.append_recipe_metadata(image_path, recipe_data)
+ await recipe_scanner.add_recipe(recipe_data)
+
+ return PersistenceResult(
+ {
+ "success": True,
+ "recipe_id": recipe_id,
+ "image_path": image_path,
+ "json_path": json_path,
+ "recipe_name": recipe_name,
+ }
+ )
+
+ # Helper methods ---------------------------------------------------
+
+ def _resolve_image_bytes(self, image_bytes: bytes | None, image_base64: str | None) -> bytes:
+ if image_bytes is not None:
+ return image_bytes
+ if image_base64:
+ try:
+ payload = image_base64.split(",", 1)[1] if "," in image_base64 else image_base64
+ return base64.b64decode(payload)
+ except Exception as exc: # pragma: no cover - validation guard
+ raise RecipeValidationError(f"Invalid base64 image data: {exc}") from exc
+ raise RecipeValidationError("No image data provided")
+
+ def _normalise_lora_entry(self, lora: dict[str, Any]) -> dict[str, Any]:
+ return {
+ "file_name": lora.get("file_name", "")
+ or (
+ os.path.splitext(os.path.basename(lora.get("localPath", "")))[0]
+ if lora.get("localPath")
+ else ""
+ ),
+ "hash": (lora.get("hash") or "").lower(),
+ "strength": float(lora.get("weight", 1.0)),
+ "modelVersionId": lora.get("id", 0),
+ "modelName": lora.get("name", ""),
+ "modelVersionName": lora.get("version", ""),
+ "isDeleted": lora.get("isDeleted", False),
+ "exclude": lora.get("exclude", False),
+ }
+
+ async def _find_matching_recipes(
+ self,
+ recipe_scanner,
+ fingerprint: str | None,
+ *,
+ exclude_id: Optional[str] = None,
+ ) -> list[str]:
+ if not fingerprint:
+ return []
+ matches = await recipe_scanner.find_recipes_by_fingerprint(fingerprint)
+ if exclude_id and exclude_id in matches:
+ matches.remove(exclude_id)
+ return matches
+
+ def _derive_recipe_name(self, lora_matches: list[tuple[str, str]]) -> str:
+ recipe_name_parts = [f"{name.strip()}-{float(strength):.2f}" for name, strength in lora_matches[:3]]
+ recipe_name = "_".join(recipe_name_parts)
+ return recipe_name or "recipe"
diff --git a/py/services/recipes/sharing_service.py b/py/services/recipes/sharing_service.py
new file mode 100644
index 00000000..47ab9718
--- /dev/null
+++ b/py/services/recipes/sharing_service.py
@@ -0,0 +1,105 @@
+"""Services handling recipe sharing and downloads."""
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+import time
+from dataclasses import dataclass
+from typing import Any, Dict
+
+from .errors import RecipeNotFoundError
+
+
+@dataclass(frozen=True)
+class SharingResult:
+ """Return payload for share operations."""
+
+ payload: dict[str, Any]
+ status: int = 200
+
+
+@dataclass(frozen=True)
+class DownloadInfo:
+ """Information required to stream a shared recipe file."""
+
+ file_path: str
+ download_filename: str
+
+
+class RecipeSharingService:
+ """Prepare temporary recipe downloads with TTL cleanup."""
+
+ def __init__(self, *, ttl_seconds: int = 300, logger) -> None:
+ self._ttl_seconds = ttl_seconds
+ self._logger = logger
+ self._shared_recipes: Dict[str, Dict[str, Any]] = {}
+
+ async def share_recipe(self, *, recipe_scanner, recipe_id: str) -> SharingResult:
+ """Prepare a temporary downloadable copy of a recipe image."""
+
+ recipe = await recipe_scanner.get_recipe_by_id(recipe_id)
+ if not recipe:
+ raise RecipeNotFoundError("Recipe not found")
+
+ image_path = recipe.get("file_path")
+ if not image_path or not os.path.exists(image_path):
+ raise RecipeNotFoundError("Recipe image not found")
+
+ ext = os.path.splitext(image_path)[1]
+ with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as temp_file:
+ temp_path = temp_file.name
+
+ shutil.copy2(image_path, temp_path)
+ timestamp = int(time.time())
+ self._shared_recipes[recipe_id] = {
+ "path": temp_path,
+ "timestamp": timestamp,
+ "expires": time.time() + self._ttl_seconds,
+ }
+ self._cleanup_shared_recipes()
+
+ safe_title = recipe.get("title", "").replace(" ", "_").lower()
+ filename = f"recipe_{safe_title}{ext}" if safe_title else f"recipe_{recipe_id}{ext}"
+ url_path = f"/api/recipe/{recipe_id}/share/download?t={timestamp}"
+ return SharingResult({"success": True, "download_url": url_path, "filename": filename})
+
+ async def prepare_download(self, *, recipe_scanner, recipe_id: str) -> DownloadInfo:
+ """Return file path and filename for a prepared shared recipe."""
+
+ shared_info = self._shared_recipes.get(recipe_id)
+ if not shared_info or time.time() > shared_info.get("expires", 0):
+ self._cleanup_entry(recipe_id)
+ raise RecipeNotFoundError("Shared recipe not found or expired")
+
+ file_path = shared_info["path"]
+ if not os.path.exists(file_path):
+ self._cleanup_entry(recipe_id)
+ raise RecipeNotFoundError("Shared recipe file not found")
+
+ recipe = await recipe_scanner.get_recipe_by_id(recipe_id)
+ filename_base = (
+ f"recipe_{recipe.get('title', '').replace(' ', '_').lower()}" if recipe else f"recipe_{recipe_id}"
+ )
+ ext = os.path.splitext(file_path)[1]
+ download_filename = f"{filename_base}{ext}"
+ return DownloadInfo(file_path=file_path, download_filename=download_filename)
+
+ def _cleanup_shared_recipes(self) -> None:
+ for recipe_id in list(self._shared_recipes.keys()):
+ shared = self._shared_recipes.get(recipe_id)
+ if not shared:
+ continue
+ if time.time() > shared.get("expires", 0):
+ self._cleanup_entry(recipe_id)
+
+ def _cleanup_entry(self, recipe_id: str) -> None:
+ shared_info = self._shared_recipes.pop(recipe_id, None)
+ if not shared_info:
+ return
+ file_path = shared_info.get("path")
+ if file_path and os.path.exists(file_path):
+ try:
+ os.unlink(file_path)
+ except Exception as exc: # pragma: no cover - defensive logging
+ self._logger.error("Error cleaning up shared recipe %s: %s", recipe_id, exc)
diff --git a/py/services/settings_manager.py b/py/services/settings_manager.py
index 7d99da48..795c6da0 100644
--- a/py/services/settings_manager.py
+++ b/py/services/settings_manager.py
@@ -5,10 +5,41 @@ from typing import Any, Dict
logger = logging.getLogger(__name__)
+
+DEFAULT_SETTINGS: Dict[str, Any] = {
+ "civitai_api_key": "",
+ "language": "en",
+ "show_only_sfw": False,
+ "enable_metadata_archive_db": False,
+ "proxy_enabled": False,
+ "proxy_host": "",
+ "proxy_port": "",
+ "proxy_username": "",
+ "proxy_password": "",
+ "proxy_type": "http",
+ "default_lora_root": "",
+ "default_checkpoint_root": "",
+ "default_embedding_root": "",
+ "base_model_path_mappings": {},
+ "download_path_templates": {},
+ "example_images_path": "",
+ "optimize_example_images": True,
+ "auto_download_example_images": False,
+ "blur_mature_content": True,
+ "autoplay_on_hover": False,
+ "display_density": "default",
+ "card_info_display": "always",
+ "include_trigger_words": False,
+ "compact_mode": False,
+}
+
+
class SettingsManager:
def __init__(self):
self.settings_file = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'settings.json')
self.settings = self._load_settings()
+ self._migrate_setting_keys()
+ self._ensure_default_settings()
self._migrate_download_path_template()
self._auto_set_default_roots()
self._check_environment_variables()
@@ -23,11 +54,49 @@ class SettingsManager:
logger.error(f"Error loading settings: {e}")
return self._get_default_settings()
+ def _ensure_default_settings(self) -> None:
+ """Ensure all default settings keys exist"""
+ updated = False
+ for key, value in self._get_default_settings().items():
+ if key not in self.settings:
+ if isinstance(value, dict):
+ self.settings[key] = value.copy()
+ else:
+ self.settings[key] = value
+ updated = True
+ if updated:
+ self._save_settings()
+
+ def _migrate_setting_keys(self) -> None:
+ """Migrate legacy camelCase setting keys to snake_case"""
+ key_migrations = {
+ 'optimizeExampleImages': 'optimize_example_images',
+ 'autoDownloadExampleImages': 'auto_download_example_images',
+ 'blurMatureContent': 'blur_mature_content',
+ 'autoplayOnHover': 'autoplay_on_hover',
+ 'displayDensity': 'display_density',
+ 'cardInfoDisplay': 'card_info_display',
+ 'includeTriggerWords': 'include_trigger_words',
+ 'compactMode': 'compact_mode',
+ }
+
+ updated = False
+ for old_key, new_key in key_migrations.items():
+ if old_key in self.settings:
+ if new_key not in self.settings:
+ self.settings[new_key] = self.settings[old_key]
+ del self.settings[old_key]
+ updated = True
+
+ if updated:
+ logger.info("Migrated legacy setting keys to snake_case")
+ self._save_settings()
+
def _migrate_download_path_template(self):
"""Migrate old download_path_template to new download_path_templates"""
old_template = self.settings.get('download_path_template')
templates = self.settings.get('download_path_templates')
-
+
# If old template exists and new templates don't exist, migrate
if old_template is not None and not templates:
logger.info("Migrating download_path_template to download_path_templates")
@@ -42,24 +111,30 @@ class SettingsManager:
logger.info("Migration completed")
def _auto_set_default_roots(self):
- """Auto set default root paths if only one folder is present and default is empty."""
+ """Auto set default root paths when only one folder is present and the current default is unset or not among the options."""
folder_paths = self.settings.get('folder_paths', {})
updated = False
# loras
loras = folder_paths.get('loras', [])
- if isinstance(loras, list) and len(loras) == 1 and not self.settings.get('default_lora_root'):
- self.settings['default_lora_root'] = loras[0]
- updated = True
+ if isinstance(loras, list) and len(loras) == 1:
+ current_lora_root = self.settings.get('default_lora_root')
+ if current_lora_root not in loras:
+ self.settings['default_lora_root'] = loras[0]
+ updated = True
# checkpoints
checkpoints = folder_paths.get('checkpoints', [])
- if isinstance(checkpoints, list) and len(checkpoints) == 1 and not self.settings.get('default_checkpoint_root'):
- self.settings['default_checkpoint_root'] = checkpoints[0]
- updated = True
+ if isinstance(checkpoints, list) and len(checkpoints) == 1:
+ current_checkpoint_root = self.settings.get('default_checkpoint_root')
+ if current_checkpoint_root not in checkpoints:
+ self.settings['default_checkpoint_root'] = checkpoints[0]
+ updated = True
# embeddings
embeddings = folder_paths.get('embeddings', [])
- if isinstance(embeddings, list) and len(embeddings) == 1 and not self.settings.get('default_embedding_root'):
- self.settings['default_embedding_root'] = embeddings[0]
- updated = True
+ if isinstance(embeddings, list) and len(embeddings) == 1:
+ current_embedding_root = self.settings.get('default_embedding_root')
+ if current_embedding_root not in embeddings:
+ self.settings['default_embedding_root'] = embeddings[0]
+ updated = True
if updated:
self._save_settings()
@@ -78,12 +153,11 @@ class SettingsManager:
def _get_default_settings(self) -> Dict[str, Any]:
"""Return default settings"""
- return {
- "civitai_api_key": "",
- "show_only_sfw": False,
- "language": "en",
- "enable_metadata_archive_db": False # Enable metadata archive database
- }
+ defaults = DEFAULT_SETTINGS.copy()
+ # Ensure nested dicts are independent copies
+ defaults['base_model_path_mappings'] = {}
+ defaults['download_path_templates'] = {}
+ return defaults
def get(self, key: str, default: Any = None) -> Any:
"""Get setting value"""
@@ -94,6 +168,13 @@ class SettingsManager:
self.settings[key] = value
self._save_settings()
+ def delete(self, key: str) -> None:
+ """Delete setting key and save"""
+ if key in self.settings:
+ del self.settings[key]
+ self._save_settings()
+ logger.info(f"Deleted setting: {key}")
+
def _save_settings(self) -> None:
"""Save settings to file"""
try:
diff --git a/py/services/tag_update_service.py b/py/services/tag_update_service.py
new file mode 100644
index 00000000..d560e7d6
--- /dev/null
+++ b/py/services/tag_update_service.py
@@ -0,0 +1,47 @@
+"""Service for updating tag collections on metadata records."""
+
+from __future__ import annotations
+
+import os
+
+from typing import Awaitable, Callable, Dict, List, Sequence
+
+
+class TagUpdateService:
+ """Encapsulate tag manipulation for models."""
+
+ def __init__(self, *, metadata_manager) -> None:
+ self._metadata_manager = metadata_manager
+
+ async def add_tags(
+ self,
+ *,
+ file_path: str,
+ new_tags: Sequence[str],
+ metadata_loader: Callable[[str], Awaitable[Dict[str, object]]],
+ update_cache: Callable[[str, str, Dict[str, object]], Awaitable[bool]],
+ ) -> List[str]:
+ """Add tags to a metadata entry while keeping case-insensitive uniqueness."""
+
+ base, _ = os.path.splitext(file_path)
+ metadata_path = f"{base}.metadata.json"
+ metadata = await metadata_loader(metadata_path)
+
+ existing_tags = list(metadata.get("tags", []))
+ existing_lower = [tag.lower() for tag in existing_tags]
+
+ tags_added: List[str] = []  # NOTE(review): collected but never returned or logged — use it or remove it
+ for tag in new_tags:
+ if isinstance(tag, str) and tag.strip():
+ normalized = tag.strip()
+ if normalized.lower() not in existing_lower:
+ existing_tags.append(normalized)
+ existing_lower.append(normalized.lower())
+ tags_added.append(normalized)
+
+ metadata["tags"] = existing_tags
+ await self._metadata_manager.save_metadata(file_path, metadata)
+ await update_cache(file_path, file_path, metadata)
+
+ return existing_tags
+
diff --git a/py/services/use_cases/__init__.py b/py/services/use_cases/__init__.py
new file mode 100644
index 00000000..8a43318c
--- /dev/null
+++ b/py/services/use_cases/__init__.py
@@ -0,0 +1,37 @@
+"""Application-level orchestration services for model routes."""
+
+from .auto_organize_use_case import (
+ AutoOrganizeInProgressError,
+ AutoOrganizeUseCase,
+)
+from .bulk_metadata_refresh_use_case import (
+ BulkMetadataRefreshUseCase,
+ MetadataRefreshProgressReporter,
+)
+from .download_model_use_case import (
+ DownloadModelEarlyAccessError,
+ DownloadModelUseCase,
+ DownloadModelValidationError,
+)
+from .example_images import (
+ DownloadExampleImagesConfigurationError,
+ DownloadExampleImagesInProgressError,
+ DownloadExampleImagesUseCase,
+ ImportExampleImagesUseCase,
+ ImportExampleImagesValidationError,
+)
+
+__all__ = [
+ "AutoOrganizeInProgressError",
+ "AutoOrganizeUseCase",
+ "BulkMetadataRefreshUseCase",
+ "MetadataRefreshProgressReporter",
+ "DownloadModelEarlyAccessError",
+ "DownloadModelUseCase",
+ "DownloadModelValidationError",
+ "DownloadExampleImagesConfigurationError",
+ "DownloadExampleImagesInProgressError",
+ "DownloadExampleImagesUseCase",
+ "ImportExampleImagesUseCase",
+ "ImportExampleImagesValidationError",
+]
diff --git a/py/services/use_cases/auto_organize_use_case.py b/py/services/use_cases/auto_organize_use_case.py
new file mode 100644
index 00000000..0914739f
--- /dev/null
+++ b/py/services/use_cases/auto_organize_use_case.py
@@ -0,0 +1,56 @@
+"""Auto-organize use case orchestrating concurrency and progress handling."""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Optional, Protocol, Sequence
+
+from ..model_file_service import AutoOrganizeResult, ModelFileService, ProgressCallback
+
+
+class AutoOrganizeLockProvider(Protocol):
+ """Minimal protocol for objects exposing auto-organize locking primitives."""
+
+ def is_auto_organize_running(self) -> bool:
+ """Return ``True`` when an auto-organize operation is in-flight."""
+
+ async def get_auto_organize_lock(self) -> asyncio.Lock:
+ """Return the asyncio lock guarding auto-organize operations."""
+
+
+class AutoOrganizeInProgressError(RuntimeError):
+ """Raised when an auto-organize run is already active."""
+
+
+class AutoOrganizeUseCase:
+ """Coordinate auto-organize execution behind a shared lock."""
+
+ def __init__(
+ self,
+ *,
+ file_service: ModelFileService,
+ lock_provider: AutoOrganizeLockProvider,
+ ) -> None:
+ self._file_service = file_service
+ self._lock_provider = lock_provider
+
+ async def execute(
+ self,
+ *,
+ file_paths: Optional[Sequence[str]] = None,
+ progress_callback: Optional[ProgressCallback] = None,
+ ) -> AutoOrganizeResult:
+ """Run the auto-organize routine guarded by a shared lock."""
+
+ if self._lock_provider.is_auto_organize_running():
+ raise AutoOrganizeInProgressError("Auto-organize is already running")
+
+ lock = await self._lock_provider.get_auto_organize_lock()
+ if lock.locked():
+ raise AutoOrganizeInProgressError("Auto-organize is already running")
+
+ async with lock:
+ return await self._file_service.auto_organize_models(
+ file_paths=list(file_paths) if file_paths is not None else None,
+ progress_callback=progress_callback,
+ )
diff --git a/py/services/use_cases/bulk_metadata_refresh_use_case.py b/py/services/use_cases/bulk_metadata_refresh_use_case.py
new file mode 100644
index 00000000..6a809955
--- /dev/null
+++ b/py/services/use_cases/bulk_metadata_refresh_use_case.py
@@ -0,0 +1,122 @@
+"""Use case encapsulating the bulk metadata refresh orchestration."""
+
+from __future__ import annotations
+
+import logging
+from typing import Any, Dict, Optional, Protocol, Sequence
+
+from ..metadata_sync_service import MetadataSyncService
+
+
+class MetadataRefreshProgressReporter(Protocol):
+ """Protocol for progress reporters used during metadata refresh."""
+
+ async def on_progress(self, payload: Dict[str, Any]) -> None:
+ """Handle a metadata refresh progress update."""
+
+
+class BulkMetadataRefreshUseCase:
+ """Coordinate bulk metadata refreshes with progress emission."""
+
+ def __init__(
+ self,
+ *,
+ service,
+ metadata_sync: MetadataSyncService,
+ settings_service,
+ logger: Optional[logging.Logger] = None,
+ ) -> None:
+ self._service = service
+ self._metadata_sync = metadata_sync
+ self._settings = settings_service
+ self._logger = logger or logging.getLogger(__name__)
+
+ async def execute(
+ self,
+ *,
+ progress_callback: Optional[MetadataRefreshProgressReporter] = None,
+ ) -> Dict[str, Any]:
+ """Refresh metadata for all qualifying models."""
+
+ cache = await self._service.scanner.get_cached_data()
+ total_models = len(cache.raw_data)
+
+ enable_metadata_archive_db = self._settings.get("enable_metadata_archive_db", False)
+ to_process: Sequence[Dict[str, Any]] = [
+ model
+ for model in cache.raw_data
+ if model.get("sha256")
+ and (not model.get("civitai") or not model["civitai"].get("id"))
+ and (
+ (enable_metadata_archive_db and not model.get("db_checked", False))
+ or (not enable_metadata_archive_db and model.get("from_civitai") is True)
+ )
+ ]
+
+ total_to_process = len(to_process)
+ processed = 0
+ success = 0
+ needs_resort = False
+
+ async def emit(status: str, **extra: Any) -> None:
+ if progress_callback is None:
+ return
+ payload = {"status": status, "total": total_to_process, "processed": processed, "success": success}
+ payload.update(extra)
+ await progress_callback.on_progress(payload)
+
+ await emit("started")
+
+ for model in to_process:
+ try:
+ original_name = model.get("model_name")  # compared post-fetch; assumes fetch_and_update_model mutates model in place — TODO confirm
+ result, _ = await self._metadata_sync.fetch_and_update_model(
+ sha256=model["sha256"],
+ file_path=model["file_path"],
+ model_data=model,
+ update_cache_func=self._service.scanner.update_single_model_cache,
+ )
+ if result:
+ success += 1
+ if original_name != model.get("model_name"):
+ needs_resort = True
+ processed += 1
+ await emit(
+ "processing",
+ processed=processed,
+ success=success,
+ current_name=model.get("model_name", "Unknown"),
+ )
+ except Exception as exc: # pragma: no cover - logging path
+ processed += 1
+ self._logger.error(
+ "Error fetching CivitAI data for %s: %s",
+ model.get("file_path"),
+ exc,
+ )
+
+ if needs_resort:
+ await cache.resort()
+
+ await emit("completed", processed=processed, success=success)
+
+ message = (
+ "Successfully updated "
+ f"{success} of {processed} processed {self._service.model_type}s (total: {total_models})"
+ )
+
+ return {"success": True, "message": message, "processed": processed, "updated": success, "total": total_models}
+
+ async def execute_with_error_handling(
+ self,
+ *,
+ progress_callback: Optional[MetadataRefreshProgressReporter] = None,
+ ) -> Dict[str, Any]:
+ """Wrapper providing progress notification on unexpected failures."""
+
+ try:
+ return await self.execute(progress_callback=progress_callback)
+ except Exception as exc:
+ if progress_callback is not None:
+ await progress_callback.on_progress({"status": "error", "error": str(exc)})
+ raise
diff --git a/py/services/use_cases/download_model_use_case.py b/py/services/use_cases/download_model_use_case.py
new file mode 100644
index 00000000..5aa25bda
--- /dev/null
+++ b/py/services/use_cases/download_model_use_case.py
@@ -0,0 +1,37 @@
+"""Use case for scheduling model downloads with consistent error handling."""
+
+from __future__ import annotations
+
+from typing import Any, Dict
+
+from ..download_coordinator import DownloadCoordinator
+
+
+class DownloadModelValidationError(ValueError):
+ """Raised when incoming payload validation fails."""
+
+
+class DownloadModelEarlyAccessError(RuntimeError):
+ """Raised when the download is gated behind Civitai early access."""
+
+
+class DownloadModelUseCase:
+ """Coordinate download scheduling through the coordinator service."""
+
+ def __init__(self, *, download_coordinator: DownloadCoordinator) -> None:
+ self._download_coordinator = download_coordinator
+
+ async def execute(self, payload: Dict[str, Any]) -> Dict[str, Any]:
+ """Schedule a download and normalize error conditions."""
+
+ try:
+ return await self._download_coordinator.schedule_download(payload)
+ except ValueError as exc:
+ raise DownloadModelValidationError(str(exc)) from exc
+ except Exception as exc: # pragma: no cover - defensive logging path
+ message = str(exc)
+ if "401" in message:
+ raise DownloadModelEarlyAccessError(
+ "Early Access Restriction: This model requires purchase. Please buy early access on Civitai.com."
+ ) from exc
+ raise
diff --git a/py/services/use_cases/example_images/__init__.py b/py/services/use_cases/example_images/__init__.py
new file mode 100644
index 00000000..820de618
--- /dev/null
+++ b/py/services/use_cases/example_images/__init__.py
@@ -0,0 +1,19 @@
+"""Example image specific use case exports."""
+
+from .download_example_images_use_case import (
+ DownloadExampleImagesUseCase,
+ DownloadExampleImagesInProgressError,
+ DownloadExampleImagesConfigurationError,
+)
+from .import_example_images_use_case import (
+ ImportExampleImagesUseCase,
+ ImportExampleImagesValidationError,
+)
+
+__all__ = [
+ "DownloadExampleImagesUseCase",
+ "DownloadExampleImagesInProgressError",
+ "DownloadExampleImagesConfigurationError",
+ "ImportExampleImagesUseCase",
+ "ImportExampleImagesValidationError",
+]
diff --git a/py/services/use_cases/example_images/download_example_images_use_case.py b/py/services/use_cases/example_images/download_example_images_use_case.py
new file mode 100644
index 00000000..e9a51e13
--- /dev/null
+++ b/py/services/use_cases/example_images/download_example_images_use_case.py
@@ -0,0 +1,42 @@
+"""Use case coordinating example image downloads."""
+
+from __future__ import annotations
+
+from typing import Any, Dict
+
+from ....utils.example_images_download_manager import (
+ DownloadConfigurationError,
+ DownloadInProgressError,
+ ExampleImagesDownloadError,
+)
+
+
+class DownloadExampleImagesInProgressError(RuntimeError):
+ """Raised when a download is already running."""
+
+ def __init__(self, progress: Dict[str, Any]) -> None:
+ super().__init__("Download already in progress")
+ self.progress = progress
+
+
+class DownloadExampleImagesConfigurationError(ValueError):
+ """Raised when settings prevent downloads from starting."""
+
+
+class DownloadExampleImagesUseCase:
+ """Validate payloads and trigger the download manager."""
+
+ def __init__(self, *, download_manager) -> None:
+ self._download_manager = download_manager
+
+ async def execute(self, payload: Dict[str, Any]) -> Dict[str, Any]:
+ """Start a download and translate manager errors."""
+
+ try:
+ return await self._download_manager.start_download(payload)
+ except DownloadInProgressError as exc:
+ raise DownloadExampleImagesInProgressError(exc.progress_snapshot) from exc
+ except DownloadConfigurationError as exc:
+ raise DownloadExampleImagesConfigurationError(str(exc)) from exc
+ except ExampleImagesDownloadError:
+ raise
diff --git a/py/services/use_cases/example_images/import_example_images_use_case.py b/py/services/use_cases/example_images/import_example_images_use_case.py
new file mode 100644
index 00000000..547b2f4e
--- /dev/null
+++ b/py/services/use_cases/example_images/import_example_images_use_case.py
@@ -0,0 +1,86 @@
+"""Use case for importing example images."""
+
+from __future__ import annotations
+
+import os
+import tempfile
+from contextlib import suppress
+from typing import Any, Dict, List
+
+from aiohttp import web
+
+from ....utils.example_images_processor import (
+ ExampleImagesImportError,
+ ExampleImagesProcessor,
+ ExampleImagesValidationError,
+)
+
+
+class ImportExampleImagesValidationError(ValueError):
+ """Raised when request validation fails."""
+
+
+class ImportExampleImagesUseCase:
+ """Parse upload payloads and delegate to the processor service."""
+
+ def __init__(self, *, processor: ExampleImagesProcessor) -> None:
+ self._processor = processor
+
+ async def execute(self, request: web.Request) -> Dict[str, Any]:
+ model_hash: str | None = None
+ files_to_import: List[str] = []
+ temp_files: List[str] = []
+
+ try:
+ if request.content_type and "multipart/form-data" in request.content_type:
+ reader = await request.multipart()
+
+ first_field = await reader.next()
+ if first_field and first_field.name == "model_hash":
+ model_hash = await first_field.text()
+ else:
+ # Support clients that send files first and hash later
+ if first_field is not None:
+ await self._collect_upload_file(first_field, files_to_import, temp_files)
+
+ async for field in reader:
+ if field.name == "model_hash" and not model_hash:
+ model_hash = await field.text()
+ elif field.name == "files":
+ await self._collect_upload_file(field, files_to_import, temp_files)
+ else:
+ data = await request.json()
+ model_hash = data.get("model_hash")
+ files_to_import = list(data.get("file_paths", []))
+
+ result = await self._processor.import_images(model_hash, files_to_import)
+ return result
+ except ExampleImagesValidationError as exc:
+ raise ImportExampleImagesValidationError(str(exc)) from exc
+ except ExampleImagesImportError:
+ raise
+ finally:
+ for path in temp_files:
+ with suppress(Exception):
+ os.remove(path)
+
+ async def _collect_upload_file(
+ self,
+ field: Any,
+ files_to_import: List[str],
+ temp_files: List[str],
+ ) -> None:
+ """Persist an uploaded file to disk and add it to the import list."""
+
+ filename = field.filename or "upload"
+ file_ext = os.path.splitext(filename)[1].lower()
+
+ with tempfile.NamedTemporaryFile(suffix=file_ext, delete=False) as tmp_file:
+ temp_files.append(tmp_file.name)
+ while True:
+ chunk = await field.read_chunk()
+ if not chunk:
+ break
+ tmp_file.write(chunk)
+
+ files_to_import.append(tmp_file.name)
diff --git a/py/services/websocket_progress_callback.py b/py/services/websocket_progress_callback.py
index 1a390f30..21423044 100644
--- a/py/services/websocket_progress_callback.py
+++ b/py/services/websocket_progress_callback.py
@@ -1,11 +1,29 @@
-from typing import Dict, Any
+"""Progress callback implementations backed by the shared WebSocket manager."""
+
+from typing import Any, Dict, Protocol
+
from .model_file_service import ProgressCallback
from .websocket_manager import ws_manager
-class WebSocketProgressCallback(ProgressCallback):
- """WebSocket implementation of progress callback"""
-
+class ProgressReporter(Protocol):
+ """Protocol representing an async progress callback."""
+
async def on_progress(self, progress_data: Dict[str, Any]) -> None:
- """Send progress data via WebSocket"""
- await ws_manager.broadcast_auto_organize_progress(progress_data)
\ No newline at end of file
+ """Handle a progress update payload."""
+
+
+class WebSocketProgressCallback(ProgressCallback):
+ """WebSocket implementation of progress callback."""
+
+ async def on_progress(self, progress_data: Dict[str, Any]) -> None:
+ """Send progress data via WebSocket."""
+ await ws_manager.broadcast_auto_organize_progress(progress_data)
+
+
+class WebSocketBroadcastCallback:
+ """Generic WebSocket progress callback broadcasting to all clients."""
+
+ async def on_progress(self, progress_data: Dict[str, Any]) -> None:
+ """Send the provided payload to all connected clients."""
+ await ws_manager.broadcast(progress_data)
diff --git a/py/utils/constants.py b/py/utils/constants.py
index 88932766..243badff 100644
--- a/py/utils/constants.py
+++ b/py/utils/constants.py
@@ -53,8 +53,8 @@ AUTO_ORGANIZE_BATCH_SIZE = 50 # Process models in batches to avoid overwhelming
# Civitai model tags in priority order for subfolder organization
CIVITAI_MODEL_TAGS = [
- 'character', 'style', 'concept', 'clothing',
- 'realistic', 'anime', 'toon', 'furry',
- 'poses', 'background', 'tool', 'vehicle', 'buildings',
+ 'character', 'concept', 'clothing',
+ 'realistic', 'anime', 'toon', 'furry', 'style',
+ 'poses', 'background', 'tool', 'vehicle', 'buildings',
'objects', 'assets', 'animal', 'action'
]
\ No newline at end of file
diff --git a/py/utils/example_images_download_manager.py b/py/utils/example_images_download_manager.py
index e3f46244..9ddf03a4 100644
--- a/py/utils/example_images_download_manager.py
+++ b/py/utils/example_images_download_manager.py
@@ -1,205 +1,216 @@
+from __future__ import annotations
+
import logging
import os
import asyncio
import json
import time
-from aiohttp import web
+from typing import Any, Dict
+
from ..services.service_registry import ServiceRegistry
from ..utils.metadata_manager import MetadataManager
from .example_images_processor import ExampleImagesProcessor
from .example_images_metadata import MetadataUpdater
-from ..services.websocket_manager import ws_manager # Add this import at the top
from ..services.downloader import get_downloader
+from ..services.settings_manager import settings
+
+
+class ExampleImagesDownloadError(RuntimeError):
+ """Base error for example image download operations."""
+
+
+class DownloadInProgressError(ExampleImagesDownloadError):
+ """Raised when a download is already running."""
+
+ def __init__(self, progress_snapshot: dict) -> None:
+ super().__init__("Download already in progress")
+ self.progress_snapshot = progress_snapshot
+
+
+class DownloadNotRunningError(ExampleImagesDownloadError):
+ """Raised when pause/resume is requested without an active download."""
+
+ def __init__(self, message: str = "No download in progress") -> None:
+ super().__init__(message)
+
+
+class DownloadConfigurationError(ExampleImagesDownloadError):
+ """Raised when configuration prevents starting a download."""
+
logger = logging.getLogger(__name__)
-# Download status tracking
-download_task = None
-is_downloading = False
-download_progress = {
- 'total': 0,
- 'completed': 0,
- 'current_model': '',
- 'status': 'idle', # idle, running, paused, completed, error
- 'errors': [],
- 'last_error': None,
- 'start_time': None,
- 'end_time': None,
- 'processed_models': set(), # Track models that have been processed
- 'refreshed_models': set(), # Track models that had metadata refreshed
- 'failed_models': set() # Track models that failed to download after metadata refresh
-}
+
+class _DownloadProgress(dict):
+ """Mutable mapping maintaining download progress with set-aware serialisation."""
+
+ def __init__(self) -> None:
+ super().__init__()
+ self.reset()
+
+ def reset(self) -> None:
+ """Reset the known progress fields to their initial values (extra keys, if any, are left untouched)."""
+
+ self.update(
+ total=0,
+ completed=0,
+ current_model='',
+ status='idle',
+ errors=[],
+ last_error=None,
+ start_time=None,
+ end_time=None,
+ processed_models=set(),
+ refreshed_models=set(),
+ failed_models=set(),
+ )
+
+ def snapshot(self) -> dict:
+ """Return a JSON-serialisable snapshot of the current progress."""
+
+ snapshot = dict(self)
+ snapshot['processed_models'] = list(self['processed_models'])
+ snapshot['refreshed_models'] = list(self['refreshed_models'])
+ snapshot['failed_models'] = list(self['failed_models'])
+ return snapshot
class DownloadManager:
- """Manages downloading example images for models"""
-
- @staticmethod
- async def start_download(request):
- """
- Start downloading example images for models
-
- Expects a JSON body with:
- {
- "output_dir": "path/to/output", # Base directory to save example images
- "optimize": true, # Whether to optimize images (default: true)
- "model_types": ["lora", "checkpoint"], # Model types to process (default: both)
- "delay": 1.0 # Delay between downloads to avoid rate limiting (default: 1.0)
- }
- """
- global download_task, is_downloading, download_progress
-
- if is_downloading:
- # Create a copy for JSON serialization
- response_progress = download_progress.copy()
- response_progress['processed_models'] = list(download_progress['processed_models'])
- response_progress['refreshed_models'] = list(download_progress['refreshed_models'])
- response_progress['failed_models'] = list(download_progress['failed_models'])
-
- return web.json_response({
- 'success': False,
- 'error': 'Download already in progress',
- 'status': response_progress
- }, status=400)
-
- try:
- # Parse the request body
- data = await request.json()
- output_dir = data.get('output_dir')
- optimize = data.get('optimize', True)
- model_types = data.get('model_types', ['lora', 'checkpoint'])
- delay = float(data.get('delay', 0.2)) # Default to 0.2 seconds
-
- if not output_dir:
- return web.json_response({
- 'success': False,
- 'error': 'Missing output_dir parameter'
- }, status=400)
-
- # Create the output directory
- os.makedirs(output_dir, exist_ok=True)
-
- # Initialize progress tracking
- download_progress['total'] = 0
- download_progress['completed'] = 0
- download_progress['current_model'] = ''
- download_progress['status'] = 'running'
- download_progress['errors'] = []
- download_progress['last_error'] = None
- download_progress['start_time'] = time.time()
- download_progress['end_time'] = None
-
- # Get the processed models list from a file if it exists
- progress_file = os.path.join(output_dir, '.download_progress.json')
- if os.path.exists(progress_file):
- try:
- with open(progress_file, 'r', encoding='utf-8') as f:
- saved_progress = json.load(f)
- download_progress['processed_models'] = set(saved_progress.get('processed_models', []))
- download_progress['failed_models'] = set(saved_progress.get('failed_models', []))
- logger.debug(f"Loaded previous progress, {len(download_progress['processed_models'])} models already processed, {len(download_progress['failed_models'])} models marked as failed")
- except Exception as e:
- logger.error(f"Failed to load progress file: {e}")
- download_progress['processed_models'] = set()
- download_progress['failed_models'] = set()
- else:
- download_progress['processed_models'] = set()
- download_progress['failed_models'] = set()
-
- # Start the download task
- is_downloading = True
- download_task = asyncio.create_task(
- DownloadManager._download_all_example_images(
- output_dir,
- optimize,
- model_types,
- delay
- )
- )
-
- # Create a copy for JSON serialization
- response_progress = download_progress.copy()
- response_progress['processed_models'] = list(download_progress['processed_models'])
- response_progress['refreshed_models'] = list(download_progress['refreshed_models'])
- response_progress['failed_models'] = list(download_progress['failed_models'])
-
- return web.json_response({
- 'success': True,
- 'message': 'Download started',
- 'status': response_progress
- })
-
- except Exception as e:
- logger.error(f"Failed to start example images download: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- @staticmethod
- async def get_status(request):
- """Get the current status of example images download"""
- global download_progress
-
- # Create a copy of the progress dict with the set converted to a list for JSON serialization
- response_progress = download_progress.copy()
- response_progress['processed_models'] = list(download_progress['processed_models'])
- response_progress['refreshed_models'] = list(download_progress['refreshed_models'])
- response_progress['failed_models'] = list(download_progress['failed_models'])
-
- return web.json_response({
- 'success': True,
- 'is_downloading': is_downloading,
- 'status': response_progress
- })
+ """Manages downloading example images for models."""
- @staticmethod
- async def pause_download(request):
- """Pause the example images download"""
- global download_progress
-
- if not is_downloading:
- return web.json_response({
- 'success': False,
- 'error': 'No download in progress'
- }, status=400)
-
- download_progress['status'] = 'paused'
-
- return web.json_response({
+ def __init__(self, *, ws_manager, state_lock: asyncio.Lock | None = None) -> None:
+ self._download_task: asyncio.Task | None = None
+ self._is_downloading = False
+ self._progress = _DownloadProgress()
+ self._ws_manager = ws_manager
+ self._state_lock = state_lock or asyncio.Lock()
+
+ async def start_download(self, options: dict):
+        """Start downloading example images for models; options may include auto_mode, optimize, model_types, delay."""
+
+ async with self._state_lock:
+ if self._is_downloading:
+ raise DownloadInProgressError(self._progress.snapshot())
+
+ try:
+ data = options or {}
+ auto_mode = data.get('auto_mode', False)
+ optimize = data.get('optimize', True)
+ model_types = data.get('model_types', ['lora', 'checkpoint'])
+ delay = float(data.get('delay', 0.2))
+
+ output_dir = settings.get('example_images_path')
+
+ if not output_dir:
+ error_msg = 'Example images path not configured in settings'
+ if auto_mode:
+ logger.debug(error_msg)
+ return {
+ 'success': True,
+ 'message': 'Example images path not configured, skipping auto download'
+ }
+ raise DownloadConfigurationError(error_msg)
+
+ os.makedirs(output_dir, exist_ok=True)
+
+ self._progress.reset()
+ self._progress['status'] = 'running'
+ self._progress['start_time'] = time.time()
+ self._progress['end_time'] = None
+
+ progress_file = os.path.join(output_dir, '.download_progress.json')
+ if os.path.exists(progress_file):
+ try:
+ with open(progress_file, 'r', encoding='utf-8') as f:
+ saved_progress = json.load(f)
+ self._progress['processed_models'] = set(saved_progress.get('processed_models', []))
+ self._progress['failed_models'] = set(saved_progress.get('failed_models', []))
+ logger.debug(
+ "Loaded previous progress, %s models already processed, %s models marked as failed",
+ len(self._progress['processed_models']),
+ len(self._progress['failed_models']),
+ )
+ except Exception as e:
+ logger.error(f"Failed to load progress file: {e}")
+ self._progress['processed_models'] = set()
+ self._progress['failed_models'] = set()
+ else:
+ self._progress['processed_models'] = set()
+ self._progress['failed_models'] = set()
+
+ self._is_downloading = True
+ self._download_task = asyncio.create_task(
+ self._download_all_example_images(
+ output_dir,
+ optimize,
+ model_types,
+ delay
+ )
+ )
+
+ snapshot = self._progress.snapshot()
+ except Exception as e:
+ self._is_downloading = False
+ self._download_task = None
+ logger.error(f"Failed to start example images download: {e}", exc_info=True)
+ raise ExampleImagesDownloadError(str(e)) from e
+
+ await self._broadcast_progress(status='running')
+
+ return {
+ 'success': True,
+ 'message': 'Download started',
+ 'status': snapshot
+ }
+
+ async def get_status(self, request):
+ """Get the current status of example images download."""
+
+ return {
+ 'success': True,
+ 'is_downloading': self._is_downloading,
+ 'status': self._progress.snapshot(),
+ }
+
+ async def pause_download(self, request):
+ """Pause the example images download."""
+
+ async with self._state_lock:
+ if not self._is_downloading:
+ raise DownloadNotRunningError()
+
+ self._progress['status'] = 'paused'
+
+ await self._broadcast_progress(status='paused')
+
+ return {
'success': True,
'message': 'Download paused'
- })
+ }
- @staticmethod
- async def resume_download(request):
- """Resume the example images download"""
- global download_progress
-
- if not is_downloading:
- return web.json_response({
- 'success': False,
- 'error': 'No download in progress'
- }, status=400)
-
- if download_progress['status'] == 'paused':
- download_progress['status'] = 'running'
-
- return web.json_response({
- 'success': True,
- 'message': 'Download resumed'
- })
- else:
- return web.json_response({
- 'success': False,
- 'error': f"Download is in '{download_progress['status']}' state, cannot resume"
- }, status=400)
+ async def resume_download(self, request):
+ """Resume the example images download."""
+
+ async with self._state_lock:
+ if not self._is_downloading:
+ raise DownloadNotRunningError()
+
+ if self._progress['status'] == 'paused':
+ self._progress['status'] = 'running'
+ else:
+ raise DownloadNotRunningError(
+ f"Download is in '{self._progress['status']}' state, cannot resume"
+ )
+
+ await self._broadcast_progress(status='running')
+
+ return {
+ 'success': True,
+ 'message': 'Download resumed'
+ }
- @staticmethod
- async def _download_all_example_images(output_dir, optimize, model_types, delay):
- """Download example images for all models"""
- global is_downloading, download_progress
-
- # Get unified downloader
+ async def _download_all_example_images(self, output_dir, optimize, model_types, delay):
+ """Download example images for all models."""
+
downloader = await get_downloader()
try:
@@ -227,59 +238,67 @@ class DownloadManager:
all_models.append((scanner_type, model, scanner))
# Update total count
- download_progress['total'] = len(all_models)
- logger.debug(f"Found {download_progress['total']} models to process")
+ self._progress['total'] = len(all_models)
+ logger.debug(f"Found {self._progress['total']} models to process")
+ await self._broadcast_progress(status='running')
# Process each model
for i, (scanner_type, model, scanner) in enumerate(all_models):
# Main logic for processing model is here, but actual operations are delegated to other classes
- was_remote_download = await DownloadManager._process_model(
- scanner_type, model, scanner,
+ was_remote_download = await self._process_model(
+ scanner_type, model, scanner,
output_dir, optimize, downloader
)
# Update progress
- download_progress['completed'] += 1
+ self._progress['completed'] += 1
+ await self._broadcast_progress(status='running')
# Only add delay after remote download of models, and not after processing the last model
- if was_remote_download and i < len(all_models) - 1 and download_progress['status'] == 'running':
+ if was_remote_download and i < len(all_models) - 1 and self._progress['status'] == 'running':
await asyncio.sleep(delay)
# Mark as completed
- download_progress['status'] = 'completed'
- download_progress['end_time'] = time.time()
- logger.debug(f"Example images download completed: {download_progress['completed']}/{download_progress['total']} models processed")
-
+ self._progress['status'] = 'completed'
+ self._progress['end_time'] = time.time()
+ logger.debug(
+ "Example images download completed: %s/%s models processed",
+ self._progress['completed'],
+ self._progress['total'],
+ )
+ await self._broadcast_progress(status='completed')
+
except Exception as e:
error_msg = f"Error during example images download: {str(e)}"
logger.error(error_msg, exc_info=True)
- download_progress['errors'].append(error_msg)
- download_progress['last_error'] = error_msg
- download_progress['status'] = 'error'
- download_progress['end_time'] = time.time()
-
+ self._progress['errors'].append(error_msg)
+ self._progress['last_error'] = error_msg
+ self._progress['status'] = 'error'
+ self._progress['end_time'] = time.time()
+ await self._broadcast_progress(status='error', extra={'error': error_msg})
+
finally:
# Save final progress to file
try:
- DownloadManager._save_progress(output_dir)
+ self._save_progress(output_dir)
except Exception as e:
logger.error(f"Failed to save progress file: {e}")
-
+
# Set download status to not downloading
- is_downloading = False
+ async with self._state_lock:
+ self._is_downloading = False
+ self._download_task = None
- @staticmethod
- async def _process_model(scanner_type, model, scanner, output_dir, optimize, downloader):
- """Process a single model download"""
- global download_progress
-
+ async def _process_model(self, scanner_type, model, scanner, output_dir, optimize, downloader):
+ """Process a single model download."""
+
# Check if download is paused
- while download_progress['status'] == 'paused':
+ while self._progress['status'] == 'paused':
await asyncio.sleep(1)
-
+
# Check if download should continue
- if download_progress['status'] != 'running':
- logger.info(f"Download stopped: {download_progress['status']}")
+ if self._progress['status'] != 'running':
+ logger.info(f"Download stopped: {self._progress['status']}")
return False # Return False to indicate no remote download happened
model_hash = model.get('sha256', '').lower()
@@ -289,15 +308,16 @@ class DownloadManager:
try:
# Update current model info
- download_progress['current_model'] = f"{model_name} ({model_hash[:8]})"
+ self._progress['current_model'] = f"{model_name} ({model_hash[:8]})"
+ await self._broadcast_progress(status='running')
# Skip if already in failed models
- if model_hash in download_progress['failed_models']:
+ if model_hash in self._progress['failed_models']:
logger.debug(f"Skipping known failed model: {model_name}")
return False
# Skip if already processed AND directory exists with files
- if model_hash in download_progress['processed_models']:
+ if model_hash in self._progress['processed_models']:
model_dir = os.path.join(output_dir, model_hash)
has_files = os.path.exists(model_dir) and any(os.listdir(model_dir))
if has_files:
@@ -306,7 +326,7 @@ class DownloadManager:
else:
logger.info(f"Model {model_name} marked as processed but folder empty or missing, reprocessing")
# Remove from processed models since we need to reprocess
- download_progress['processed_models'].discard(model_hash)
+ self._progress['processed_models'].discard(model_hash)
# Create model directory
model_dir = os.path.join(output_dir, model_hash)
@@ -322,7 +342,7 @@ class DownloadManager:
await MetadataUpdater.update_metadata_from_local_examples(
model_hash, model, scanner_type, scanner, model_dir
)
- download_progress['processed_models'].add(model_hash)
+ self._progress['processed_models'].add(model_hash)
return False # Return False to indicate no remote download happened
# If no local images, try to download from remote
@@ -334,57 +354,55 @@ class DownloadManager:
)
# If metadata is stale, try to refresh it
- if is_stale and model_hash not in download_progress['refreshed_models']:
+ if is_stale and model_hash not in self._progress['refreshed_models']:
await MetadataUpdater.refresh_model_metadata(
- model_hash, model_name, scanner_type, scanner
+ model_hash, model_name, scanner_type, scanner, self._progress
)
-
+
# Get the updated model data
updated_model = await MetadataUpdater.get_updated_model(
model_hash, scanner
)
-
+
if updated_model and updated_model.get('civitai', {}).get('images'):
# Retry download with updated metadata
updated_images = updated_model.get('civitai', {}).get('images', [])
success, _ = await ExampleImagesProcessor.download_model_images(
model_hash, model_name, updated_images, model_dir, optimize, downloader
)
-
- download_progress['refreshed_models'].add(model_hash)
+
+ self._progress['refreshed_models'].add(model_hash)
# Mark as processed if successful, or as failed if unsuccessful after refresh
if success:
- download_progress['processed_models'].add(model_hash)
+ self._progress['processed_models'].add(model_hash)
else:
# If we refreshed metadata and still failed, mark as permanently failed
- if model_hash in download_progress['refreshed_models']:
- download_progress['failed_models'].add(model_hash)
+ if model_hash in self._progress['refreshed_models']:
+ self._progress['failed_models'].add(model_hash)
logger.info(f"Marking model {model_name} as failed after metadata refresh")
return True # Return True to indicate a remote download happened
else:
# No civitai data or images available, mark as failed to avoid future attempts
- download_progress['failed_models'].add(model_hash)
+ self._progress['failed_models'].add(model_hash)
logger.debug(f"No civitai images available for model {model_name}, marking as failed")
# Save progress periodically
- if download_progress['completed'] % 10 == 0 or download_progress['completed'] == download_progress['total'] - 1:
- DownloadManager._save_progress(output_dir)
+ if self._progress['completed'] % 10 == 0 or self._progress['completed'] == self._progress['total'] - 1:
+ self._save_progress(output_dir)
return False # Default return if no conditions met
except Exception as e:
error_msg = f"Error processing model {model.get('model_name')}: {str(e)}"
logger.error(error_msg, exc_info=True)
- download_progress['errors'].append(error_msg)
- download_progress['last_error'] = error_msg
+ self._progress['errors'].append(error_msg)
+ self._progress['last_error'] = error_msg
return False # Return False on exception
- @staticmethod
- def _save_progress(output_dir):
- """Save download progress to file"""
- global download_progress
+ def _save_progress(self, output_dir):
+ """Save download progress to file."""
try:
progress_file = os.path.join(output_dir, '.download_progress.json')
@@ -399,11 +417,11 @@ class DownloadManager:
# Create new progress data
progress_data = {
- 'processed_models': list(download_progress['processed_models']),
- 'refreshed_models': list(download_progress['refreshed_models']),
- 'failed_models': list(download_progress['failed_models']),
- 'completed': download_progress['completed'],
- 'total': download_progress['total'],
+ 'processed_models': list(self._progress['processed_models']),
+ 'refreshed_models': list(self._progress['refreshed_models']),
+ 'failed_models': list(self._progress['failed_models']),
+ 'completed': self._progress['completed'],
+ 'total': self._progress['total'],
'last_update': time.time()
}
@@ -418,101 +436,67 @@ class DownloadManager:
except Exception as e:
logger.error(f"Failed to save progress file: {e}")
- @staticmethod
- async def start_force_download(request):
- """
- Force download example images for specific models
-
- Expects a JSON body with:
- {
- "model_hashes": ["hash1", "hash2", ...], # List of model hashes to download
- "output_dir": "path/to/output", # Base directory to save example images
- "optimize": true, # Whether to optimize images (default: true)
- "model_types": ["lora", "checkpoint"], # Model types to process (default: both)
- "delay": 1.0 # Delay between downloads (default: 1.0)
- }
- """
- global download_task, is_downloading, download_progress
+ async def start_force_download(self, options: dict):
+        """Force download example images for specific models; options: model_hashes (required), optimize, model_types, delay."""
- if is_downloading:
- return web.json_response({
- 'success': False,
- 'error': 'Download already in progress'
- }, status=400)
+ async with self._state_lock:
+ if self._is_downloading:
+ raise DownloadInProgressError(self._progress.snapshot())
- try:
- # Parse the request body
- data = await request.json()
+ data = options or {}
model_hashes = data.get('model_hashes', [])
- output_dir = data.get('output_dir')
optimize = data.get('optimize', True)
model_types = data.get('model_types', ['lora', 'checkpoint'])
- delay = float(data.get('delay', 0.2)) # Default to 0.2 seconds
-
+ delay = float(data.get('delay', 0.2))
+
if not model_hashes:
- return web.json_response({
- 'success': False,
- 'error': 'Missing model_hashes parameter'
- }, status=400)
-
+ raise DownloadConfigurationError('Missing model_hashes parameter')
+
+ output_dir = settings.get('example_images_path')
+
if not output_dir:
- return web.json_response({
- 'success': False,
- 'error': 'Missing output_dir parameter'
- }, status=400)
-
- # Create the output directory
+ raise DownloadConfigurationError('Example images path not configured in settings')
+
os.makedirs(output_dir, exist_ok=True)
-
- # Initialize progress tracking
- download_progress['total'] = len(model_hashes)
- download_progress['completed'] = 0
- download_progress['current_model'] = ''
- download_progress['status'] = 'running'
- download_progress['errors'] = []
- download_progress['last_error'] = None
- download_progress['start_time'] = time.time()
- download_progress['end_time'] = None
- download_progress['processed_models'] = set()
- download_progress['refreshed_models'] = set()
- download_progress['failed_models'] = set()
- # Set download status to downloading
- is_downloading = True
+ self._progress.reset()
+ self._progress['total'] = len(model_hashes)
+ self._progress['status'] = 'running'
+ self._progress['start_time'] = time.time()
+ self._progress['end_time'] = None
- # Execute the download function directly instead of creating a background task
- result = await DownloadManager._download_specific_models_example_images_sync(
+ self._is_downloading = True
+
+ await self._broadcast_progress(status='running')
+
+ try:
+ result = await self._download_specific_models_example_images_sync(
model_hashes,
- output_dir,
- optimize,
+ output_dir,
+ optimize,
model_types,
delay
)
- # Set download status to not downloading
- is_downloading = False
+ async with self._state_lock:
+ self._is_downloading = False
- return web.json_response({
+ return {
'success': True,
'message': 'Force download completed',
'result': result
- })
+ }
except Exception as e:
- # Set download status to not downloading
- is_downloading = False
+ async with self._state_lock:
+ self._is_downloading = False
logger.error(f"Failed during forced example images download: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
+ await self._broadcast_progress(status='error', extra={'error': str(e)})
+ raise ExampleImagesDownloadError(str(e)) from e
- @staticmethod
- async def _download_specific_models_example_images_sync(model_hashes, output_dir, optimize, model_types, delay):
- """Download example images for specific models only - synchronous version"""
- global download_progress
-
- # Get unified downloader
+ async def _download_specific_models_example_images_sync(self, model_hashes, output_dir, optimize, model_types, delay):
+        """Download example images for specific models only, awaited inline rather than run as a background task."""
+
downloader = await get_downloader()
try:
@@ -540,24 +524,18 @@ class DownloadManager:
models_to_process.append((scanner_type, model, scanner))
# Update total count based on found models
- download_progress['total'] = len(models_to_process)
- logger.debug(f"Found {download_progress['total']} models to process")
-
+ self._progress['total'] = len(models_to_process)
+ logger.debug(f"Found {self._progress['total']} models to process")
+
# Send initial progress via WebSocket
- await ws_manager.broadcast({
- 'type': 'example_images_progress',
- 'processed': 0,
- 'total': download_progress['total'],
- 'status': 'running',
- 'current_model': ''
- })
+ await self._broadcast_progress(status='running')
# Process each model
success_count = 0
for i, (scanner_type, model, scanner) in enumerate(models_to_process):
# Force process this model regardless of previous status
- was_successful = await DownloadManager._process_specific_model(
- scanner_type, model, scanner,
+ was_successful = await self._process_specific_model(
+ scanner_type, model, scanner,
output_dir, optimize, downloader
)
@@ -565,59 +543,44 @@ class DownloadManager:
success_count += 1
# Update progress
- download_progress['completed'] += 1
-
+ self._progress['completed'] += 1
+
# Send progress update via WebSocket
- await ws_manager.broadcast({
- 'type': 'example_images_progress',
- 'processed': download_progress['completed'],
- 'total': download_progress['total'],
- 'status': 'running',
- 'current_model': download_progress['current_model']
- })
+ await self._broadcast_progress(status='running')
# Only add delay after remote download, and not after processing the last model
- if was_successful and i < len(models_to_process) - 1 and download_progress['status'] == 'running':
+ if was_successful and i < len(models_to_process) - 1 and self._progress['status'] == 'running':
await asyncio.sleep(delay)
# Mark as completed
- download_progress['status'] = 'completed'
- download_progress['end_time'] = time.time()
- logger.debug(f"Forced example images download completed: {download_progress['completed']}/{download_progress['total']} models processed")
-
+ self._progress['status'] = 'completed'
+ self._progress['end_time'] = time.time()
+ logger.debug(
+ "Forced example images download completed: %s/%s models processed",
+ self._progress['completed'],
+ self._progress['total'],
+ )
+
# Send final progress via WebSocket
- await ws_manager.broadcast({
- 'type': 'example_images_progress',
- 'processed': download_progress['completed'],
- 'total': download_progress['total'],
- 'status': 'completed',
- 'current_model': ''
- })
+ await self._broadcast_progress(status='completed')
return {
- 'total': download_progress['total'],
- 'processed': download_progress['completed'],
+ 'total': self._progress['total'],
+ 'processed': self._progress['completed'],
'successful': success_count,
- 'errors': download_progress['errors']
+ 'errors': self._progress['errors']
}
except Exception as e:
error_msg = f"Error during forced example images download: {str(e)}"
logger.error(error_msg, exc_info=True)
- download_progress['errors'].append(error_msg)
- download_progress['last_error'] = error_msg
- download_progress['status'] = 'error'
- download_progress['end_time'] = time.time()
-
+ self._progress['errors'].append(error_msg)
+ self._progress['last_error'] = error_msg
+ self._progress['status'] = 'error'
+ self._progress['end_time'] = time.time()
+
# Send error status via WebSocket
- await ws_manager.broadcast({
- 'type': 'example_images_progress',
- 'processed': download_progress['completed'],
- 'total': download_progress['total'],
- 'status': 'error',
- 'error': error_msg,
- 'current_model': ''
- })
+ await self._broadcast_progress(status='error', extra={'error': error_msg})
raise
@@ -625,18 +588,16 @@ class DownloadManager:
# No need to close any sessions since we use the global downloader
pass
- @staticmethod
- async def _process_specific_model(scanner_type, model, scanner, output_dir, optimize, downloader):
- """Process a specific model for forced download, ignoring previous download status"""
- global download_progress
-
+ async def _process_specific_model(self, scanner_type, model, scanner, output_dir, optimize, downloader):
+ """Process a specific model for forced download, ignoring previous download status."""
+
# Check if download is paused
- while download_progress['status'] == 'paused':
+ while self._progress['status'] == 'paused':
await asyncio.sleep(1)
# Check if download should continue
- if download_progress['status'] != 'running':
- logger.info(f"Download stopped: {download_progress['status']}")
+ if self._progress['status'] != 'running':
+ logger.info(f"Download stopped: {self._progress['status']}")
return False
model_hash = model.get('sha256', '').lower()
@@ -646,7 +607,8 @@ class DownloadManager:
try:
# Update current model info
- download_progress['current_model'] = f"{model_name} ({model_hash[:8]})"
+ self._progress['current_model'] = f"{model_name} ({model_hash[:8]})"
+ await self._broadcast_progress(status='running')
# Create model directory
model_dir = os.path.join(output_dir, model_hash)
@@ -662,7 +624,7 @@ class DownloadManager:
await MetadataUpdater.update_metadata_from_local_examples(
model_hash, model, scanner_type, scanner, model_dir
)
- download_progress['processed_models'].add(model_hash)
+ self._progress['processed_models'].add(model_hash)
return False # Return False to indicate no remote download happened
# If no local images, try to download from remote
@@ -674,9 +636,9 @@ class DownloadManager:
)
# If metadata is stale, try to refresh it
- if is_stale and model_hash not in download_progress['refreshed_models']:
+ if is_stale and model_hash not in self._progress['refreshed_models']:
await MetadataUpdater.refresh_model_metadata(
- model_hash, model_name, scanner_type, scanner
+ model_hash, model_name, scanner_type, scanner, self._progress
)
# Get the updated model data
@@ -694,18 +656,18 @@ class DownloadManager:
# Combine failed images from both attempts
failed_images.extend(additional_failed_images)
- download_progress['refreshed_models'].add(model_hash)
+ self._progress['refreshed_models'].add(model_hash)
# For forced downloads, remove failed images from metadata
if failed_images:
# Create a copy of images excluding failed ones
- await DownloadManager._remove_failed_images_from_metadata(
+ await self._remove_failed_images_from_metadata(
model_hash, model_name, failed_images, scanner
)
# Mark as processed
if success or failed_images: # Mark as processed if we successfully downloaded some images or removed failed ones
- download_progress['processed_models'].add(model_hash)
+ self._progress['processed_models'].add(model_hash)
return True # Return True to indicate a remote download happened
else:
@@ -715,12 +677,11 @@ class DownloadManager:
except Exception as e:
error_msg = f"Error processing model {model.get('model_name')}: {str(e)}"
logger.error(error_msg, exc_info=True)
- download_progress['errors'].append(error_msg)
- download_progress['last_error'] = error_msg
+ self._progress['errors'].append(error_msg)
+ self._progress['last_error'] = error_msg
return False # Return False on exception
- @staticmethod
- async def _remove_failed_images_from_metadata(model_hash, model_name, failed_images, scanner):
+ async def _remove_failed_images_from_metadata(self, model_hash, model_name, failed_images, scanner):
"""Remove failed images from model metadata"""
try:
# Get current model data
@@ -762,4 +723,55 @@ class DownloadManager:
await scanner.update_single_model_cache(file_path, file_path, model_data)
except Exception as e:
- logger.error(f"Error removing failed images from metadata for {model_name}: {e}", exc_info=True)
\ No newline at end of file
+ logger.error(f"Error removing failed images from metadata for {model_name}: {e}", exc_info=True)
+
+ async def _broadcast_progress(
+ self,
+ *,
+ status: str | None = None,
+ extra: Dict[str, Any] | None = None,
+ ) -> None:
+ payload = self._build_progress_payload(status=status, extra=extra)
+ try:
+ await self._ws_manager.broadcast(payload)
+ except Exception as exc: # pragma: no cover - defensive logging
+ logger.warning("Failed to broadcast example image progress: %s", exc)
+
+ def _build_progress_payload(
+ self,
+ *,
+ status: str | None = None,
+ extra: Dict[str, Any] | None = None,
+ ) -> Dict[str, Any]:
+ payload: Dict[str, Any] = {
+ 'type': 'example_images_progress',
+ 'processed': self._progress['completed'],
+ 'total': self._progress['total'],
+ 'status': status or self._progress['status'],
+ 'current_model': self._progress['current_model'],
+ }
+
+ if self._progress['errors']:
+ payload['errors'] = list(self._progress['errors'])
+ if self._progress['last_error']:
+ payload['last_error'] = self._progress['last_error']
+
+ if extra:
+ payload.update(extra)
+
+ return payload
+
+
+_default_download_manager: DownloadManager | None = None
+
+
+def get_default_download_manager(ws_manager) -> DownloadManager:
+ """Return the singleton download manager used by default routes."""
+
+ global _default_download_manager
+ if (
+ _default_download_manager is None
+ or getattr(_default_download_manager, "_ws_manager", None) is not ws_manager
+ ):
+ _default_download_manager = DownloadManager(ws_manager=ws_manager)
+ return _default_download_manager
diff --git a/py/utils/example_images_metadata.py b/py/utils/example_images_metadata.py
index 496d5ad0..780eb43b 100644
--- a/py/utils/example_images_metadata.py
+++ b/py/utils/example_images_metadata.py
@@ -1,19 +1,39 @@
import logging
import os
import re
-from ..utils.metadata_manager import MetadataManager
-from ..utils.routes_common import ModelRouteUtils
+
+from ..recipes.constants import GEN_PARAM_KEYS
+from ..services.metadata_service import get_default_metadata_provider, get_metadata_provider
+from ..services.metadata_sync_service import MetadataSyncService
+from ..services.preview_asset_service import PreviewAssetService
+from ..services.settings_manager import settings
+from ..services.downloader import get_downloader
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
from ..utils.exif_utils import ExifUtils
-from ..recipes.constants import GEN_PARAM_KEYS
+from ..utils.metadata_manager import MetadataManager
logger = logging.getLogger(__name__)
+_preview_service = PreviewAssetService(
+ metadata_manager=MetadataManager,
+ downloader_factory=get_downloader,
+ exif_utils=ExifUtils,
+)
+
+_metadata_sync_service = MetadataSyncService(
+ metadata_manager=MetadataManager,
+ preview_service=_preview_service,
+ settings=settings,
+ default_metadata_provider_factory=get_default_metadata_provider,
+ metadata_provider_selector=get_metadata_provider,
+)
+
+
class MetadataUpdater:
"""Handles updating model metadata related to example images"""
@staticmethod
- async def refresh_model_metadata(model_hash, model_name, scanner_type, scanner):
+ async def refresh_model_metadata(model_hash, model_name, scanner_type, scanner, progress: dict | None = None):
"""Refresh model metadata from CivitAI
Args:
@@ -25,8 +45,6 @@ class MetadataUpdater:
Returns:
bool: True if metadata was successfully refreshed, False otherwise
"""
- from ..utils.example_images_download_manager import download_progress
-
try:
# Find the model in the scanner cache
cache = await scanner.get_cached_data()
@@ -47,31 +65,32 @@ class MetadataUpdater:
return False
# Track that we're refreshing this model
- download_progress['refreshed_models'].add(model_hash)
+ if progress is not None:
+ progress['refreshed_models'].add(model_hash)
- # Use ModelRouteUtils to refresh metadata
async def update_cache_func(old_path, new_path, metadata):
return await scanner.update_single_model_cache(old_path, new_path, metadata)
- success = await ModelRouteUtils.fetch_and_update_model(
- model_hash,
- file_path,
- model_data,
- update_cache_func
+ success, error = await _metadata_sync_service.fetch_and_update_model(
+ sha256=model_hash,
+ file_path=file_path,
+ model_data=model_data,
+ update_cache_func=update_cache_func,
)
if success:
logger.info(f"Successfully refreshed metadata for {model_name}")
return True
else:
- logger.warning(f"Failed to refresh metadata for {model_name}")
+ logger.warning(f"Failed to refresh metadata for {model_name}, {error}")
return False
-
+
except Exception as e:
error_msg = f"Error refreshing metadata for {model_name}: {str(e)}"
logger.error(error_msg, exc_info=True)
- download_progress['errors'].append(error_msg)
- download_progress['last_error'] = error_msg
+ if progress is not None:
+ progress['errors'].append(error_msg)
+ progress['last_error'] = error_msg
return False
@staticmethod
diff --git a/py/utils/example_images_processor.py b/py/utils/example_images_processor.py
index 9dba4e2c..7f108ef9 100644
--- a/py/utils/example_images_processor.py
+++ b/py/utils/example_images_processor.py
@@ -1,7 +1,6 @@
import logging
import os
import re
-import tempfile
import random
import string
from aiohttp import web
@@ -13,6 +12,14 @@ from ..utils.metadata_manager import MetadataManager
logger = logging.getLogger(__name__)
+
+class ExampleImagesImportError(RuntimeError):
+ """Base error for example image import operations."""
+
+
+class ExampleImagesValidationError(ExampleImagesImportError):
+ """Raised when input validation fails."""
+
class ExampleImagesProcessor:
"""Processes and manipulates example images"""
@@ -23,17 +30,60 @@ class ExampleImagesProcessor:
return ''.join(random.choice(chars) for _ in range(length))
@staticmethod
- def get_civitai_optimized_url(image_url):
- """Convert Civitai image URL to its optimized WebP version"""
+ def get_civitai_optimized_url(media_url):
+ """Convert Civitai media URL (image or video) to its optimized version"""
base_pattern = r'(https://image\.civitai\.com/[^/]+/[^/]+)'
- match = re.match(base_pattern, image_url)
+ match = re.match(base_pattern, media_url)
if match:
base_url = match.group(1)
- return f"{base_url}/optimized=true/image.webp"
+ return f"{base_url}/optimized=true"
- return image_url
+ return media_url
+ @staticmethod
+ def _get_file_extension_from_content_or_headers(content, headers, fallback_url=None):
+ """Determine file extension from content magic bytes or headers"""
+ # Check magic bytes for common formats
+ if content:
+ if content.startswith(b'\xFF\xD8\xFF'):
+ return '.jpg'
+ elif content.startswith(b'\x89PNG\r\n\x1A\n'):
+ return '.png'
+ elif content.startswith(b'GIF87a') or content.startswith(b'GIF89a'):
+ return '.gif'
+ elif content.startswith(b'RIFF') and b'WEBP' in content[:12]:
+ return '.webp'
+ elif content.startswith(b'\x00\x00\x00\x18ftypmp4') or content.startswith(b'\x00\x00\x00\x20ftypmp4'):
+ return '.mp4'
+ elif content.startswith(b'\x1A\x45\xDF\xA3'):
+ return '.webm'
+
+ # Check Content-Type header
+ if headers:
+ content_type = headers.get('content-type', '').lower()
+ type_map = {
+ 'image/jpeg': '.jpg',
+ 'image/png': '.png',
+ 'image/gif': '.gif',
+ 'image/webp': '.webp',
+ 'video/mp4': '.mp4',
+ 'video/webm': '.webm',
+ 'video/quicktime': '.mov'
+ }
+ if content_type in type_map:
+ return type_map[content_type]
+
+ # Fallback to URL extension if available
+ if fallback_url:
+ filename = os.path.basename(fallback_url.split('?')[0])
+ ext = os.path.splitext(filename)[1].lower()
+ if ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or ext in SUPPORTED_MEDIA_EXTENSIONS['videos']:
+ return ext
+
+ # Default fallback
+ return '.jpg'
+
@staticmethod
async def download_model_images(model_hash, model_name, model_images, model_dir, optimize, downloader):
"""Download images for a single model
@@ -48,45 +98,49 @@ class ExampleImagesProcessor:
if not image_url:
continue
- # Get image filename from URL
- image_filename = os.path.basename(image_url.split('?')[0])
- image_ext = os.path.splitext(image_filename)[1].lower()
-
- # Handle images and videos
- is_image = image_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
- is_video = image_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
-
- if not (is_image or is_video):
- logger.debug(f"Skipping unsupported file type: {image_filename}")
- continue
-
- # Use 0-based indexing instead of 1-based indexing
- save_filename = f"image_{i}{image_ext}"
-
- # If optimizing images and this is a Civitai image, use their pre-optimized WebP version
- if is_image and optimize and 'civitai.com' in image_url:
+ # Apply optimization for Civitai URLs if enabled
+ original_url = image_url
+ if optimize and 'civitai.com' in image_url:
image_url = ExampleImagesProcessor.get_civitai_optimized_url(image_url)
- save_filename = f"image_{i}.webp"
- # Check if already downloaded
- save_path = os.path.join(model_dir, save_filename)
- if os.path.exists(save_path):
- logger.debug(f"File already exists: {save_path}")
- continue
-
- # Download the file
+ # Download the file first to determine the actual file type
try:
- logger.debug(f"Downloading {save_filename} for {model_name}")
+ logger.debug(f"Downloading media file {i} for {model_name}")
- # Download using the unified downloader
- success, content = await downloader.download_to_memory(
+ # Download using the unified downloader with headers
+ success, content, headers = await downloader.download_to_memory(
image_url,
- use_auth=False # Example images don't need auth
+ use_auth=False, # Example images don't need auth
+ return_headers=True
)
if success:
+ # Determine file extension from content or headers
+ media_ext = ExampleImagesProcessor._get_file_extension_from_content_or_headers(
+ content, headers, original_url
+ )
+
+ # Check if the detected file type is supported
+ is_image = media_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
+ is_video = media_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
+
+ if not (is_image or is_video):
+ logger.debug(f"Skipping unsupported file type: {media_ext}")
+ continue
+
+ # Use 0-based indexing with the detected extension
+ save_filename = f"image_{i}{media_ext}"
+ save_path = os.path.join(model_dir, save_filename)
+
+ # Check if already downloaded
+ if os.path.exists(save_path):
+ logger.debug(f"File already exists: {save_path}")
+ continue
+
+ # Save the file
with open(save_path, 'wb') as f:
f.write(content)
+
elif "404" in str(content):
error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
logger.warning(error_msg)
@@ -119,45 +173,49 @@ class ExampleImagesProcessor:
if not image_url:
continue
- # Get image filename from URL
- image_filename = os.path.basename(image_url.split('?')[0])
- image_ext = os.path.splitext(image_filename)[1].lower()
-
- # Handle images and videos
- is_image = image_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
- is_video = image_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
-
- if not (is_image or is_video):
- logger.debug(f"Skipping unsupported file type: {image_filename}")
- continue
-
- # Use 0-based indexing instead of 1-based indexing
- save_filename = f"image_{i}{image_ext}"
-
- # If optimizing images and this is a Civitai image, use their pre-optimized WebP version
- if is_image and optimize and 'civitai.com' in image_url:
+ # Apply optimization for Civitai URLs if enabled
+ original_url = image_url
+ if optimize and 'civitai.com' in image_url:
image_url = ExampleImagesProcessor.get_civitai_optimized_url(image_url)
- save_filename = f"image_{i}.webp"
- # Check if already downloaded
- save_path = os.path.join(model_dir, save_filename)
- if os.path.exists(save_path):
- logger.debug(f"File already exists: {save_path}")
- continue
-
- # Download the file
+ # Download the file first to determine the actual file type
try:
- logger.debug(f"Downloading {save_filename} for {model_name}")
+ logger.debug(f"Downloading media file {i} for {model_name}")
- # Download using the unified downloader
- success, content = await downloader.download_to_memory(
+ # Download using the unified downloader with headers
+ success, content, headers = await downloader.download_to_memory(
image_url,
- use_auth=False # Example images don't need auth
+ use_auth=False, # Example images don't need auth
+ return_headers=True
)
if success:
+ # Determine file extension from content or headers
+ media_ext = ExampleImagesProcessor._get_file_extension_from_content_or_headers(
+ content, headers, original_url
+ )
+
+ # Check if the detected file type is supported
+ is_image = media_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
+ is_video = media_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
+
+ if not (is_image or is_video):
+ logger.debug(f"Skipping unsupported file type: {media_ext}")
+ continue
+
+ # Use 0-based indexing with the detected extension
+ save_filename = f"image_{i}{media_ext}"
+ save_path = os.path.join(model_dir, save_filename)
+
+ # Check if already downloaded
+ if os.path.exists(save_path):
+ logger.debug(f"File already exists: {save_path}")
+ continue
+
+ # Save the file
with open(save_path, 'wb') as f:
f.write(content)
+
elif "404" in str(content):
error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
logger.warning(error_msg)
@@ -248,90 +306,29 @@ class ExampleImagesProcessor:
return False
@staticmethod
- async def import_images(request):
- """
- Import local example images
-
- Accepts:
- - multipart/form-data form with model_hash and files fields
- or
- - JSON request with model_hash and file_paths
-
- Returns:
- - Success status and list of imported files
- """
+ async def import_images(model_hash: str, files_to_import: list[str]):
+ """Import local example images for a model."""
+
+ if not model_hash:
+ raise ExampleImagesValidationError('Missing model_hash parameter')
+
+ if not files_to_import:
+ raise ExampleImagesValidationError('No files provided to import')
+
try:
- model_hash = None
- files_to_import = []
- temp_files_to_cleanup = []
-
- # Check if it's a multipart form-data request (direct file upload)
- if request.content_type and 'multipart/form-data' in request.content_type:
- reader = await request.multipart()
-
- # First get model_hash
- field = await reader.next()
- if field.name == 'model_hash':
- model_hash = await field.text()
-
- # Then process all files
- while True:
- field = await reader.next()
- if field is None:
- break
-
- if field.name == 'files':
- # Create a temporary file with appropriate suffix for type detection
- file_name = field.filename
- file_ext = os.path.splitext(file_name)[1].lower()
-
- with tempfile.NamedTemporaryFile(suffix=file_ext, delete=False) as tmp_file:
- temp_path = tmp_file.name
- temp_files_to_cleanup.append(temp_path) # Track for cleanup
-
- # Write chunks to the temporary file
- while True:
- chunk = await field.read_chunk()
- if not chunk:
- break
- tmp_file.write(chunk)
-
- # Add to the list of files to process
- files_to_import.append(temp_path)
- else:
- # Parse JSON request (legacy method using file paths)
- data = await request.json()
- model_hash = data.get('model_hash')
- files_to_import = data.get('file_paths', [])
-
- if not model_hash:
- return web.json_response({
- 'success': False,
- 'error': 'Missing model_hash parameter'
- }, status=400)
-
- if not files_to_import:
- return web.json_response({
- 'success': False,
- 'error': 'No files provided to import'
- }, status=400)
-
# Get example images path
example_images_path = settings.get('example_images_path')
if not example_images_path:
- return web.json_response({
- 'success': False,
- 'error': 'No example images path configured'
- }, status=400)
-
+ raise ExampleImagesValidationError('No example images path configured')
+
# Find the model and get current metadata
lora_scanner = await ServiceRegistry.get_lora_scanner()
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
embedding_scanner = await ServiceRegistry.get_embedding_scanner()
-
+
model_data = None
scanner = None
-
+
# Check both scanners to find the model
for scan_obj in [lora_scanner, checkpoint_scanner, embedding_scanner]:
cache = await scan_obj.get_cached_data()
@@ -342,21 +339,20 @@ class ExampleImagesProcessor:
break
if model_data:
break
-
+
if not model_data:
- return web.json_response({
- 'success': False,
- 'error': f"Model with hash {model_hash} not found in cache"
- }, status=404)
-
+ raise ExampleImagesImportError(
+ f"Model with hash {model_hash} not found in cache"
+ )
+
# Create model folder
model_folder = os.path.join(example_images_path, model_hash)
os.makedirs(model_folder, exist_ok=True)
-
+
imported_files = []
errors = []
newly_imported_paths = []
-
+
# Process each file path
for file_path in files_to_import:
try:
@@ -364,26 +360,26 @@ class ExampleImagesProcessor:
if not os.path.isfile(file_path):
errors.append(f"File not found: {file_path}")
continue
-
+
# Check if file type is supported
file_ext = os.path.splitext(file_path)[1].lower()
- if not (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
+ if not (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']):
errors.append(f"Unsupported file type: {file_path}")
continue
-
+
# Generate new filename using short ID instead of UUID
short_id = ExampleImagesProcessor.generate_short_id()
new_filename = f"custom_{short_id}{file_ext}"
-
+
dest_path = os.path.join(model_folder, new_filename)
-
+
# Copy the file
import shutil
shutil.copy2(file_path, dest_path)
# Store both the dest_path and the short_id
newly_imported_paths.append((dest_path, short_id))
-
+
# Add to imported files list
imported_files.append({
'name': new_filename,
@@ -393,39 +389,31 @@ class ExampleImagesProcessor:
})
except Exception as e:
errors.append(f"Error importing {file_path}: {str(e)}")
-
+
# Update metadata with new example images
regular_images, custom_images = await MetadataUpdater.update_metadata_after_import(
- model_hash,
+ model_hash,
model_data,
scanner,
newly_imported_paths
)
-
- return web.json_response({
+
+ return {
'success': len(imported_files) > 0,
- 'message': f'Successfully imported {len(imported_files)} files' +
+ 'message': f'Successfully imported {len(imported_files)} files' +
(f' with {len(errors)} errors' if errors else ''),
'files': imported_files,
'errors': errors,
'regular_images': regular_images,
'custom_images': custom_images,
"model_file_path": model_data.get('file_path', ''),
- })
-
+ }
+
+ except ExampleImagesImportError:
+ raise
except Exception as e:
logger.error(f"Failed to import example images: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
- finally:
- # Clean up temporary files
- for temp_file in temp_files_to_cleanup:
- try:
- os.remove(temp_file)
- except Exception as e:
- logger.error(f"Failed to remove temporary file {temp_file}: {e}")
+ raise ExampleImagesImportError(str(e)) from e
@staticmethod
async def delete_custom_image(request):
@@ -569,4 +557,7 @@ class ExampleImagesProcessor:
return web.json_response({
'success': False,
'error': str(e)
- }, status=500)
\ No newline at end of file
+ }, status=500)
+
+
+
\ No newline at end of file
diff --git a/py/utils/exif_utils.py b/py/utils/exif_utils.py
index 1c5a9f80..ff96703c 100644
--- a/py/utils/exif_utils.py
+++ b/py/utils/exif_utils.py
@@ -4,7 +4,7 @@ import logging
from typing import Optional
from io import BytesIO
import os
-from PIL import Image
+from PIL import Image, PngImagePlugin
logger = logging.getLogger(__name__)
@@ -86,9 +86,10 @@ class ExifUtils:
# For PNG, try to update parameters directly
if img_format == 'PNG':
- # We'll save with parameters in the PNG info
- info_dict = {'parameters': metadata}
- img.save(image_path, format='PNG', pnginfo=info_dict)
+ # Use PngInfo instead of plain dictionary
+ png_info = PngImagePlugin.PngInfo()
+ png_info.add_text("parameters", metadata)
+ img.save(image_path, format='PNG', pnginfo=png_info)
return image_path
# For WebP format, use PIL's exif parameter directly
diff --git a/py/utils/models.py b/py/utils/models.py
index 9e1dc737..159146d5 100644
--- a/py/utils/models.py
+++ b/py/utils/models.py
@@ -24,6 +24,8 @@ class BaseModelMetadata:
civitai_deleted: bool = False # Whether deleted from Civitai
favorite: bool = False # Whether the model is a favorite
exclude: bool = False # Whether to exclude this model from the cache
+ db_checked: bool = False # Whether checked in archive DB
+ last_checked_at: float = 0 # Last checked timestamp
_unknown_fields: Dict[str, Any] = field(default_factory=dict, repr=False, compare=False) # Store unknown fields
def __post_init__(self):
diff --git a/py/utils/routes_common.py b/py/utils/routes_common.py
deleted file mode 100644
index 80765f7b..00000000
--- a/py/utils/routes_common.py
+++ /dev/null
@@ -1,1172 +0,0 @@
-import os
-import json
-import logging
-from typing import Dict, List, Callable, Awaitable
-from aiohttp import web
-
-from .model_utils import determine_base_model
-from .constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
-from ..config import config
-from ..services.service_registry import ServiceRegistry
-from ..services.downloader import get_downloader
-from ..utils.exif_utils import ExifUtils
-from ..utils.metadata_manager import MetadataManager
-from ..services.websocket_manager import ws_manager
-from ..services.metadata_service import get_default_metadata_provider, get_metadata_provider
-from ..services.settings_manager import settings
-
-logger = logging.getLogger(__name__)
-
-
-class ModelRouteUtils:
- """Shared utilities for model routes (LoRAs, Checkpoints, etc.)"""
-
- @staticmethod
- async def load_local_metadata(metadata_path: str) -> Dict:
- """Load local metadata file"""
- if os.path.exists(metadata_path):
- try:
- with open(metadata_path, 'r', encoding='utf-8') as f:
- return json.load(f)
- except Exception as e:
- logger.error(f"Error loading metadata from {metadata_path}: {e}")
- return {}
-
- @staticmethod
- async def handle_not_found_on_civitai(metadata_path: str, local_metadata: Dict) -> None:
- """Handle case when model is not found on CivitAI"""
- local_metadata['from_civitai'] = False
- await MetadataManager.save_metadata(metadata_path, local_metadata)
-
- @staticmethod
- def is_civitai_api_metadata(meta: dict) -> bool:
- """
- Determine if the given civitai metadata is from the civitai API.
- Returns True if both 'files' and 'images' exist and are non-empty,
- and the 'source' is not 'archive_db'.
- """
- if not isinstance(meta, dict):
- return False
- files = meta.get('files')
- images = meta.get('images')
- source = meta.get('source')
- return bool(files) and bool(images) and source != 'archive_db'
-
- @staticmethod
- async def update_model_metadata(metadata_path: str, local_metadata: Dict,
- civitai_metadata: Dict, metadata_provider=None) -> None:
- """Update local metadata with CivitAI data"""
- # Save existing trainedWords and customImages if they exist
- existing_civitai = local_metadata.get('civitai') or {} # Use empty dict if None
-
- # Check if we should skip the update to avoid overwriting richer data
- if civitai_metadata.get('source') == 'archive_db' and ModelRouteUtils.is_civitai_api_metadata(existing_civitai):
- logger.info(f"Skip civitai update for {local_metadata.get('model_name', '')} ({existing_civitai.get('name', '')})")
- else:
- # Create a new civitai metadata by updating existing with new
- merged_civitai = existing_civitai.copy()
- merged_civitai.update(civitai_metadata)
-
- if civitai_metadata.get('source') == 'archive_db':
- model_name = civitai_metadata.get('model', {}).get('name', '')
- version_name = civitai_metadata.get('name', '')
- logger.info(f"Recovered metadata from archive_db for deleted model: {model_name} ({version_name})")
-
- # Special handling for trainedWords - ensure we don't lose any existing trained words
- if 'trainedWords' in existing_civitai:
- existing_trained_words = existing_civitai.get('trainedWords', [])
- new_trained_words = civitai_metadata.get('trainedWords', [])
- # Use a set to combine words without duplicates, then convert back to list
- merged_trained_words = list(set(existing_trained_words + new_trained_words))
- merged_civitai['trainedWords'] = merged_trained_words
-
- # Update local metadata with merged civitai data
- local_metadata['civitai'] = merged_civitai
- local_metadata['from_civitai'] = True
-
- # Update model-related metadata from civitai_metadata.model
- if 'model' in civitai_metadata and civitai_metadata['model']:
- model_data = civitai_metadata['model']
-
- # Update model name if available and not already set
- if model_data.get('name'):
- local_metadata['model_name'] = model_data['name']
-
- # Update modelDescription if missing or empty in local_metadata
- if not local_metadata.get('modelDescription') and model_data.get('description'):
- local_metadata['modelDescription'] = model_data['description']
-
- # Update tags if missing or empty in local_metadata
- if not local_metadata.get('tags') and model_data.get('tags'):
- local_metadata['tags'] = model_data['tags']
-
- # Update creator in civitai metadata if missing
- if model_data.get('creator') and not local_metadata.get('civitai', {}).get('creator'):
- if 'civitai' not in local_metadata:
- local_metadata['civitai'] = {}
- local_metadata['civitai']['creator'] = model_data['creator']
-
- # Update base model
- local_metadata['base_model'] = determine_base_model(civitai_metadata.get('baseModel'))
-
- # Update preview if needed
- if not local_metadata.get('preview_url') or not os.path.exists(local_metadata['preview_url']):
- first_preview = next((img for img in civitai_metadata.get('images', [])), None)
- if (first_preview):
- # Determine if content is video or image
- is_video = first_preview['type'] == 'video'
-
- if is_video:
- # For videos use .mp4 extension
- preview_ext = '.mp4'
- else:
- # For images use .webp extension
- preview_ext = '.webp'
-
- base_name = os.path.splitext(os.path.splitext(os.path.basename(metadata_path))[0])[0]
- preview_filename = base_name + preview_ext
- preview_path = os.path.join(os.path.dirname(metadata_path), preview_filename)
-
- if is_video:
- # Download video as is using downloader
- downloader = await get_downloader()
- success, result = await downloader.download_file(
- first_preview['url'],
- preview_path,
- use_auth=False
- )
- if success:
- local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
- local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
- else:
- # For images, download and then optimize to WebP using downloader
- downloader = await get_downloader()
- success, content = await downloader.download_to_memory(
- first_preview['url'],
- use_auth=False
- )
- if success:
- try:
- # Optimize and convert to WebP
- optimized_data, _ = ExifUtils.optimize_image(
- image_data=content, # Use downloaded content directly
- target_width=CARD_PREVIEW_WIDTH,
- format='webp',
- quality=85,
- preserve_metadata=False
- )
-
- # Save the optimized WebP image
- with open(preview_path, 'wb') as f:
- f.write(optimized_data)
-
- # Update metadata
- local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
- local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
-
- except Exception as e:
- logger.error(f"Error optimizing preview image: {e}")
- # If optimization fails, save the original content
- try:
- with open(preview_path, 'wb') as f:
- f.write(content)
- local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
- local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
- except Exception as save_error:
- logger.error(f"Error saving preview image: {save_error}")
-
- # Save updated metadata
- await MetadataManager.save_metadata(metadata_path, local_metadata)
-
- @staticmethod
- async def fetch_and_update_model(
- sha256: str,
- file_path: str,
- model_data: dict,
- update_cache_func: Callable[[str, str, Dict], Awaitable[bool]]
- ) -> bool:
- """Fetch and update metadata for a single model
-
- Args:
- sha256: SHA256 hash of the model file
- file_path: Path to the model file
- model_data: The model object in cache to update
- update_cache_func: Function to update the cache with new metadata
-
- Returns:
- bool: True if successful, False otherwise
- """
- try:
- # Validate input parameters
- if not isinstance(model_data, dict):
- logger.error(f"Invalid model_data type: {type(model_data)}")
- return False
-
- metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
-
- # Check if model metadata exists
- local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
-
- if model_data.get('from_civitai') is False:
- if not settings.get('enable_metadata_archive_db', False):
- return False
- # Likely deleted from CivitAI, use archive_db if available
- metadata_provider = await get_metadata_provider('sqlite')
- else:
- metadata_provider = await get_default_metadata_provider()
-
- civitai_metadata = await metadata_provider.get_model_by_hash(sha256)
- if not civitai_metadata:
- # Mark as not from CivitAI if not found
- local_metadata['from_civitai'] = False
- model_data['from_civitai'] = False
- await MetadataManager.save_metadata(file_path, local_metadata)
- return False
-
- # Update metadata
- await ModelRouteUtils.update_model_metadata(
- metadata_path,
- local_metadata,
- civitai_metadata,
- metadata_provider
- )
-
- # Update cache object directly using safe .get() method
- update_dict = {
- 'model_name': local_metadata.get('model_name'),
- 'preview_url': local_metadata.get('preview_url'),
- 'from_civitai': True,
- 'civitai': civitai_metadata
- }
- model_data.update(update_dict)
-
- # Update cache using the provided function
- await update_cache_func(file_path, file_path, local_metadata)
-
- return True
-
- except KeyError as e:
- logger.error(f"Error fetching CivitAI data - Missing key: {e} in model_data={model_data}")
- return False
- except Exception as e:
- logger.error(f"Error fetching CivitAI data: {str(e)}", exc_info=True) # Include stack trace
- return False
-
- @staticmethod
- def filter_civitai_data(data: Dict, minimal: bool = False) -> Dict:
- """Filter relevant fields from CivitAI data"""
- if not data:
- return {}
-
- fields = ["id", "modelId", "name", "trainedWords"] if minimal else [
- "id", "modelId", "name", "createdAt", "updatedAt",
- "publishedAt", "trainedWords", "baseModel", "description",
- "model", "images", "customImages", "creator"
- ]
- return {k: data[k] for k in fields if k in data}
-
- @staticmethod
- async def delete_model_files(target_dir: str, file_name: str) -> List[str]:
- """Delete model and associated files
-
- Args:
- target_dir: Directory containing the model files
- file_name: Base name of the model file without extension
-
- Returns:
- List of deleted file paths
- """
- patterns = [
- f"{file_name}.safetensors", # Required
- f"{file_name}.metadata.json",
- ]
-
- # Add all preview file extensions
- for ext in PREVIEW_EXTENSIONS:
- patterns.append(f"{file_name}{ext}")
-
- deleted = []
- main_file = patterns[0]
- main_path = os.path.join(target_dir, main_file).replace(os.sep, '/')
-
- if os.path.exists(main_path):
- # Delete file
- os.remove(main_path)
- deleted.append(main_path)
- else:
- logger.warning(f"Model file not found: {main_file}")
-
- # Delete optional files
- for pattern in patterns[1:]:
- path = os.path.join(target_dir, pattern)
- if os.path.exists(path):
- try:
- os.remove(path)
- deleted.append(pattern)
- except Exception as e:
- logger.warning(f"Failed to delete {pattern}: {e}")
-
- return deleted
-
- @staticmethod
- def get_multipart_ext(filename):
- """Get extension that may have multiple parts like .metadata.json or .metadata.json.bak"""
- parts = filename.split(".")
- if len(parts) == 3: # If contains 2-part extension
- return "." + ".".join(parts[-2:]) # Take the last two parts, like ".metadata.json"
- elif len(parts) >= 4: # If contains 3-part or more extensions
- return "." + ".".join(parts[-3:]) # Take the last three parts, like ".metadata.json.bak"
- return os.path.splitext(filename)[1] # Otherwise take the regular extension, like ".safetensors"
-
- # New common endpoint handlers
-
- @staticmethod
- async def handle_delete_model(request: web.Request, scanner) -> web.Response:
- """Handle model deletion request
-
- Args:
- request: The aiohttp request
- scanner: The model scanner instance with cache management methods
-
- Returns:
- web.Response: The HTTP response
- """
- try:
- data = await request.json()
- file_path = data.get('file_path')
- if not file_path:
- return web.Response(text='Model path is required', status=400)
-
- target_dir = os.path.dirname(file_path)
- file_name = os.path.splitext(os.path.basename(file_path))[0]
-
- deleted_files = await ModelRouteUtils.delete_model_files(
- target_dir,
- file_name
- )
-
- # Remove from cache
- cache = await scanner.get_cached_data()
- cache.raw_data = [item for item in cache.raw_data if item['file_path'] != file_path]
- await cache.resort()
-
- # Update hash index if available
- if hasattr(scanner, '_hash_index') and scanner._hash_index:
- scanner._hash_index.remove_by_path(file_path)
-
- return web.json_response({
- 'success': True,
- 'deleted_files': deleted_files
- })
-
- except Exception as e:
- logger.error(f"Error deleting model: {e}", exc_info=True)
- return web.Response(text=str(e), status=500)
-
- @staticmethod
- async def handle_fetch_civitai(request: web.Request, scanner) -> web.Response:
- """Handle CivitAI metadata fetch request
-
- Args:
- request: The aiohttp request
- scanner: The model scanner instance with cache management methods
-
- Returns:
- web.Response: The HTTP response with metadata on success
- """
- try:
- data = await request.json()
- metadata_path = os.path.splitext(data['file_path'])[0] + '.metadata.json'
-
- # Check if model metadata exists
- local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
- if not local_metadata or not local_metadata.get('sha256'):
- return web.json_response({"success": False, "error": "No SHA256 hash found"}, status=400)
-
- # Get metadata provider and fetch from unified provider
- metadata_provider = await get_default_metadata_provider()
-
- # Fetch and update metadata
- civitai_metadata = await metadata_provider.get_model_by_hash(local_metadata["sha256"])
- if not civitai_metadata:
- await ModelRouteUtils.handle_not_found_on_civitai(metadata_path, local_metadata)
- return web.json_response({"success": False, "error": "Not found on CivitAI"}, status=404)
-
- await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, metadata_provider)
-
- # Update the cache
- await scanner.update_single_model_cache(data['file_path'], data['file_path'], local_metadata)
-
- # Return the updated metadata along with success status
- return web.json_response({"success": True, "metadata": local_metadata})
-
- except Exception as e:
- logger.error(f"Error fetching from CivitAI: {e}", exc_info=True)
- return web.json_response({"success": False, "error": str(e)}, status=500)
-
- @staticmethod
- async def handle_replace_preview(request: web.Request, scanner) -> web.Response:
- """Handle preview image replacement request"""
- try:
- reader = await request.multipart()
-
- # Read preview file data
- field = await reader.next()
- if field.name != 'preview_file':
- raise ValueError("Expected 'preview_file' field")
- content_type = field.headers.get('Content-Type', 'image/png')
-
- # Try to get original filename if available
- content_disposition = field.headers.get('Content-Disposition', '')
- original_filename = None
- import re
- filename_match = re.search(r'filename="(.*?)"', content_disposition)
- if filename_match:
- original_filename = filename_match.group(1)
-
- preview_data = await field.read()
-
- # Read model path
- field = await reader.next()
- if field.name != 'model_path':
- raise ValueError("Expected 'model_path' field")
- model_path = (await field.read()).decode()
-
- # Read NSFW level
- nsfw_level = 0 # Default to 0 (unknown)
- field = await reader.next()
- if field and field.name == 'nsfw_level':
- try:
- nsfw_level = int((await field.read()).decode())
- except (ValueError, TypeError):
- logger.warning("Invalid NSFW level format, using default 0")
-
- # Save preview file
- base_name = os.path.splitext(os.path.basename(model_path))[0]
- folder = os.path.dirname(model_path)
-
- # Determine format based on content type and original filename
- is_gif = False
- if original_filename and original_filename.lower().endswith('.gif'):
- is_gif = True
- elif content_type.lower() == 'image/gif':
- is_gif = True
-
- # Determine if content is video or image and handle specific formats
- if content_type.startswith('video/'):
- # For videos, preserve original format if possible
- if original_filename:
- extension = os.path.splitext(original_filename)[1].lower()
- # Default to .mp4 if no extension or unrecognized
- if not extension or extension not in ['.mp4', '.webm', '.mov', '.avi']:
- extension = '.mp4'
- else:
- # Try to determine extension from content type
- if 'webm' in content_type:
- extension = '.webm'
- else:
- extension = '.mp4' # Default
- optimized_data = preview_data # No optimization for videos
- elif is_gif:
- # Preserve GIF format without optimization
- extension = '.gif'
- optimized_data = preview_data
- else:
- # For other images, optimize and convert to WebP
- optimized_data, _ = ExifUtils.optimize_image(
- image_data=preview_data,
- target_width=CARD_PREVIEW_WIDTH,
- format='webp',
- quality=85,
- preserve_metadata=False
- )
- extension = '.webp'
-
- # Delete any existing preview files for this model
- for ext in PREVIEW_EXTENSIONS:
- existing_preview = os.path.join(folder, base_name + ext)
- if os.path.exists(existing_preview):
- try:
- os.remove(existing_preview)
- logger.debug(f"Deleted existing preview: {existing_preview}")
- except Exception as e:
- logger.warning(f"Failed to delete existing preview {existing_preview}: {e}")
-
- preview_path = os.path.join(folder, base_name + extension).replace(os.sep, '/')
-
- with open(preview_path, 'wb') as f:
- f.write(optimized_data)
-
- # Update preview path and NSFW level in metadata
- metadata_path = os.path.splitext(model_path)[0] + '.metadata.json'
- if os.path.exists(metadata_path):
- try:
- with open(metadata_path, 'r', encoding='utf-8') as f:
- metadata = json.load(f)
-
- # Update preview_url and preview_nsfw_level in the metadata dict
- metadata['preview_url'] = preview_path
- metadata['preview_nsfw_level'] = nsfw_level
-
- await MetadataManager.save_metadata(model_path, metadata)
- except Exception as e:
- logger.error(f"Error updating metadata: {e}")
-
- # Update preview URL in scanner cache
- await scanner.update_preview_in_cache(model_path, preview_path, nsfw_level)
-
- return web.json_response({
- "success": True,
- "preview_url": config.get_preview_static_url(preview_path),
- "preview_nsfw_level": nsfw_level
- })
-
- except Exception as e:
- logger.error(f"Error replacing preview: {e}", exc_info=True)
- return web.Response(text=str(e), status=500)
-
- @staticmethod
- async def handle_exclude_model(request: web.Request, scanner) -> web.Response:
- """Handle model exclusion request
-
- Args:
- request: The aiohttp request
- scanner: The model scanner instance with cache management methods
-
- Returns:
- web.Response: The HTTP response
- """
- try:
- data = await request.json()
- file_path = data.get('file_path')
- if not file_path:
- return web.Response(text='Model path is required', status=400)
-
- # Update metadata to mark as excluded
- metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
- metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
- metadata['exclude'] = True
-
- # Save updated metadata
- await MetadataManager.save_metadata(file_path, metadata)
-
- # Update cache
- cache = await scanner.get_cached_data()
-
- # Find and remove model from cache
- model_to_remove = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
- if model_to_remove:
- # Update tags count
- for tag in model_to_remove.get('tags', []):
- if tag in scanner._tags_count:
- scanner._tags_count[tag] = max(0, scanner._tags_count[tag] - 1)
- if scanner._tags_count[tag] == 0:
- del scanner._tags_count[tag]
-
- # Remove from hash index if available
- if hasattr(scanner, '_hash_index') and scanner._hash_index:
- scanner._hash_index.remove_by_path(file_path)
-
- # Remove from cache data
- cache.raw_data = [item for item in cache.raw_data if item['file_path'] != file_path]
- await cache.resort()
-
- # Add to excluded models list
- scanner._excluded_models.append(file_path)
-
- return web.json_response({
- 'success': True,
- 'message': f"Model {os.path.basename(file_path)} excluded"
- })
-
- except Exception as e:
- logger.error(f"Error excluding model: {e}", exc_info=True)
- return web.Response(text=str(e), status=500)
-
- @staticmethod
- async def handle_download_model(request: web.Request) -> web.Response:
- """Handle model download request"""
- try:
- download_manager = await ServiceRegistry.get_download_manager()
- data = await request.json()
-
- # Get or generate a download ID
- download_id = data.get('download_id', ws_manager.generate_download_id())
-
- # Create progress callback with download ID
- async def progress_callback(progress):
- await ws_manager.broadcast_download_progress(download_id, {
- 'status': 'progress',
- 'progress': progress,
- 'download_id': download_id
- })
-
- # Check which identifier is provided and convert to int
- model_id = None
- model_version_id = None
-
- if data.get('model_id'):
- try:
- model_id = int(data.get('model_id'))
- except (TypeError, ValueError):
- return web.json_response({
- 'success': False,
- 'error': "Invalid model_id: Must be an integer"
- }, status=400)
-
- # Convert model_version_id to int if provided
- if data.get('model_version_id'):
- try:
- model_version_id = int(data.get('model_version_id'))
- except (TypeError, ValueError):
- return web.json_response({
- 'success': False,
- 'error': "Invalid model_version_id: Must be an integer"
- }, status=400)
-
- # At least one identifier is required
- if not model_id and not model_version_id:
- return web.json_response({
- 'success': False,
- 'error': "Missing required parameter: Please provide either 'model_id' or 'model_version_id'"
- }, status=400)
-
- use_default_paths = data.get('use_default_paths', False)
-
- # Pass the download_id to download_from_civitai
- result = await download_manager.download_from_civitai(
- model_id=model_id,
- model_version_id=model_version_id,
- save_dir=data.get('model_root'),
- relative_path=data.get('relative_path', ''),
- use_default_paths=use_default_paths,
- progress_callback=progress_callback,
- download_id=download_id # Pass download_id explicitly
- )
-
- # Include download_id in the response
- result['download_id'] = download_id
-
- if not result.get('success', False):
- error_message = result.get('error', 'Unknown error')
-
- return web.json_response({
- 'success': False,
- 'error': error_message,
- 'download_id': download_id
- }, status=500)
-
- return web.json_response(result)
-
- except Exception as e:
- error_message = str(e)
-
- # Check if this might be an early access error
- if '401' in error_message:
- logger.warning(f"Early access error (401): {error_message}")
- return web.json_response({
- 'success': False,
- 'error': "Early Access Restriction: This model requires purchase. Please buy early access on Civitai.com."
- }, status=401)
-
- logger.error(f"Error downloading model: {error_message}")
- return web.json_response({
- 'success': False,
- 'error': error_message
- }, status=500)
-
- @staticmethod
- async def handle_cancel_download(request: web.Request) -> web.Response:
- """Handle cancellation of a download task
-
- Args:
- request: The aiohttp request
-
- Returns:
- web.Response: The HTTP response
- """
- try:
- download_manager = await ServiceRegistry.get_download_manager()
- download_id = request.match_info.get('download_id')
- if not download_id:
- return web.json_response({
- 'success': False,
- 'error': 'Download ID is required'
- }, status=400)
-
- result = await download_manager.cancel_download(download_id)
-
- # Notify clients about cancellation via WebSocket
- await ws_manager.broadcast_download_progress(download_id, {
- 'status': 'cancelled',
- 'progress': 0,
- 'download_id': download_id,
- 'message': 'Download cancelled by user'
- })
-
- return web.json_response(result)
-
- except Exception as e:
- logger.error(f"Error cancelling download: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- @staticmethod
- async def handle_list_downloads(request: web.Request) -> web.Response:
- """Get list of active downloads
-
- Args:
- request: The aiohttp request
-
- Returns:
- web.Response: The HTTP response with list of downloads
- """
- try:
- download_manager = await ServiceRegistry.get_download_manager()
- result = await download_manager.get_active_downloads()
- return web.json_response(result)
- except Exception as e:
- logger.error(f"Error listing downloads: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- @staticmethod
- async def handle_bulk_delete_models(request: web.Request, scanner) -> web.Response:
- """Handle bulk deletion of models
-
- Args:
- request: The aiohttp request
- scanner: The model scanner instance with cache management methods
-
- Returns:
- web.Response: The HTTP response
- """
- try:
- data = await request.json()
- file_paths = data.get('file_paths', [])
-
- if not file_paths:
- return web.json_response({
- 'success': False,
- 'error': 'No file paths provided for deletion'
- }, status=400)
-
- # Use the scanner's bulk delete method to handle all cache and file operations
- result = await scanner.bulk_delete_models(file_paths)
-
- return web.json_response({
- 'success': result.get('success', False),
- 'total_deleted': result.get('total_deleted', 0),
- 'total_attempted': result.get('total_attempted', len(file_paths)),
- 'results': result.get('results', [])
- })
-
- except Exception as e:
- logger.error(f"Error in bulk delete: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- @staticmethod
- async def handle_relink_civitai(request: web.Request, scanner) -> web.Response:
- """Handle CivitAI metadata re-linking request by model ID and/or version ID
-
- Args:
- request: The aiohttp request
- scanner: The model scanner instance with cache management methods
-
- Returns:
- web.Response: The HTTP response
- """
- try:
- data = await request.json()
- file_path = data.get('file_path')
- model_id = int(data.get('model_id'))
- model_version_id = None
- if data.get('model_version_id'):
- model_version_id = int(data.get('model_version_id'))
-
- if not file_path or not model_id:
- return web.json_response({"success": False, "error": "Both file_path and model_id are required"}, status=400)
-
- metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
-
- # Check if model metadata exists
- local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
-
- # Get metadata provider and fetch metadata using get_model_version which includes more comprehensive data
- metadata_provider = await get_default_metadata_provider()
- civitai_metadata = await metadata_provider.get_model_version(model_id, model_version_id)
- if not civitai_metadata:
- error_msg = f"Model version not found on CivitAI for ID: {model_id}"
- if model_version_id:
- error_msg += f" with version: {model_version_id}"
- return web.json_response({"success": False, "error": error_msg}, status=404)
-
- # Try to find the primary model file to get the SHA256 hash
- primary_model_file = None
- for file in civitai_metadata.get('files', []):
- if file.get('primary', False) and file.get('type') == 'Model':
- primary_model_file = file
- break
-
- # Update the SHA256 hash in local metadata if available
- if primary_model_file and primary_model_file.get('hashes', {}).get('SHA256'):
- local_metadata['sha256'] = primary_model_file['hashes']['SHA256'].lower()
-
- # Update metadata with CivitAI information
- await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, metadata_provider)
-
- # Update the cache
- await scanner.update_single_model_cache(file_path, file_path, local_metadata)
-
- return web.json_response({
- "success": True,
- "message": f"Model successfully re-linked to Civitai model {model_id}" +
- (f" version {model_version_id}" if model_version_id else ""),
- "hash": local_metadata.get('sha256', '')
- })
-
- except Exception as e:
- logger.error(f"Error re-linking to CivitAI: {e}", exc_info=True)
- return web.json_response({"success": False, "error": str(e)}, status=500)
-
- @staticmethod
- async def handle_verify_duplicates(request: web.Request, scanner) -> web.Response:
- """Handle verification of duplicate model hashes
-
- Args:
- request: The aiohttp request
- scanner: The model scanner instance with cache management methods
-
- Returns:
- web.Response: The HTTP response with verification results
- """
- try:
- data = await request.json()
- file_paths = data.get('file_paths', [])
-
- if not file_paths:
- return web.json_response({
- 'success': False,
- 'error': 'No file paths provided for verification'
- }, status=400)
-
- # Results tracking
- results = {
- 'verified_as_duplicates': True, # Start true, set to false if any mismatch
- 'mismatched_files': [],
- 'new_hash_map': {}
- }
-
- # Get expected hash from the first file's metadata
- expected_hash = None
- first_metadata_path = os.path.splitext(file_paths[0])[0] + '.metadata.json'
- first_metadata = await ModelRouteUtils.load_local_metadata(first_metadata_path)
- if first_metadata and 'sha256' in first_metadata:
- expected_hash = first_metadata['sha256'].lower()
-
- # Process each file
- for file_path in file_paths:
- # Skip files that don't exist
- if not os.path.exists(file_path):
- continue
-
- # Calculate actual hash
- try:
- from .file_utils import calculate_sha256
- actual_hash = await calculate_sha256(file_path)
-
- # Get metadata
- metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
- metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
-
- # Compare hashes
- stored_hash = metadata.get('sha256', '').lower();
-
- # Set expected hash from first file if not yet set
- if not expected_hash:
- expected_hash = stored_hash;
-
- # Check if hash matches expected hash
- if actual_hash != expected_hash:
- results['verified_as_duplicates'] = False
- results['mismatched_files'].append(file_path)
- results['new_hash_map'][file_path] = actual_hash
-
- # Check if stored hash needs updating
- if actual_hash != stored_hash:
- # Update metadata with actual hash
- metadata['sha256'] = actual_hash
-
- # Save updated metadata
- await MetadataManager.save_metadata(file_path, metadata)
-
- # Update cache
- await scanner.update_single_model_cache(file_path, file_path, metadata)
- except Exception as e:
- logger.error(f"Error verifying hash for {file_path}: {e}")
- results['mismatched_files'].append(file_path)
- results['new_hash_map'][file_path] = "error_calculating_hash"
- results['verified_as_duplicates'] = False
-
- return web.json_response({
- 'success': True,
- **results
- })
-
- except Exception as e:
- logger.error(f"Error verifying duplicate models: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- @staticmethod
- async def handle_rename_model(request: web.Request, scanner) -> web.Response:
- """Handle renaming a model file and its associated files
-
- Args:
- request: The aiohttp request
- scanner: The model scanner instance
-
- Returns:
- web.Response: The HTTP response
- """
- try:
- data = await request.json()
- file_path = data.get('file_path')
- new_file_name = data.get('new_file_name')
-
- if not file_path or not new_file_name:
- return web.json_response({
- 'success': False,
- 'error': 'File path and new file name are required'
- }, status=400)
-
- # Validate the new file name (no path separators or invalid characters)
- invalid_chars = ['/', '\\', ':', '*', '?', '"', '<', '>', '|']
- if any(char in new_file_name for char in invalid_chars):
- return web.json_response({
- 'success': False,
- 'error': 'Invalid characters in file name'
- }, status=400)
-
- # Get the directory and current file name
- target_dir = os.path.dirname(file_path)
- old_file_name = os.path.splitext(os.path.basename(file_path))[0]
-
- # Check if the target file already exists
- new_file_path = os.path.join(target_dir, f"{new_file_name}.safetensors").replace(os.sep, '/')
- if os.path.exists(new_file_path):
- return web.json_response({
- 'success': False,
- 'error': 'A file with this name already exists'
- }, status=400)
-
- # Define the patterns for associated files
- patterns = [
- f"{old_file_name}.safetensors", # Required
- f"{old_file_name}.metadata.json",
- f"{old_file_name}.metadata.json.bak",
- ]
-
- # Add all preview file extensions
- for ext in PREVIEW_EXTENSIONS:
- patterns.append(f"{old_file_name}{ext}")
-
- # Find all matching files
- existing_files = []
- for pattern in patterns:
- path = os.path.join(target_dir, pattern)
- if os.path.exists(path):
- existing_files.append((path, pattern))
-
- # Get the hash from the main file to update hash index
- hash_value = None
- metadata = None
- metadata_path = os.path.join(target_dir, f"{old_file_name}.metadata.json")
-
- if os.path.exists(metadata_path):
- metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
- hash_value = metadata.get('sha256')
- logger.info(f"hash_value: {hash_value}, metadata_path: {metadata_path}, metadata: {metadata}")
- # Rename all files
- renamed_files = []
- new_metadata_path = None
- new_preview = None
-
- for old_path, pattern in existing_files:
- # Get the file extension like .safetensors or .metadata.json
- ext = ModelRouteUtils.get_multipart_ext(pattern)
-
- # Create the new path
- new_path = os.path.join(target_dir, f"{new_file_name}{ext}").replace(os.sep, '/')
-
- # Rename the file
- os.rename(old_path, new_path)
- renamed_files.append(new_path)
-
- # Keep track of metadata path for later update
- if ext == '.metadata.json':
- new_metadata_path = new_path
-
- # Update the metadata file with new file name and paths
- if new_metadata_path and metadata:
- # Update file_name, file_path and preview_url in metadata
- metadata['file_name'] = new_file_name
- metadata['file_path'] = new_file_path
-
- # Update preview_url if it exists
- if 'preview_url' in metadata and metadata['preview_url']:
- old_preview = metadata['preview_url']
- ext = ModelRouteUtils.get_multipart_ext(old_preview)
- new_preview = os.path.join(target_dir, f"{new_file_name}{ext}").replace(os.sep, '/')
- metadata['preview_url'] = new_preview
-
- # Save updated metadata
- await MetadataManager.save_metadata(new_file_path, metadata)
-
- # Update the scanner cache
- if metadata:
- await scanner.update_single_model_cache(file_path, new_file_path, metadata)
-
- # Update recipe files and cache if hash is available and recipe_scanner exists
- if hash_value and hasattr(scanner, 'update_lora_filename_by_hash'):
- recipe_scanner = await ServiceRegistry.get_recipe_scanner()
- if recipe_scanner:
- recipes_updated, cache_updated = await recipe_scanner.update_lora_filename_by_hash(hash_value, new_file_name)
- logger.info(f"Updated {recipes_updated} recipe files and {cache_updated} cache entries for renamed model")
-
- return web.json_response({
- 'success': True,
- 'new_file_path': new_file_path,
- 'new_preview_path': config.get_preview_static_url(new_preview),
- 'renamed_files': renamed_files,
- 'reload_required': False
- })
-
- except Exception as e:
- logger.error(f"Error renaming model: {e}", exc_info=True)
- return web.json_response({
- 'success': False,
- 'error': str(e)
- }, status=500)
-
- @staticmethod
- async def handle_save_metadata(request: web.Request, scanner) -> web.Response:
- """Handle saving metadata updates
-
- Args:
- request: The aiohttp request
- scanner: The model scanner instance
-
- Returns:
- web.Response: The HTTP response
- """
- try:
- data = await request.json()
- file_path = data.get('file_path')
- if not file_path:
- return web.Response(text='File path is required', status=400)
-
- # Remove file path from data to avoid saving it
- metadata_updates = {k: v for k, v in data.items() if k != 'file_path'}
-
- # Get metadata file path
- metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
-
- # Load existing metadata
- metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
-
- # Handle nested updates (for civitai.trainedWords)
- for key, value in metadata_updates.items():
- if isinstance(value, dict) and key in metadata and isinstance(metadata[key], dict):
- # Deep update for nested dictionaries
- for nested_key, nested_value in value.items():
- metadata[key][nested_key] = nested_value
- else:
- # Regular update for top-level keys
- metadata[key] = value
-
- # Save updated metadata
- await MetadataManager.save_metadata(file_path, metadata)
-
- # Update cache
- await scanner.update_single_model_cache(file_path, file_path, metadata)
-
- # If model_name was updated, resort the cache
- if 'model_name' in metadata_updates:
- cache = await scanner.get_cached_data()
- await cache.resort()
-
- return web.json_response({'success': True})
-
- except Exception as e:
- logger.error(f"Error saving metadata: {e}", exc_info=True)
- return web.Response(text=str(e), status=500)
-
- @staticmethod
- async def handle_add_tags(request: web.Request, scanner) -> web.Response:
- """Handle adding tags to model metadata
-
- Args:
- request: The aiohttp request
- scanner: The model scanner instance
-
- Returns:
- web.Response: The HTTP response
- """
- try:
- data = await request.json()
- file_path = data.get('file_path')
- new_tags = data.get('tags', [])
-
- if not file_path:
- return web.Response(text='File path is required', status=400)
-
- if not isinstance(new_tags, list):
- return web.Response(text='Tags must be a list', status=400)
-
- # Get metadata file path
- metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
-
- # Load existing metadata
- metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
-
- # Get existing tags (case insensitive)
- existing_tags = metadata.get('tags', [])
- existing_tags_lower = [tag.lower() for tag in existing_tags]
-
- # Add new tags that don't already exist (case insensitive check)
- tags_added = []
- for tag in new_tags:
- if isinstance(tag, str) and tag.strip():
- tag_stripped = tag.strip()
- if tag_stripped.lower() not in existing_tags_lower:
- existing_tags.append(tag_stripped)
- existing_tags_lower.append(tag_stripped.lower())
- tags_added.append(tag_stripped)
-
- # Update metadata with combined tags
- metadata['tags'] = existing_tags
-
- # Save updated metadata
- await MetadataManager.save_metadata(file_path, metadata)
-
- # Update cache
- await scanner.update_single_model_cache(file_path, file_path, metadata)
-
- return web.json_response({
- 'success': True,
- 'tags': existing_tags
- })
-
- except Exception as e:
- logger.error(f"Error adding tags: {e}", exc_info=True)
- return web.Response(text=str(e), status=500)
diff --git a/py/utils/usage_stats.py b/py/utils/usage_stats.py
index 08021964..fc9d4d72 100644
--- a/py/utils/usage_stats.py
+++ b/py/utils/usage_stats.py
@@ -1,6 +1,5 @@
import os
import json
-import sys
import time
import asyncio
import logging
@@ -12,7 +11,7 @@ from ..config import config
from ..services.service_registry import ServiceRegistry
# Check if running in standalone mode
-standalone_mode = 'nodes' not in sys.modules
+standalone_mode = os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "0"
if not standalone_mode:
from ..metadata_collector.metadata_registry import MetadataRegistry
@@ -62,7 +61,7 @@ class UsageStats:
self._bg_task = asyncio.create_task(self._background_processor())
self._initialized = True
- logger.info("Usage statistics tracker initialized")
+ logger.debug("Usage statistics tracker initialized")
def _get_stats_file_path(self) -> str:
"""Get the path to the stats JSON file"""
@@ -164,7 +163,7 @@ class UsageStats:
if "last_save_time" in loaded_stats:
self.stats["last_save_time"] = loaded_stats["last_save_time"]
- logger.info(f"Loaded usage statistics from {self._stats_file_path}")
+ logger.debug(f"Loaded usage statistics from {self._stats_file_path}")
except Exception as e:
logger.error(f"Error loading usage statistics: {e}")
diff --git a/pyproject.toml b/pyproject.toml
index 202fb961..8ac3aebd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,7 @@
[project]
name = "comfyui-lora-manager"
description = "Revolutionize your workflow with the ultimate LoRA companion for ComfyUI!"
-version = "0.9.2"
+version = "0.9.5"
license = {file = "LICENSE"}
dependencies = [
"aiohttp",
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 00000000..6f82885c
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,11 @@
+[pytest]
+addopts = -v --import-mode=importlib
+testpaths = tests
+python_files = test_*.py
+python_classes = Test*
+python_functions = test_*
+# Register async marker for coroutine-style tests
+markers =
+ asyncio: execute test within asyncio event loop
+# Skip problematic directories to avoid import conflicts
+norecursedirs = .git .tox dist build *.egg __pycache__ py
\ No newline at end of file
diff --git a/refs/civitai_api_model_by_modelId.json b/refs/civitai_api_model_by_modelId.json
new file mode 100644
index 00000000..2cd20f20
--- /dev/null
+++ b/refs/civitai_api_model_by_modelId.json
@@ -0,0 +1,110 @@
+{
+ "id": 1231067,
+ "name": "Vivid Impressions Storybook Style",
+ "description": "If you'd like to support me, feel free to visit my Ko-Fi page. ❤️ Please share your images using the \"+add post \" button below. It supports the creators. Thanks! 💕 Trigger word: ppstorybookStrength: 0.8, experiment as you like ",
+ "allowNoCredit": true,
+ "allowCommercialUse": [
+ "Image",
+ "RentCivit",
+ "Rent",
+ "Sell"
+ ],
+ "allowDerivatives": true,
+ "allowDifferentLicense": true,
+ "type": "LORA",
+ "minor": false,
+ "sfwOnly": false,
+ "poi": false,
+ "nsfw": false,
+ "nsfwLevel": 1,
+ "availability": "Public",
+ "cosmetic": null,
+ "supportsGeneration": true,
+ "stats": {
+ "downloadCount": 2183,
+ "favoriteCount": 0,
+ "thumbsUpCount": 416,
+ "thumbsDownCount": 0,
+ "commentCount": 12,
+ "ratingCount": 0,
+ "rating": 0,
+ "tippedAmountCount": 360
+ },
+ "creator": {
+ "username": "PixelPawsAI",
+ "image": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/f3a1aa7c-0159-4dd8-884a-1e7ceb350f96/width=96/PixelPawsAI.jpeg"
+ },
+ "tags": [
+ "style",
+ "illustration",
+ "storybook"
+ ],
+ "modelVersions": [
+ {
+ "id": 1387174,
+ "index": 0,
+ "name": "v1.0",
+ "baseModel": "Flux.1 D",
+ "baseModelType": "Standard",
+ "createdAt": "2025-02-08T11:15:47.197Z",
+ "publishedAt": "2025-02-08T11:29:04.487Z",
+ "status": "Published",
+ "availability": "Public",
+ "nsfwLevel": 1,
+ "trainedWords": [
+ "ppstorybook"
+ ],
+ "covered": true,
+ "stats": {
+ "downloadCount": 2183,
+ "ratingCount": 0,
+ "rating": 0,
+ "thumbsUpCount": 416,
+ "thumbsDownCount": 0
+ },
+ "files": [
+ {
+ "id": 1289799,
+ "sizeKB": 18829.1484375,
+ "name": "pp-storybook_rank2_bf16.safetensors",
+ "type": "Model",
+ "pickleScanResult": "Success",
+ "pickleScanMessage": "No Pickle imports",
+ "virusScanResult": "Success",
+ "virusScanMessage": null,
+ "scannedAt": "2025-02-08T11:21:04.247Z",
+ "metadata": {
+ "format": "SafeTensor"
+ },
+ "hashes": {
+ "AutoV1": "F414C813",
+ "AutoV2": "9753338AB6",
+ "SHA256": "9753338AB693CA82BF89ED77A5D1912879E40051463EC6E330FB9866CE798668",
+ "CRC32": "A65AE7B3",
+ "BLAKE3": "A5F8AB95AC2486345E4ACCAE541FF19D97ED53EFB0A7CC9226636975A0437591",
+ "AutoV3": "34A22376739D"
+ },
+ "downloadUrl": "https://civitai.com/api/download/models/1387174",
+ "primary": true
+ }
+ ],
+ "images": [
+ {
+ "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/42b875cf-c62b-41fa-a349-383b7f074351/original=true/56547310.jpeg",
+ "nsfwLevel": 1,
+ "width": 832,
+ "height": 1216,
+ "hash": "U5IiO6s-4Vn+0~EO^5xa00VsL#IU_O?E7yWC",
+ "type": "image",
+ "minor": false,
+ "poi": false,
+ "hasMeta": true,
+ "hasPositivePrompt": true,
+ "onSite": false,
+ "remixOfId": null
+ }
+ ],
+ "downloadUrl": "https://civitai.com/api/download/models/1387174"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/refs/flux_output.json b/refs/flux_output.json
deleted file mode 100644
index 7d19efc7..00000000
--- a/refs/flux_output.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "loras": " ",
- "gen_params": {
- "prompt": "in the style of ppWhimsy, ral-frctlgmtry, ppstorybook,Stylized geek cat artist with glasses and a paintbrush, smiling at the viewer while holding a sign that reads 'Stay tuned!', solid white background",
- "negative_prompt": "",
- "steps": "25",
- "sampler": "dpmpp_2m",
- "scheduler": "beta",
- "cfg": "1",
- "seed": "48",
- "guidance": 3.5,
- "size": "896x1152",
- "clip_skip": "2"
- }
-}
\ No newline at end of file
diff --git a/refs/flux_prompt.json b/refs/flux_prompt.json
deleted file mode 100644
index 82a51077..00000000
--- a/refs/flux_prompt.json
+++ /dev/null
@@ -1,314 +0,0 @@
-{
- "6": {
- "inputs": {
- "text": [
- "46",
- 0
- ],
- "clip": [
- "58",
- 1
- ]
- },
- "class_type": "CLIPTextEncode",
- "_meta": {
- "title": "CLIP Text Encode (Positive Prompt)"
- }
- },
- "8": {
- "inputs": {
- "samples": [
- "31",
- 0
- ],
- "vae": [
- "39",
- 0
- ]
- },
- "class_type": "VAEDecode",
- "_meta": {
- "title": "VAE Decode"
- }
- },
- "27": {
- "inputs": {
- "width": 896,
- "height": 1152,
- "batch_size": 1
- },
- "class_type": "EmptySD3LatentImage",
- "_meta": {
- "title": "EmptySD3LatentImage"
- }
- },
- "31": {
- "inputs": {
- "seed": 44,
- "steps": 25,
- "cfg": 1,
- "sampler_name": "dpmpp_2m",
- "scheduler": "beta",
- "denoise": 1,
- "model": [
- "58",
- 0
- ],
- "positive": [
- "35",
- 0
- ],
- "negative": [
- "33",
- 0
- ],
- "latent_image": [
- "27",
- 0
- ]
- },
- "class_type": "KSampler",
- "_meta": {
- "title": "KSampler"
- }
- },
- "33": {
- "inputs": {
- "text": "",
- "clip": [
- "58",
- 1
- ]
- },
- "class_type": "CLIPTextEncode",
- "_meta": {
- "title": "CLIP Text Encode (Negative Prompt)"
- }
- },
- "35": {
- "inputs": {
- "guidance": 3.5,
- "conditioning": [
- "6",
- 0
- ]
- },
- "class_type": "FluxGuidance",
- "_meta": {
- "title": "FluxGuidance"
- }
- },
- "37": {
- "inputs": {
- "unet_name": "flux\\flux1-dev-fp8-e4m3fn.safetensors",
- "weight_dtype": "fp8_e4m3fn_fast"
- },
- "class_type": "UNETLoader",
- "_meta": {
- "title": "Load Diffusion Model"
- }
- },
- "38": {
- "inputs": {
- "clip_name1": "t5xxl_fp8_e4m3fn.safetensors",
- "clip_name2": "clip_l.safetensors",
- "type": "flux",
- "device": "default"
- },
- "class_type": "DualCLIPLoader",
- "_meta": {
- "title": "DualCLIPLoader"
- }
- },
- "39": {
- "inputs": {
- "vae_name": "flux1\\ae.safetensors"
- },
- "class_type": "VAELoader",
- "_meta": {
- "title": "Load VAE"
- }
- },
- "46": {
- "inputs": {
- "string1": [
- "59",
- 0
- ],
- "string2": [
- "51",
- 0
- ],
- "delimiter": ","
- },
- "class_type": "JoinStrings",
- "_meta": {
- "title": "Join Strings"
- }
- },
- "50": {
- "inputs": {
- "images": [
- "8",
- 0
- ]
- },
- "class_type": "PreviewImage",
- "_meta": {
- "title": "Preview Image"
- }
- },
- "51": {
- "inputs": {
- "string": "Stylized geek cat artist with glasses and a paintbrush, smiling at the viewer while holding a sign that reads 'Stay tuned!', solid white background",
- "strip_newlines": true
- },
- "class_type": "StringConstantMultiline",
- "_meta": {
- "title": "positive"
- }
- },
- "58": {
- "inputs": {
- "text": "",
- "loras": [
- {
- "name": "pp-enchanted-whimsy",
- "strength": "0.90",
- "active": false
- },
- {
- "name": "ral-frctlgmtry_flux",
- "strength": "0.85",
- "active": false
- },
- {
- "name": "pp-storybook_rank2_bf16",
- "strength": 0.8,
- "active": true
- },
- {
- "name": "__dummy_item1__",
- "strength": 0,
- "active": false,
- "_isDummy": true
- },
- {
- "name": "__dummy_item2__",
- "strength": 0,
- "active": false,
- "_isDummy": true
- }
- ],
- "model": [
- "37",
- 0
- ],
- "clip": [
- "38",
- 0
- ]
- },
- "class_type": "Lora Loader (LoraManager)",
- "_meta": {
- "title": "Lora Loader (LoraManager)"
- }
- },
- "59": {
- "inputs": {
- "group_mode": "",
- "toggle_trigger_words": [
- {
- "text": "ppstorybook",
- "active": false
- },
- {
- "text": "__dummy_item__",
- "active": false,
- "_isDummy": true
- },
- {
- "text": "__dummy_item__",
- "active": false,
- "_isDummy": true
- }
- ],
- "orinalMessage": "ppstorybook",
- "trigger_words": [
- "58",
- 2
- ]
- },
- "class_type": "TriggerWord Toggle (LoraManager)",
- "_meta": {
- "title": "TriggerWord Toggle (LoraManager)"
- }
- },
- "61": {
- "inputs": {
- "add_noise": "enable",
- "noise_seed": 1111423448930884,
- "steps": 20,
- "cfg": 8,
- "sampler_name": "euler",
- "scheduler": "normal",
- "start_at_step": 0,
- "end_at_step": 10000,
- "return_with_leftover_noise": "disable"
- },
- "class_type": "KSamplerAdvanced",
- "_meta": {
- "title": "KSampler (Advanced)"
- }
- },
- "62": {
- "inputs": {
- "sigmas": [
- "63",
- 0
- ]
- },
- "class_type": "SamplerCustomAdvanced",
- "_meta": {
- "title": "SamplerCustomAdvanced"
- }
- },
- "63": {
- "inputs": {
- "scheduler": "normal",
- "steps": 20,
- "denoise": 1
- },
- "class_type": "BasicScheduler",
- "_meta": {
- "title": "BasicScheduler"
- }
- },
- "64": {
- "inputs": {
- "seed": 1089899258710474,
- "steps": 20,
- "cfg": 8,
- "sampler_name": "euler",
- "scheduler": "normal",
- "denoise": 1
- },
- "class_type": "KSampler",
- "_meta": {
- "title": "KSampler"
- }
- },
- "65": {
- "inputs": {
- "text": ",Stylized geek cat artist with glasses and a paintbrush, smiling at the viewer while holding a sign that reads 'Stay tuned!', solid white background",
- "anything": [
- "46",
- 0
- ]
- },
- "class_type": "easy showAnything",
- "_meta": {
- "title": "Show Any"
- }
- }
-}
\ No newline at end of file
diff --git a/refs/output.json b/refs/output.json
deleted file mode 100644
index aa5a9702..00000000
--- a/refs/output.json
+++ /dev/null
@@ -1,258 +0,0 @@
-{
- "id": 649516,
- "name": "Cynthia -シロナ - Pokemon Diamond and Pearl - PDXL LORA",
- "description": "Warning: Without Adetailer eyes are fucked (rainbow color and artefact)
Trained on Pony Diffusion V6 XL with 63 pictures. Best result with weight between : 0.8-1.
Basic prompts : 1girl, cynthia \\(pokemon\\), blonde hair, hair over one eye, very long hair, grey eyes, eyelashes, hair ornament Outfit prompts : fur collar, black coat, fur-trimmed coat, long sleeves, black pants, black shirt, high heels
Reviews are really appreciated, i love to see the community use my work, that's why I share it. If you like my work, you can tip me here.
Got a specific request ? I'm open for commission on my kofi or fiverr gig *! If you provide enough data, OCs are accepted
",
- "allowNoCredit": true,
- "allowCommercialUse": [
- "Image",
- "RentCivit"
- ],
- "allowDerivatives": true,
- "allowDifferentLicense": true,
- "type": "LORA",
- "minor": false,
- "sfwOnly": false,
- "poi": false,
- "nsfw": false,
- "nsfwLevel": 29,
- "availability": "Public",
- "cosmetic": null,
- "supportsGeneration": true,
- "stats": {
- "downloadCount": 811,
- "favoriteCount": 0,
- "thumbsUpCount": 175,
- "thumbsDownCount": 0,
- "commentCount": 4,
- "ratingCount": 0,
- "rating": 0,
- "tippedAmountCount": 10
- },
- "creator": {
- "username": "Konan",
- "image": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/7cd552a1-60fe-4baf-a0e4-f7d5d5381711/width=96/Konan.jpeg"
- },
- "tags": [
- "anime",
- "character",
- "cynthia",
- "woman",
- "pokemon",
- "pokegirl"
- ],
- "modelVersions": [
- {
- "id": 726676,
- "index": 0,
- "name": "v1.0",
- "baseModel": "Pony",
- "createdAt": "2024-08-16T01:13:16.099Z",
- "publishedAt": "2024-08-16T01:14:44.984Z",
- "status": "Published",
- "availability": "Public",
- "nsfwLevel": 29,
- "trainedWords": [
- "1girl, cynthia \\(pokemon\\), blonde hair, hair over one eye, very long hair, grey eyes, eyelashes, hair ornament",
- "fur collar, black coat, fur-trimmed coat, long sleeves, black pants, black shirt, high heels"
- ],
- "covered": true,
- "stats": {
- "downloadCount": 811,
- "ratingCount": 0,
- "rating": 0,
- "thumbsUpCount": 175,
- "thumbsDownCount": 0
- },
- "files": [
- {
- "id": 641092,
- "sizeKB": 56079.65234375,
- "name": "CynthiaXL.safetensors",
- "type": "Model",
- "pickleScanResult": "Success",
- "pickleScanMessage": "No Pickle imports",
- "virusScanResult": "Success",
- "virusScanMessage": null,
- "scannedAt": "2024-08-16T01:17:19.087Z",
- "metadata": {
- "format": "SafeTensor"
- },
- "hashes": {},
- "downloadUrl": "https://civitai.com/api/download/models/726676",
- "primary": true
- }
- ],
- "images": [
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/b346d757-2b59-4aeb-9f09-3bee2724519d/width=1248/24511993.jpeg",
- "nsfwLevel": 1,
- "width": 1248,
- "height": 1824,
- "hash": "UqNc==RP.9s+~pxvIst7kWWBWBjY%MWBt7WB",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/fc132ac0-cc1c-4b68-a1d7-5b97b0996ac2/width=1248/24511997.jpeg",
- "nsfwLevel": 1,
- "width": 1248,
- "height": 1824,
- "hash": "UMGSS+?tTw.60MIX9cbb~WxHRRR-NEtLRiR%",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/7b3237d1-e672-466a-85d0-cc5dd42ab130/width=1160/24512001.jpeg",
- "nsfwLevel": 4,
- "width": 1160,
- "height": 1696,
- "hash": "U9NA6f~o00%h00wvIYt74:ER-=D%5600DiE1",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/ccd7d11d-4fa9-4434-85a1-fb999312e60d/width=1248/24511991.jpeg",
- "nsfwLevel": 1,
- "width": 1248,
- "height": 1824,
- "hash": "UyNTg.j?~qxu?aoLRkj]%MfkM{jZaya}a#ax",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/1743be6d-7fe5-4b55-9f19-c931618fa259/width=1248/24511996.jpeg",
- "nsfwLevel": 4,
- "width": 1248,
- "height": 1824,
- "hash": "UGOC~n^+?w~6Tx_4oM^$yYEkMds74:9F#*xY",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/91693c98-d037-4489-882c-100eb26019a0/width=1160/24512010.jpeg",
- "nsfwLevel": 4,
- "width": 1160,
- "height": 1696,
- "hash": "UJI}kp^-Kl%hXAIX4;Nf^+M|9GRP0Mt8%L%2",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/49c7a294-ac5b-4832-98e5-2acd0f1a8782/width=1248/24512017.jpeg",
- "nsfwLevel": 4,
- "width": 1248,
- "height": 1824,
- "hash": "UML;8Qn|9G%3mnWA4nWFMf%N?Hae~qog-oNF",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/d7b442f2-6ead-4a7a-9578-54d9ec2ff148/width=1248/24512015.jpeg",
- "nsfwLevel": 1,
- "width": 1248,
- "height": 1824,
- "hash": "UPGR#kt8xw%M0LWC9bWC?wxtR*NLM^jrxWM|",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/d840f1e9-3dd3-4531-b83a-1ba2c6b7feaa/width=1160/24512004.jpeg",
- "nsfwLevel": 8,
- "width": 1160,
- "height": 1696,
- "hash": "ULNm1i_39wi^*I%hDiM_tlo#xuV?^kNIxCs,",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/520387ae-c176-43e3-92bd-5cd2a672475e/width=1248/24512012.jpeg",
- "nsfwLevel": 4,
- "width": 1248,
- "height": 1824,
- "hash": "URM%l.%M.9Ip~poIkExu_3V@M|xuD%oJM{D*",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/9ea28b94-f326-4776-83ff-851cc203c627/width=1248/24511988.jpeg",
- "nsfwLevel": 1,
- "width": 1248,
- "height": 1824,
- "hash": "U-PZloog_Nxut6j]WXWB-;j?IVa#ofaxj]j]",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- },
- {
- "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/2e749dbb-7d5a-48f1-8e29-fea5022a5fe9/width=1248/24522268.jpeg",
- "nsfwLevel": 16,
- "width": 1248,
- "height": 1824,
- "hash": "UPLgtm9Z0z=|0yRRE2-A9rWAoNE1~DwOr=t7",
- "type": "image",
- "minor": false,
- "poi": false,
- "hasMeta": true,
- "hasPositivePrompt": true,
- "onSite": false,
- "remixOfId": null
- }
- ],
- "downloadUrl": "https://civitai.com/api/download/models/726676"
- }
- ]
-}
\ No newline at end of file
diff --git a/refs/prompt.json b/refs/prompt.json
deleted file mode 100644
index 96f62b0a..00000000
--- a/refs/prompt.json
+++ /dev/null
@@ -1,401 +0,0 @@
-{
- "6": {
- "inputs": {
- "text": [
- "301",
- 0
- ],
- "clip": [
- "299",
- 1
- ]
- },
- "class_type": "CLIPTextEncode",
- "_meta": {
- "title": "CLIP Text Encode (Prompt)"
- }
- },
- "8": {
- "inputs": {
- "samples": [
- "13",
- 1
- ],
- "vae": [
- "10",
- 0
- ]
- },
- "class_type": "VAEDecode",
- "_meta": {
- "title": "VAE Decode"
- }
- },
- "10": {
- "inputs": {
- "vae_name": "flux1\\ae.safetensors"
- },
- "class_type": "VAELoader",
- "_meta": {
- "title": "Load VAE"
- }
- },
- "11": {
- "inputs": {
- "clip_name1": "t5xxl_fp8_e4m3fn.safetensors",
- "clip_name2": "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors",
- "type": "flux",
- "device": "default"
- },
- "class_type": "DualCLIPLoader",
- "_meta": {
- "title": "DualCLIPLoader"
- }
- },
- "13": {
- "inputs": {
- "noise": [
- "147",
- 0
- ],
- "guider": [
- "22",
- 0
- ],
- "sampler": [
- "16",
- 0
- ],
- "sigmas": [
- "17",
- 0
- ],
- "latent_image": [
- "48",
- 0
- ]
- },
- "class_type": "SamplerCustomAdvanced",
- "_meta": {
- "title": "SamplerCustomAdvanced"
- }
- },
- "16": {
- "inputs": {
- "sampler_name": "dpmpp_2m"
- },
- "class_type": "KSamplerSelect",
- "_meta": {
- "title": "KSamplerSelect"
- }
- },
- "17": {
- "inputs": {
- "scheduler": "beta",
- "steps": [
- "246",
- 0
- ],
- "denoise": 1,
- "model": [
- "28",
- 0
- ]
- },
- "class_type": "BasicScheduler",
- "_meta": {
- "title": "BasicScheduler"
- }
- },
- "22": {
- "inputs": {
- "model": [
- "28",
- 0
- ],
- "conditioning": [
- "29",
- 0
- ]
- },
- "class_type": "BasicGuider",
- "_meta": {
- "title": "BasicGuider"
- }
- },
- "28": {
- "inputs": {
- "max_shift": 1.1500000000000001,
- "base_shift": 0.5,
- "width": [
- "48",
- 1
- ],
- "height": [
- "48",
- 2
- ],
- "model": [
- "299",
- 0
- ]
- },
- "class_type": "ModelSamplingFlux",
- "_meta": {
- "title": "ModelSamplingFlux"
- }
- },
- "29": {
- "inputs": {
- "guidance": 3.5,
- "conditioning": [
- "6",
- 0
- ]
- },
- "class_type": "FluxGuidance",
- "_meta": {
- "title": "FluxGuidance"
- }
- },
- "48": {
- "inputs": {
- "resolution": "832x1216 (0.68)",
- "batch_size": 1,
- "width_override": 0,
- "height_override": 0
- },
- "class_type": "SDXLEmptyLatentSizePicker+",
- "_meta": {
- "title": "🔧 SDXL Empty Latent Size Picker"
- }
- },
- "65": {
- "inputs": {
- "unet_name": "flux\\flux1-dev-fp8-e4m3fn.safetensors",
- "weight_dtype": "fp8_e4m3fn_fast"
- },
- "class_type": "UNETLoader",
- "_meta": {
- "title": "Load Diffusion Model"
- }
- },
- "147": {
- "inputs": {
- "noise_seed": 651532572596956
- },
- "class_type": "RandomNoise",
- "_meta": {
- "title": "RandomNoise"
- }
- },
- "148": {
- "inputs": {
- "wildcard_text": "__some-prompts__",
- "populated_text": "A surreal digital artwork showcases a forward-thinking inventor captivated by his intricate mechanical creation through a large magnifying glass. Viewed from an unconventional perspective, the scene reveals an eccentric assembly of gears, springs, and brass instruments within his workshop. Soft, ethereal light radiates from the invention, casting enigmatic shadows on the walls as time appears to bend around its metallic form, invoking a sense of curiosity, wonder, and exhilaration in discovery.",
- "mode": "fixed",
- "seed": 553084268162351,
- "Select to add Wildcard": "Select the Wildcard to add to the text"
- },
- "class_type": "ImpactWildcardProcessor",
- "_meta": {
- "title": "ImpactWildcardProcessor"
- }
- },
- "151": {
- "inputs": {
- "text": "A hyper-realistic close-up portrait of a young woman with shoulder-length black hair styled in edgy, futuristic layers, adorned with glowing tips. She wears mecha eyewear with a neon green visor that transitions into iridescent shades of teal and gold. The frame is sleek, with angular edges and fine mechanical detailing. Her expression is fierce and confident, with flawless skin highlighted by the neon reflections. She wears a high-tech bodysuit with integrated LED lines and metallic panels. The background depicts a hazy rendition of The Great Wave off Kanagawa by Hokusai, its powerful waves blending seamlessly with the neon tones, amplifying her intense, defiant aura."
- },
- "class_type": "Text Multiline",
- "_meta": {
- "title": "Text Multiline"
- }
- },
- "191": {
- "inputs": {
- "text": "A cinematic, oil painting masterpiece captures the essence of impressionistic surrealism, inspired by Claude Monet. A mysterious woman in a flowing crimson dress stands at the edge of a tranquil lake, where lily pads shimmer under an ethereal, golden twilight. The water’s surface reflects a dreamlike sky, its swirling hues of violet and sapphire melting together like liquid light. The thick, expressive brushstrokes lend depth to the scene, evoking a sense of nostalgia and quiet longing, as if the world itself is caught between reality and a fleeting dream. \nA mesmerizing oil painting masterpiece inspired by Salvador Dalí, blending surrealism with post-impressionist texture. A lone violinist plays atop a melting clock tower, his form distorted by the passage of time. The sky is a cascade of swirling, liquid oranges and deep blues, where floating staircases spiral endlessly into the horizon. The impasto technique gives depth and movement to the surreal elements, making time itself feel fluid, as if the world is dissolving into a dream. \nA stunning impressionistic oil painting evokes the spirit of Edvard Munch, capturing a solitary figure standing on a rain-soaked street, illuminated by the glow of flickering gas lamps. The swirling, chaotic strokes of deep blues and fiery reds reflect the turbulence of emotion, while the blurred reflections in the wet cobblestone suggest a merging of past and present. The faceless figure, draped in a dark overcoat, seems lost in thought, embodying the ephemeral nature of memory and time. \nA breathtaking oil painting masterpiece, inspired by Gustav Klimt, presents a celestial ballroom where faceless dancers swirl in an eternal waltz beneath a gilded, star-speckled sky. Their golden garments shimmer with intricate patterns, blending into the opulent mosaic floor that seems to stretch into infinity. 
The dreamlike composition, rich in warm amber and deep sapphire hues, captures an otherworldly elegance, as if the dancers are suspended in a moment that transcends time. \nA visionary oil painting inspired by Marc Chagall depicts a dreamlike cityscape where gravity ceases to exist. A couple floats above a crimson-tinted town, their forms dissolving into the swirling strokes of a vast, cerulean sky. The buildings below twist and bend in rhythmic motion, their windows glowing like tiny stars. The thick, textured brushwork conveys a sense of weightlessness and wonder, as if love itself has defied the laws of the universe. \nAn impressionistic oil painting in the style of J.M.W. Turner, depicting a ghostly ship sailing through a sea of swirling golden mist. The waves crash and dissolve into abstract, fiery strokes of orange and deep indigo, blurring the line between ocean and sky. The ship appears almost ethereal, as if drifting between worlds, lost in the ever-changing tides of memory and myth. The dynamic brushstrokes capture the relentless power of nature and the fleeting essence of time. \nA captivating oil painting masterpiece, infused with surrealist impressionism, portrays a grand library where books float midair, their pages unraveling into ribbons of light. The towering shelves twist into the heavens, vanishing into an infinite, starry void. A lone scholar, illuminated by the glow of a suspended lantern, reaches for a book that seems to pulse with life. The scene pulses with mystery, where the impasto textures bring depth to the interplay between knowledge and dreams. \nA luminous impressionistic oil painting captures the melancholic beauty of an abandoned carnival, its faded carousel horses frozen mid-gallop beneath a sky of swirling lavender and gold. The wind carries fragments of forgotten laughter through the empty fairground, where scattered ticket stubs and crumbling banners whisper tales of joy long past. 
The thick, textured brushstrokes blend nostalgia with an eerie dreamlike quality, as if the carnival exists only in the echoes of memory. \nA surreal oil painting in the spirit of René Magritte, featuring a towering lighthouse that emits not light, but cascading waterfalls from its peak. The swirling sky, painted in deep midnight blues, is punctuated by glowing, crescent moons that defy gravity. A lone figure stands at the water’s edge, gazing up in quiet contemplation, as if caught between wonder and the unknown. The painting’s rich textures and luminous colors create an enigmatic, dreamlike landscape. \nA striking impressionistic oil painting, reminiscent of Van Gogh, portrays a lone traveler on a winding cobblestone path, their silhouette bathed in the golden glow of lantern-lit cherry blossoms. The petals swirl through the night air like glowing embers, blending with the deep, rhythmic strokes of a star-filled indigo sky. The scene captures a feeling of wistful solitude, as if the traveler is walking not only through the city, but through the fleeting nature of time itself."
- },
- "class_type": "Text Multiline",
- "_meta": {
- "title": "Text Multiline"
- }
- },
- "203": {
- "inputs": {
- "string1": [
- "289",
- 0
- ],
- "string2": [
- "293",
- 0
- ],
- "delimiter": ", "
- },
- "class_type": "JoinStrings",
- "_meta": {
- "title": "Join Strings"
- }
- },
- "208": {
- "inputs": {
- "file_path": "",
- "dictionary_name": "[filename]",
- "label": "TextBatch",
- "mode": "automatic",
- "index": 0,
- "multiline_text": [
- "191",
- 0
- ]
- },
- "class_type": "Text Load Line From File",
- "_meta": {
- "title": "Text Load Line From File"
- }
- },
- "226": {
- "inputs": {
- "images": [
- "8",
- 0
- ]
- },
- "class_type": "PreviewImage",
- "_meta": {
- "title": "Preview Image"
- }
- },
- "246": {
- "inputs": {
- "value": 25
- },
- "class_type": "INTConstant",
- "_meta": {
- "title": "Steps"
- }
- },
- "289": {
- "inputs": {
- "group_mode": true,
- "toggle_trigger_words": [
- {
- "text": "bo-exposure",
- "active": true
- },
- {
- "text": "__dummy_item__",
- "active": false,
- "_isDummy": true
- },
- {
- "text": "__dummy_item__",
- "active": false,
- "_isDummy": true
- }
- ],
- "orinalMessage": "bo-exposure",
- "trigger_words": [
- "299",
- 2
- ]
- },
- "class_type": "TriggerWord Toggle (LoraManager)",
- "_meta": {
- "title": "TriggerWord Toggle (LoraManager)"
- }
- },
- "293": {
- "inputs": {
- "input": 1,
- "text1": [
- "208",
- 0
- ],
- "text2": [
- "151",
- 0
- ]
- },
- "class_type": "easy textSwitch",
- "_meta": {
- "title": "Text Switch"
- }
- },
- "297": {
- "inputs": {
- "text": ""
- },
- "class_type": "Lora Stacker (LoraManager)",
- "_meta": {
- "title": "Lora Stacker (LoraManager)"
- }
- },
- "298": {
- "inputs": {
- "anything": [
- "297",
- 0
- ]
- },
- "class_type": "easy showAnything",
- "_meta": {
- "title": "Show Any"
- }
- },
- "299": {
- "inputs": {
- "text": " ",
- "loras": [
- {
- "name": "boFLUX Double Exposure Magic v2",
- "strength": 0.8,
- "active": true
- },
- {
- "name": "FluxDFaeTasticDetails",
- "strength": 0.65,
- "active": true
- },
- {
- "name": "__dummy_item1__",
- "strength": 0,
- "active": false,
- "_isDummy": true
- },
- {
- "name": "__dummy_item2__",
- "strength": 0,
- "active": false,
- "_isDummy": true
- }
- ],
- "model": [
- "65",
- 0
- ],
- "clip": [
- "11",
- 0
- ],
- "lora_stack": [
- "297",
- 0
- ]
- },
- "class_type": "Lora Loader (LoraManager)",
- "_meta": {
- "title": "Lora Loader (LoraManager)"
- }
- },
- "301": {
- "inputs": {
- "string": "A hyper-realistic close-up portrait of a young woman with shoulder-length black hair styled in edgy, futuristic layers, adorned with glowing tips. She wears mecha eyewear with a neon green visor that transitions into iridescent shades of teal and gold. The frame is sleek, with angular edges and fine mechanical detailing. Her expression is fierce and confident, with flawless skin highlighted by the neon reflections. She wears a high-tech bodysuit with integrated LED lines and metallic panels. The background depicts a hazy rendition of The Great Wave off Kanagawa by Hokusai, its powerful waves blending seamlessly with the neon tones, amplifying her intense, defiant aura.",
- "strip_newlines": true
- },
- "class_type": "StringConstantMultiline",
- "_meta": {
- "title": "String Constant Multiline"
- }
- }
-}
\ No newline at end of file
diff --git a/refs/version.json b/refs/version.json
new file mode 100644
index 00000000..03fd8fd3
--- /dev/null
+++ b/refs/version.json
@@ -0,0 +1,91 @@
+{
+ "id": 1255556,
+ "modelId": 1117241,
+ "name": "v1.0",
+ "createdAt": "2025-01-08T06:13:08.839Z",
+ "updatedAt": "2025-01-08T06:28:54.156Z",
+ "status": "Published",
+ "publishedAt": "2025-01-08T06:28:54.155Z",
+ "trainedWords": ["in the style of ppWhimsy"],
+ "trainingStatus": null,
+ "trainingDetails": null,
+ "baseModel": "Flux.1 D",
+ "baseModelType": "Standard",
+ "earlyAccessEndsAt": null,
+ "earlyAccessConfig": null,
+ "description": null,
+ "uploadType": "Created",
+ "usageControl": "Download",
+ "air": "urn:air:flux1:lora:civitai:1117241@1255556",
+ "stats": {
+ "downloadCount": 210,
+ "ratingCount": 0,
+ "rating": 0,
+ "thumbsUpCount": 26
+ },
+ "model": {
+ "name": "Enchanted Whimsy style (Flux)",
+ "type": "LORA",
+ "nsfw": false,
+ "poi": false
+ },
+ "files": [
+ {
+ "id": 1160774,
+ "sizeKB": 38828.8125,
+ "name": "pp-enchanted-whimsy.safetensors",
+ "type": "Model",
+ "pickleScanResult": "Success",
+ "pickleScanMessage": "No Pickle imports",
+ "virusScanResult": "Success",
+ "virusScanMessage": null,
+ "scannedAt": "2025-01-08T06:16:27.731Z",
+ "metadata": {
+ "format": "SafeTensor",
+ "size": null,
+ "fp": null
+ },
+ "hashes": {
+ "AutoV1": "40CAF049",
+ "AutoV2": "3202778C3E",
+ "SHA256": "3202778C3EBE5CF7EBE5FC51561DEAE8611F4362036EB7C02EFA033C705E6240",
+ "CRC32": "69DCD953",
+ "BLAKE3": "ED04580DDB1AD36D8B87F4B0800F5930C7E5D4A7269BDC2BE26ED77EA1A34697",
+ "AutoV3": "BF82986F8597"
+ },
+ "primary": true,
+ "downloadUrl": "https://civitai.com/api/download/models/1255556"
+ }
+ ],
+ "images": [
+ {
+ "url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/707aef9b-36fb-46c2-ac41-adcab539d3a6/width=832/50270101.jpeg",
+ "nsfwLevel": 1,
+ "width": 832,
+ "height": 1216,
+ "hash": "U7Am@@$^J3%100R;pLR.M]tQ-ps+?wRiVrof",
+ "type": "image",
+ "metadata": {
+ "hash": "U7Am@@$^J3%100R;pLR.M]tQ-ps+?wRiVrof",
+ "size": 702313,
+ "width": 832,
+ "height": 1216
+ },
+ "minor": false,
+ "poi": false,
+ "meta": {
+ "prompt": "in the style of ppWhimsy, a close-up of a boy with a crown of ferns and tiny horns, his eyes wide with wonder as a family of glowing hedgehogs nestle in his hands, their spines shimmering with soft pastel colors"
+ },
+ "availability": "Public",
+ "hasMeta": true,
+ "hasPositivePrompt": true,
+ "onSite": false,
+ "remixOfId": null
+ }
+ ],
+ "downloadUrl": "https://civitai.com/api/download/models/1255556",
+ "creator": {
+ "username": "PixelPawsAI",
+ "image": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/f3a1aa7c-0159-4dd8-884a-1e7ceb350f96/width=96/PixelPawsAI.jpeg"
+ }
+}
diff --git a/requirements.txt b/requirements.txt
index 9e280c64..4051dc74 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,3 +9,4 @@ numpy
natsort
GitPython
aiosqlite
+beautifulsoup4
diff --git a/scripts/run_frontend_coverage.js b/scripts/run_frontend_coverage.js
new file mode 100755
index 00000000..922e4035
--- /dev/null
+++ b/scripts/run_frontend_coverage.js
@@ -0,0 +1,205 @@
+#!/usr/bin/env node
+import { spawnSync } from 'node:child_process';
+import { mkdirSync, rmSync, readdirSync, readFileSync, writeFileSync, existsSync } from 'node:fs';
+import path from 'node:path';
+import { fileURLToPath } from 'node:url';
+
+const __dirname = path.dirname(fileURLToPath(import.meta.url));
+const repoRoot = path.resolve(__dirname, '..');
+const coverageRoot = path.join(repoRoot, 'coverage');
+const v8OutputDir = path.join(coverageRoot, '.v8');
+const frontendCoverageDir = path.join(coverageRoot, 'frontend');
+
+rmSync(v8OutputDir, { recursive: true, force: true });
+rmSync(frontendCoverageDir, { recursive: true, force: true });
+mkdirSync(v8OutputDir, { recursive: true });
+mkdirSync(frontendCoverageDir, { recursive: true });
+
+const vitestCli = path.join(repoRoot, 'node_modules', 'vitest', 'vitest.mjs');
+
+if (!existsSync(vitestCli)) {
+ console.error('Failed to locate Vitest CLI at', vitestCli);
+ console.error('Try reinstalling frontend dependencies with `npm install`.');
+ process.exit(1);
+}
+
+const env = { ...process.env, NODE_V8_COVERAGE: v8OutputDir };
+
+const spawnOptions = { stdio: 'inherit', env };
+const result = spawnSync(process.execPath, [vitestCli, 'run'], spawnOptions);
+
+if (result.error) {
+ console.error('Failed to execute Vitest:', result.error.message);
+ process.exit(result.status ?? 1);
+}
+
+if (result.status !== 0) {
+ process.exit(result.status ?? 1);
+}
+
+const fileCoverage = collectCoverageFromV8(v8OutputDir, repoRoot);
+writeCoverageOutputs(fileCoverage, frontendCoverageDir, repoRoot);
+printSummary(fileCoverage);
+rmSync(v8OutputDir, { recursive: true, force: true });
+
+function collectCoverageFromV8(v8Dir, rootDir) {
+ const coverageMap = new Map();
+ const files = readdirSync(v8Dir).filter((file) => file.endsWith('.json'));
+
+ for (const file of files) {
+ const reportPath = path.join(v8Dir, file);
+ const report = JSON.parse(readFileSync(reportPath, 'utf8'));
+ if (!Array.isArray(report.result)) {
+ continue;
+ }
+
+ for (const script of report.result) {
+ const filePath = normalizeFilePath(script.url, rootDir);
+ if (!filePath) {
+ continue;
+ }
+
+ if (!filePath.startsWith('static/')) {
+ continue;
+ }
+
+ if (!filePath.endsWith('.js')) {
+ continue;
+ }
+
+ const absolutePath = path.join(rootDir, filePath);
+ let lineMap = coverageMap.get(filePath);
+ if (!lineMap) {
+ lineMap = new Map();
+ coverageMap.set(filePath, lineMap);
+ }
+
+ const source = readFileSync(absolutePath, 'utf8');
+ const lineOffsets = calculateLineOffsets(source);
+
+ for (const fn of script.functions ?? []) {
+ for (const range of fn.ranges ?? []) {
+ if (range.startOffset === range.endOffset) {
+ continue;
+ }
+ const count = typeof range.count === 'number' ? range.count : 0;
+ const startLine = findLineNumber(range.startOffset, lineOffsets);
+ const endLine = findLineNumber(Math.max(range.endOffset - 1, range.startOffset), lineOffsets);
+ for (let line = startLine; line <= endLine; line += 1) {
+ const current = lineMap.get(line);
+ if (current === undefined || count > current) {
+ lineMap.set(line, count);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return coverageMap;
+}
+
+function normalizeFilePath(url, rootDir) {
+ if (!url) {
+ return null;
+ }
+ try {
+ const parsed = new URL(url);
+ if (parsed.protocol !== 'file:') {
+ return null;
+ }
+ const absolute = fileURLToPath(parsed);
+ const relative = path.relative(rootDir, absolute);
+ if (relative.startsWith('..') || path.isAbsolute(relative)) {
+ return null;
+ }
+ return relative.replace(/\\/g, '/');
+ } catch {
+ if (url.startsWith(rootDir)) {
+ return url.slice(rootDir.length + 1).replace(/\\/g, '/');
+ }
+ return null;
+ }
+}
+
+function calculateLineOffsets(content) {
+ const offsets = [0];
+ for (let index = 0; index < content.length; index += 1) {
+ if (content.charCodeAt(index) === 10) {
+ offsets.push(index + 1);
+ }
+ }
+ offsets.push(content.length);
+ return offsets;
+}
+
+function findLineNumber(offset, lineOffsets) {
+ let low = 0;
+ let high = lineOffsets.length - 1;
+ while (low < high) {
+ const mid = Math.floor((low + high + 1) / 2);
+ if (lineOffsets[mid] <= offset) {
+ low = mid;
+ } else {
+ high = mid - 1;
+ }
+ }
+ return low + 1;
+}
+
+function writeCoverageOutputs(coverageMap, outputDir, rootDir) {
+ const summary = {
+ total: { lines: { total: 0, covered: 0, pct: 100 } },
+ files: {},
+ };
+
+ let lcovContent = '';
+
+ for (const [relativePath, lineMap] of [...coverageMap.entries()].sort()) {
+ const lines = [...lineMap.entries()].sort((a, b) => a[0] - b[0]);
+ const total = lines.length;
+ const covered = lines.filter(([, count]) => count > 0).length;
+ const pct = total === 0 ? 100 : (covered / total) * 100;
+
+ summary.files[relativePath] = {
+ lines: {
+ total,
+ covered,
+ pct,
+ },
+ };
+
+ summary.total.lines.total += total;
+ summary.total.lines.covered += covered;
+
+ const absolute = path.join(rootDir, relativePath);
+ lcovContent += 'TN:\n';
+ lcovContent += `SF:${absolute.replace(/\\/g, '/')}\n`;
+ for (const [line, count] of lines) {
+ lcovContent += `DA:${line},${count}\n`;
+ }
+ lcovContent += `LF:${total}\n`;
+ lcovContent += `LH:${covered}\n`;
+ lcovContent += 'end_of_record\n';
+ }
+
+ summary.total.lines.pct = summary.total.lines.total === 0
+ ? 100
+ : (summary.total.lines.covered / summary.total.lines.total) * 100;
+
+ writeFileSync(path.join(outputDir, 'coverage-summary.json'), JSON.stringify(summary, null, 2));
+ writeFileSync(path.join(outputDir, 'lcov.info'), lcovContent, 'utf8');
+}
+
+function printSummary(coverageMap) {
+ let totalLines = 0;
+ let totalCovered = 0;
+ for (const lineMap of coverageMap.values()) {
+ const lines = lineMap.size;
+ const covered = [...lineMap.values()].filter((count) => count > 0).length;
+ totalLines += lines;
+ totalCovered += covered;
+ }
+ const pct = totalLines === 0 ? 100 : (totalCovered / totalLines) * 100;
+ console.log(`\nFrontend coverage: ${totalCovered}/${totalLines} lines (${pct.toFixed(2)}%)`);
+}
diff --git a/settings.json.example b/settings.json.example
index 0765577b..673aa76d 100644
--- a/settings.json.example
+++ b/settings.json.example
@@ -1,6 +1,5 @@
{
"civitai_api_key": "your_civitai_api_key_here",
- "show_only_sfw": false,
"folder_paths": {
"loras": [
"C:/path/to/your/loras_folder",
diff --git a/standalone.py b/standalone.py
index b8532abf..95c45ca7 100644
--- a/standalone.py
+++ b/standalone.py
@@ -2,6 +2,10 @@ from pathlib import Path
import os
import sys
import json
+from py.middleware.cache_middleware import cache_control
+
+# Set environment variable to indicate standalone mode
+os.environ["COMFYUI_LORA_MANAGER_STANDALONE"] = "1"
# Create mock modules for py/nodes directory - add this before any other imports
def mock_nodes_directory():
@@ -129,7 +133,7 @@ class StandaloneServer:
"""Server implementation for standalone mode"""
def __init__(self):
- self.app = web.Application(logger=logger)
+ self.app = web.Application(logger=logger, middlewares=[cache_control])
self.instance = self # Make it compatible with PromptServer.instance pattern
# Ensure the app's access logger is configured to reduce verbosity
@@ -417,7 +421,7 @@ class StandaloneLoraManager(LoraManager):
RecipeRoutes.setup_routes(app)
UpdateRoutes.setup_routes(app)
MiscRoutes.setup_routes(app)
- ExampleImagesRoutes.setup_routes(app)
+ ExampleImagesRoutes.setup_routes(app, ws_manager=ws_manager)
# Setup WebSocket routes that are shared across all model types
app.router.add_get('/ws/fetch-progress', ws_manager.handle_connection)
diff --git a/static/css/components/modal/settings-modal.css b/static/css/components/modal/settings-modal.css
index a13dc856..8165bb67 100644
--- a/static/css/components/modal/settings-modal.css
+++ b/static/css/components/modal/settings-modal.css
@@ -101,7 +101,7 @@
.api-key-input input {
width: 100%;
padding: 6px 40px 6px 10px; /* Add left padding */
- height: 32px;
+ height: 20px;
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
background-color: var(--lora-surface);
@@ -123,6 +123,36 @@
opacity: 1;
}
+/* Text input wrapper styles for consistent input styling */
+.text-input-wrapper {
+ width: 100%;
+ position: relative;
+ display: flex;
+ align-items: center;
+}
+
+.text-input-wrapper input {
+ width: 100%;
+ padding: 6px 10px;
+ height: 20px;
+ border-radius: var(--border-radius-xs);
+ border: 1px solid var(--border-color);
+ background-color: var(--lora-surface);
+ color: var(--text-color);
+ font-size: 0.95em;
+}
+
+.text-input-wrapper input:focus {
+ border-color: var(--lora-accent);
+ outline: none;
+ box-shadow: 0 0 0 2px rgba(var(--lora-accent-rgb, 79, 70, 229), 0.1);
+}
+
+/* Dark theme specific adjustments */
+[data-theme="dark"] .text-input-wrapper input {
+ background-color: rgba(30, 30, 30, 0.9);
+}
+
.input-help {
font-size: 0.85em;
color: var(--text-color);
@@ -312,7 +342,7 @@ input:checked + .toggle-slider:before {
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
background-color: var(--lora-surface);
- color: var (--text-color);
+ color: var(--text-color);
font-size: 0.95em;
height: 32px;
}
@@ -346,7 +376,7 @@ input:checked + .toggle-slider:before {
padding: var(--space-1);
margin-top: 8px;
font-family: monospace;
- font-size: 1.1em;
+ font-size: 0.9em;
color: var(--lora-accent);
display: none;
}
@@ -571,10 +601,31 @@ input:checked + .toggle-slider:before {
background-color: rgba(30, 30, 30, 0.9);
}
+/* Proxy Settings Styles */
+.proxy-settings-group {
+ margin-left: var(--space-1);
+ padding-left: var(--space-1);
+ border-left: 2px solid var(--lora-border);
+ animation: slideDown 0.3s ease-out;
+}
+
+.proxy-settings-group .setting-item {
+ margin-bottom: var(--space-2);
+}
+
/* Responsive adjustments */
@media (max-width: 768px) {
.placeholder-info {
flex-direction: column;
align-items: flex-start;
}
+
+ .proxy-settings-group {
+ margin-left: 0;
+ padding-left: var(--space-1);
+ border-left: none;
+ border-top: 1px solid var(--lora-border);
+ padding-top: var(--space-2);
+ margin-top: var(--space-2);
+ }
}
\ No newline at end of file
diff --git a/static/css/components/sidebar.css b/static/css/components/sidebar.css
index 008c16a5..c0af436d 100644
--- a/static/css/components/sidebar.css
+++ b/static/css/components/sidebar.css
@@ -233,7 +233,7 @@
}
.sidebar-tree-children.expanded {
- max-height: 9999px;
+ max-height: 50000px;
}
.sidebar-tree-children .sidebar-tree-node-content {
diff --git a/static/js/api/apiConfig.js b/static/js/api/apiConfig.js
index 33622dc5..aa168413 100644
--- a/static/js/api/apiConfig.js
+++ b/static/js/api/apiConfig.js
@@ -55,48 +55,48 @@ export function getApiEndpoints(modelType) {
return {
// Base CRUD operations
- list: `/api/${modelType}/list`,
- delete: `/api/${modelType}/delete`,
- exclude: `/api/${modelType}/exclude`,
- rename: `/api/${modelType}/rename`,
- save: `/api/${modelType}/save-metadata`,
+ list: `/api/lm/${modelType}/list`,
+ delete: `/api/lm/${modelType}/delete`,
+ exclude: `/api/lm/${modelType}/exclude`,
+ rename: `/api/lm/${modelType}/rename`,
+ save: `/api/lm/${modelType}/save-metadata`,
// Bulk operations
- bulkDelete: `/api/${modelType}/bulk-delete`,
+ bulkDelete: `/api/lm/${modelType}/bulk-delete`,
// Tag operations
- addTags: `/api/${modelType}/add-tags`,
+ addTags: `/api/lm/${modelType}/add-tags`,
// Move operations (now common for all model types that support move)
- moveModel: `/api/${modelType}/move_model`,
- moveBulk: `/api/${modelType}/move_models_bulk`,
+ moveModel: `/api/lm/${modelType}/move_model`,
+ moveBulk: `/api/lm/${modelType}/move_models_bulk`,
// CivitAI integration
- fetchCivitai: `/api/${modelType}/fetch-civitai`,
- fetchAllCivitai: `/api/${modelType}/fetch-all-civitai`,
- relinkCivitai: `/api/${modelType}/relink-civitai`,
- civitaiVersions: `/api/${modelType}/civitai/versions`,
+ fetchCivitai: `/api/lm/${modelType}/fetch-civitai`,
+ fetchAllCivitai: `/api/lm/${modelType}/fetch-all-civitai`,
+ relinkCivitai: `/api/lm/${modelType}/relink-civitai`,
+ civitaiVersions: `/api/lm/${modelType}/civitai/versions`,
// Preview management
- replacePreview: `/api/${modelType}/replace-preview`,
+ replacePreview: `/api/lm/${modelType}/replace-preview`,
// Query operations
- scan: `/api/${modelType}/scan`,
- topTags: `/api/${modelType}/top-tags`,
- baseModels: `/api/${modelType}/base-models`,
- roots: `/api/${modelType}/roots`,
- folders: `/api/${modelType}/folders`,
- folderTree: `/api/${modelType}/folder-tree`,
- unifiedFolderTree: `/api/${modelType}/unified-folder-tree`,
- duplicates: `/api/${modelType}/find-duplicates`,
- conflicts: `/api/${modelType}/find-filename-conflicts`,
- verify: `/api/${modelType}/verify-duplicates`,
- metadata: `/api/${modelType}/metadata`,
- modelDescription: `/api/${modelType}/model-description`,
+ scan: `/api/lm/${modelType}/scan`,
+ topTags: `/api/lm/${modelType}/top-tags`,
+ baseModels: `/api/lm/${modelType}/base-models`,
+ roots: `/api/lm/${modelType}/roots`,
+ folders: `/api/lm/${modelType}/folders`,
+ folderTree: `/api/lm/${modelType}/folder-tree`,
+ unifiedFolderTree: `/api/lm/${modelType}/unified-folder-tree`,
+ duplicates: `/api/lm/${modelType}/find-duplicates`,
+ conflicts: `/api/lm/${modelType}/find-filename-conflicts`,
+ verify: `/api/lm/${modelType}/verify-duplicates`,
+ metadata: `/api/lm/${modelType}/metadata`,
+ modelDescription: `/api/lm/${modelType}/model-description`,
// Auto-organize operations
- autoOrganize: `/api/${modelType}/auto-organize`,
- autoOrganizeProgress: `/api/${modelType}/auto-organize-progress`,
+ autoOrganize: `/api/lm/${modelType}/auto-organize`,
+ autoOrganizeProgress: `/api/lm/${modelType}/auto-organize-progress`,
// Model-specific endpoints (will be merged with specific configs)
specific: {}
@@ -108,24 +108,24 @@ export function getApiEndpoints(modelType) {
*/
export const MODEL_SPECIFIC_ENDPOINTS = {
[MODEL_TYPES.LORA]: {
- letterCounts: `/api/${MODEL_TYPES.LORA}/letter-counts`,
- notes: `/api/${MODEL_TYPES.LORA}/get-notes`,
- triggerWords: `/api/${MODEL_TYPES.LORA}/get-trigger-words`,
- previewUrl: `/api/${MODEL_TYPES.LORA}/preview-url`,
- civitaiUrl: `/api/${MODEL_TYPES.LORA}/civitai-url`,
- metadata: `/api/${MODEL_TYPES.LORA}/metadata`,
- getTriggerWordsPost: `/api/${MODEL_TYPES.LORA}/get_trigger_words`,
- civitaiModelByVersion: `/api/${MODEL_TYPES.LORA}/civitai/model/version`,
- civitaiModelByHash: `/api/${MODEL_TYPES.LORA}/civitai/model/hash`,
+ letterCounts: `/api/lm/${MODEL_TYPES.LORA}/letter-counts`,
+ notes: `/api/lm/${MODEL_TYPES.LORA}/get-notes`,
+ triggerWords: `/api/lm/${MODEL_TYPES.LORA}/get-trigger-words`,
+ previewUrl: `/api/lm/${MODEL_TYPES.LORA}/preview-url`,
+ civitaiUrl: `/api/lm/${MODEL_TYPES.LORA}/civitai-url`,
+ metadata: `/api/lm/${MODEL_TYPES.LORA}/metadata`,
+ getTriggerWordsPost: `/api/lm/${MODEL_TYPES.LORA}/get_trigger_words`,
+ civitaiModelByVersion: `/api/lm/${MODEL_TYPES.LORA}/civitai/model/version`,
+ civitaiModelByHash: `/api/lm/${MODEL_TYPES.LORA}/civitai/model/hash`,
},
[MODEL_TYPES.CHECKPOINT]: {
- info: `/api/${MODEL_TYPES.CHECKPOINT}/info`,
- checkpoints_roots: `/api/${MODEL_TYPES.CHECKPOINT}/checkpoints_roots`,
- unet_roots: `/api/${MODEL_TYPES.CHECKPOINT}/unet_roots`,
- metadata: `/api/${MODEL_TYPES.CHECKPOINT}/metadata`,
+ info: `/api/lm/${MODEL_TYPES.CHECKPOINT}/info`,
+ checkpoints_roots: `/api/lm/${MODEL_TYPES.CHECKPOINT}/checkpoints_roots`,
+ unet_roots: `/api/lm/${MODEL_TYPES.CHECKPOINT}/unet_roots`,
+ metadata: `/api/lm/${MODEL_TYPES.CHECKPOINT}/metadata`,
},
[MODEL_TYPES.EMBEDDING]: {
- metadata: `/api/${MODEL_TYPES.EMBEDDING}/metadata`,
+ metadata: `/api/lm/${MODEL_TYPES.EMBEDDING}/metadata`,
}
};
@@ -173,11 +173,11 @@ export function getCurrentModelType(explicitType = null) {
// Download API endpoints (shared across all model types)
export const DOWNLOAD_ENDPOINTS = {
- download: '/api/download-model',
- downloadGet: '/api/download-model-get',
- cancelGet: '/api/cancel-download-get',
- progress: '/api/download-progress',
- exampleImages: '/api/force-download-example-images' // New endpoint for downloading example images
+ download: '/api/lm/download-model',
+ downloadGet: '/api/lm/download-model-get',
+ cancelGet: '/api/lm/cancel-download-get',
+ progress: '/api/lm/download-progress',
+ exampleImages: '/api/lm/force-download-example-images' // New endpoint for downloading example images
};
// WebSocket endpoints
diff --git a/static/js/api/baseModelApi.js b/static/js/api/baseModelApi.js
index 2a7c058d..2d24efe4 100644
--- a/static/js/api/baseModelApi.js
+++ b/static/js/api/baseModelApi.js
@@ -538,13 +538,13 @@ export class BaseModelApiClient {
completionMessage = translate('toast.api.bulkMetadataCompletePartial', { success: successCount, total: totalItems, type: this.apiConfig.config.displayName }, `Refreshed ${successCount} of ${totalItems} ${this.apiConfig.config.displayName}s`);
showToast('toast.api.bulkMetadataCompletePartial', { success: successCount, total: totalItems, type: this.apiConfig.config.displayName }, 'warning');
- if (failedItems.length > 0) {
- const failureMessage = failedItems.length <= 3
- ? failedItems.map(item => `${item.fileName}: ${item.error}`).join('\n')
- : failedItems.slice(0, 3).map(item => `${item.fileName}: ${item.error}`).join('\n') +
- `\n(and ${failedItems.length - 3} more)`;
- showToast('toast.api.bulkMetadataFailureDetails', { failures: failureMessage }, 'warning', 6000);
- }
+ // if (failedItems.length > 0) {
+ // const failureMessage = failedItems.length <= 3
+ // ? failedItems.map(item => `${item.fileName}: ${item.error}`).join('\n')
+ // : failedItems.slice(0, 3).map(item => `${item.fileName}: ${item.error}`).join('\n') +
+ // `\n(and ${failedItems.length - 3} more)`;
+ // showToast('toast.api.bulkMetadataFailureDetails', { failures: failureMessage }, 'warning', 6000);
+ // }
} else {
completionMessage = translate('toast.api.bulkMetadataCompleteNone', { type: this.apiConfig.config.displayName }, `Failed to refresh metadata for any ${this.apiConfig.config.displayName}s`);
showToast('toast.api.bulkMetadataCompleteNone', { type: this.apiConfig.config.displayName }, 'error');
@@ -938,14 +938,14 @@ export class BaseModelApiClient {
ws.onerror = reject;
});
- // Get the output directory from storage
- const outputDir = getStorageItem('example_images_path', '');
+ // Get the output directory from state
+ const outputDir = state.global?.settings?.example_images_path || '';
if (!outputDir) {
throw new Error('Please set the example images path in the settings first.');
}
// Determine optimize setting
- const optimize = state.global?.settings?.optimizeExampleImages ?? true;
+ const optimize = state.global?.settings?.optimize_example_images ?? true;
// Make the API request to start the download process
const response = await fetch(DOWNLOAD_ENDPOINTS.exampleImages, {
diff --git a/static/js/api/recipeApi.js b/static/js/api/recipeApi.js
index fec0d02f..3b912905 100644
--- a/static/js/api/recipeApi.js
+++ b/static/js/api/recipeApi.js
@@ -21,7 +21,7 @@ export async function fetchRecipesPage(page = 1, pageSize = 100) {
// If we have a specific recipe ID to load
if (pageState.customFilter?.active && pageState.customFilter?.recipeId) {
// Special case: load specific recipe
- const response = await fetch(`/api/recipe/${pageState.customFilter.recipeId}`);
+ const response = await fetch(`/api/lm/recipe/${pageState.customFilter.recipeId}`);
if (!response.ok) {
throw new Error(`Failed to load recipe: ${response.statusText}`);
@@ -72,7 +72,7 @@ export async function fetchRecipesPage(page = 1, pageSize = 100) {
}
// Fetch recipes
- const response = await fetch(`/api/recipes?${params.toString()}`);
+ const response = await fetch(`/api/lm/recipes?${params.toString()}`);
if (!response.ok) {
throw new Error(`Failed to load recipes: ${response.statusText}`);
@@ -207,7 +207,7 @@ export async function refreshRecipes() {
state.loadingManager.showSimpleLoading('Refreshing recipes...');
// Call the API endpoint to rebuild the recipe cache
- const response = await fetch('/api/recipes/scan');
+ const response = await fetch('/api/lm/recipes/scan');
if (!response.ok) {
const data = await response.json();
@@ -274,7 +274,7 @@ export async function updateRecipeMetadata(filePath, updates) {
const basename = filePath.split('/').pop().split('\\').pop();
const recipeId = basename.substring(0, basename.lastIndexOf('.'));
- const response = await fetch(`/api/recipe/${recipeId}/update`, {
+ const response = await fetch(`/api/lm/recipe/${recipeId}/update`, {
method: 'PUT',
headers: {
'Content-Type': 'application/json',
diff --git a/static/js/checkpoints.js b/static/js/checkpoints.js
index a26099d3..de341008 100644
--- a/static/js/checkpoints.js
+++ b/static/js/checkpoints.js
@@ -5,7 +5,7 @@ import { ModelDuplicatesManager } from './components/ModelDuplicatesManager.js';
import { MODEL_TYPES } from './api/apiConfig.js';
// Initialize the Checkpoints page
-class CheckpointsPageManager {
+export class CheckpointsPageManager {
constructor() {
// Initialize page controls
this.pageControls = createPageControls(MODEL_TYPES.CHECKPOINT);
@@ -31,17 +31,21 @@ class CheckpointsPageManager {
async initialize() {
// Initialize common page features (including context menus)
appCore.initializePageFeatures();
-
+
console.log('Checkpoints Manager initialized');
}
}
-// Initialize everything when DOM is ready
-document.addEventListener('DOMContentLoaded', async () => {
+export async function initializeCheckpointsPage() {
// Initialize core application
await appCore.initialize();
-
+
// Initialize checkpoints page
const checkpointsPage = new CheckpointsPageManager();
await checkpointsPage.initialize();
-});
\ No newline at end of file
+
+ return checkpointsPage;
+}
+
+// Initialize everything when DOM is ready
+document.addEventListener('DOMContentLoaded', initializeCheckpointsPage);
\ No newline at end of file
diff --git a/static/js/components/ContextMenu/GlobalContextMenu.js b/static/js/components/ContextMenu/GlobalContextMenu.js
new file mode 100644
index 00000000..deeacff9
--- /dev/null
+++ b/static/js/components/ContextMenu/GlobalContextMenu.js
@@ -0,0 +1,104 @@
+import { BaseContextMenu } from './BaseContextMenu.js';
+import { showToast } from '../../utils/uiHelpers.js';
+import { state } from '../../state/index.js';
+
+export class GlobalContextMenu extends BaseContextMenu {
+ constructor() {
+ super('globalContextMenu');
+ this._cleanupInProgress = false;
+ }
+
+ showMenu(x, y, origin = null) {
+ const contextOrigin = origin || { type: 'global' };
+ super.showMenu(x, y, contextOrigin);
+ }
+
+ handleMenuAction(action, menuItem) {
+ switch (action) {
+ case 'cleanup-example-images-folders':
+ this.cleanupExampleImagesFolders(menuItem).catch((error) => {
+ console.error('Failed to trigger example images cleanup:', error);
+ });
+ break;
+ case 'download-example-images':
+ this.downloadExampleImages(menuItem).catch((error) => {
+ console.error('Failed to trigger example images download:', error);
+ });
+ break;
+ default:
+ console.warn(`Unhandled global context menu action: ${action}`);
+ break;
+ }
+ }
+
+ async downloadExampleImages(menuItem) {
+ const exampleImagesManager = window.exampleImagesManager;
+
+ if (!exampleImagesManager) {
+ showToast('globalContextMenu.downloadExampleImages.unavailable', {}, 'error');
+ return;
+ }
+
+ const downloadPath = state?.global?.settings?.example_images_path;
+ if (!downloadPath) {
+ showToast('globalContextMenu.downloadExampleImages.missingPath', {}, 'warning');
+ return;
+ }
+
+ menuItem?.classList.add('disabled');
+
+ try {
+ await exampleImagesManager.handleDownloadButton();
+ } finally {
+ menuItem?.classList.remove('disabled');
+ }
+ }
+
+ async cleanupExampleImagesFolders(menuItem) {
+ if (this._cleanupInProgress) {
+ return;
+ }
+
+ this._cleanupInProgress = true;
+ menuItem?.classList.add('disabled');
+
+ try {
+ const response = await fetch('/api/lm/cleanup-example-image-folders', {
+ method: 'POST',
+ });
+
+ let payload;
+ try {
+ payload = await response.json();
+ } catch (parseError) {
+ payload = { error: 'Unexpected response format.' };
+ }
+
+ if (response.ok && (payload.success || payload.partial_success)) {
+ const movedTotal = payload.moved_total || 0;
+
+ if (movedTotal > 0) {
+ showToast('globalContextMenu.cleanupExampleImages.success', { count: movedTotal }, 'success');
+ } else {
+ showToast('globalContextMenu.cleanupExampleImages.none', {}, 'info');
+ }
+
+ if (payload.partial_success) {
+ showToast(
+ 'globalContextMenu.cleanupExampleImages.partial',
+ { failures: payload.move_failures ?? 0 },
+ 'warning',
+ );
+ }
+ } else {
+ const message = payload?.error || 'Unknown error';
+ showToast('globalContextMenu.cleanupExampleImages.error', { message }, 'error');
+ }
+ } catch (error) {
+ showToast('globalContextMenu.cleanupExampleImages.error', { message: error.message || 'Unknown error' }, 'error');
+ } finally {
+ this._cleanupInProgress = false;
+ menuItem?.classList.remove('disabled');
+ }
+ }
+}
diff --git a/static/js/components/ContextMenu/LoraContextMenu.js b/static/js/components/ContextMenu/LoraContextMenu.js
index ee7634b6..4b72cafa 100644
--- a/static/js/components/ContextMenu/LoraContextMenu.js
+++ b/static/js/components/ContextMenu/LoraContextMenu.js
@@ -1,7 +1,7 @@
import { BaseContextMenu } from './BaseContextMenu.js';
import { ModelContextMenuMixin } from './ModelContextMenuMixin.js';
import { getModelApiClient, resetAndReload } from '../../api/modelApiFactory.js';
-import { copyLoraSyntax, sendLoraToWorkflow } from '../../utils/uiHelpers.js';
+import { copyLoraSyntax, sendLoraToWorkflow, buildLoraSyntax } from '../../utils/uiHelpers.js';
import { showExcludeModal, showDeleteModal } from '../../utils/modalUtils.js';
import { moveManager } from '../../managers/MoveManager.js';
@@ -70,9 +70,8 @@ export class LoraContextMenu extends BaseContextMenu {
sendLoraToWorkflow(replaceMode) {
const card = this.currentCard;
const usageTips = JSON.parse(card.dataset.usage_tips || '{}');
- const strength = usageTips.strength || 1;
- const loraSyntax = ``;
-
+ const loraSyntax = buildLoraSyntax(card.dataset.file_name, usageTips);
+
sendLoraToWorkflow(loraSyntax, replaceMode, 'lora');
}
}
diff --git a/static/js/components/ContextMenu/ModelContextMenuMixin.js b/static/js/components/ContextMenu/ModelContextMenuMixin.js
index cd376dd1..3c461a9a 100644
--- a/static/js/components/ContextMenu/ModelContextMenuMixin.js
+++ b/static/js/components/ContextMenu/ModelContextMenuMixin.js
@@ -125,8 +125,8 @@ export const ModelContextMenuMixin = {
state.loadingManager.showSimpleLoading('Re-linking to Civitai...');
const endpoint = this.modelType === 'checkpoint' ?
- '/api/checkpoints/relink-civitai' :
- '/api/loras/relink-civitai';
+ '/api/lm/checkpoints/relink-civitai' :
+ '/api/lm/loras/relink-civitai';
const response = await fetch(endpoint, {
method: 'POST',
diff --git a/static/js/components/ContextMenu/RecipeContextMenu.js b/static/js/components/ContextMenu/RecipeContextMenu.js
index 351263c7..bb8b8e69 100644
--- a/static/js/components/ContextMenu/RecipeContextMenu.js
+++ b/static/js/components/ContextMenu/RecipeContextMenu.js
@@ -103,7 +103,7 @@ export class RecipeContextMenu extends BaseContextMenu {
return;
}
- fetch(`/api/recipe/${recipeId}/syntax`)
+ fetch(`/api/lm/recipe/${recipeId}/syntax`)
.then(response => response.json())
.then(data => {
if (data.success && data.syntax) {
@@ -126,7 +126,7 @@ export class RecipeContextMenu extends BaseContextMenu {
return;
}
- fetch(`/api/recipe/${recipeId}/syntax`)
+ fetch(`/api/lm/recipe/${recipeId}/syntax`)
.then(response => response.json())
.then(data => {
if (data.success && data.syntax) {
@@ -149,7 +149,7 @@ export class RecipeContextMenu extends BaseContextMenu {
}
// First get the recipe details to access its LoRAs
- fetch(`/api/recipe/${recipeId}`)
+ fetch(`/api/lm/recipe/${recipeId}`)
.then(response => response.json())
.then(recipe => {
// Clear any previous filters first
@@ -189,7 +189,7 @@ export class RecipeContextMenu extends BaseContextMenu {
try {
// First get the recipe details
- const response = await fetch(`/api/recipe/${recipeId}`);
+ const response = await fetch(`/api/lm/recipe/${recipeId}`);
const recipe = await response.json();
// Get missing LoRAs
@@ -209,9 +209,9 @@ export class RecipeContextMenu extends BaseContextMenu {
// Determine which endpoint to use based on available data
if (lora.modelVersionId) {
- endpoint = `/api/loras/civitai/model/version/${lora.modelVersionId}`;
+ endpoint = `/api/lm/loras/civitai/model/version/${lora.modelVersionId}`;
} else if (lora.hash) {
- endpoint = `/api/loras/civitai/model/hash/${lora.hash}`;
+ endpoint = `/api/lm/loras/civitai/model/hash/${lora.hash}`;
} else {
console.error("Missing both hash and modelVersionId for lora:", lora);
return null;
diff --git a/static/js/components/ContextMenu/index.js b/static/js/components/ContextMenu/index.js
index b6be6ccf..7d01c0f6 100644
--- a/static/js/components/ContextMenu/index.js
+++ b/static/js/components/ContextMenu/index.js
@@ -2,12 +2,14 @@ export { LoraContextMenu } from './LoraContextMenu.js';
export { RecipeContextMenu } from './RecipeContextMenu.js';
export { CheckpointContextMenu } from './CheckpointContextMenu.js';
export { EmbeddingContextMenu } from './EmbeddingContextMenu.js';
+export { GlobalContextMenu } from './GlobalContextMenu.js';
export { ModelContextMenuMixin } from './ModelContextMenuMixin.js';
import { LoraContextMenu } from './LoraContextMenu.js';
import { RecipeContextMenu } from './RecipeContextMenu.js';
import { CheckpointContextMenu } from './CheckpointContextMenu.js';
import { EmbeddingContextMenu } from './EmbeddingContextMenu.js';
+import { GlobalContextMenu } from './GlobalContextMenu.js';
// Factory method to create page-specific context menu instances
export function createPageContextMenu(pageType) {
@@ -23,4 +25,8 @@ export function createPageContextMenu(pageType) {
default:
return null;
}
+}
+
+export function createGlobalContextMenu() {
+ return new GlobalContextMenu();
}
\ No newline at end of file
diff --git a/static/js/components/DuplicatesManager.js b/static/js/components/DuplicatesManager.js
index f49a2d02..a2360c06 100644
--- a/static/js/components/DuplicatesManager.js
+++ b/static/js/components/DuplicatesManager.js
@@ -13,7 +13,7 @@ export class DuplicatesManager {
async findDuplicates() {
try {
- const response = await fetch('/api/recipes/find-duplicates');
+ const response = await fetch('/api/lm/recipes/find-duplicates');
if (!response.ok) {
throw new Error('Failed to find duplicates');
}
@@ -354,7 +354,7 @@ export class DuplicatesManager {
const recipeIds = Array.from(this.selectedForDeletion);
// Call API to bulk delete
- const response = await fetch('/api/recipes/bulk-delete', {
+ const response = await fetch('/api/lm/recipes/bulk-delete', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
diff --git a/static/js/components/ModelDuplicatesManager.js b/static/js/components/ModelDuplicatesManager.js
index c8879ce9..33df3779 100644
--- a/static/js/components/ModelDuplicatesManager.js
+++ b/static/js/components/ModelDuplicatesManager.js
@@ -48,7 +48,7 @@ export class ModelDuplicatesManager {
// Method to check for duplicates count using existing endpoint
async checkDuplicatesCount() {
try {
- const endpoint = `/api/${this.modelType}/find-duplicates`;
+ const endpoint = `/api/lm/${this.modelType}/find-duplicates`;
const response = await fetch(endpoint);
if (!response.ok) {
@@ -104,7 +104,7 @@ export class ModelDuplicatesManager {
async findDuplicates() {
try {
// Determine API endpoint based on model type
- const endpoint = `/api/${this.modelType}/find-duplicates`;
+ const endpoint = `/api/lm/${this.modelType}/find-duplicates`;
const response = await fetch(endpoint);
if (!response.ok) {
@@ -623,7 +623,7 @@ export class ModelDuplicatesManager {
const filePaths = Array.from(this.selectedForDeletion);
// Call API to bulk delete
- const response = await fetch(`/api/${this.modelType}/bulk-delete`, {
+ const response = await fetch(`/api/lm/${this.modelType}/bulk-delete`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -648,7 +648,7 @@ export class ModelDuplicatesManager {
// Check if there are still duplicates
try {
- const endpoint = `/api/${this.modelType}/find-duplicates`;
+ const endpoint = `/api/lm/${this.modelType}/find-duplicates`;
const dupResponse = await fetch(endpoint);
if (!dupResponse.ok) {
@@ -756,7 +756,7 @@ export class ModelDuplicatesManager {
const filePaths = group.models.map(model => model.file_path);
// Make API request to verify hashes
- const response = await fetch(`/api/${this.modelType}/verify-duplicates`, {
+ const response = await fetch(`/api/lm/${this.modelType}/verify-duplicates`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
diff --git a/static/js/components/RecipeCard.js b/static/js/components/RecipeCard.js
index 0eaa68ad..94db918d 100644
--- a/static/js/components/RecipeCard.js
+++ b/static/js/components/RecipeCard.js
@@ -46,7 +46,7 @@ class RecipeCard {
// NSFW blur logic - similar to LoraCard
const nsfwLevel = this.recipe.preview_nsfw_level !== undefined ? this.recipe.preview_nsfw_level : 0;
- const shouldBlur = state.settings.blurMatureContent && nsfwLevel > NSFW_LEVELS.PG13;
+ const shouldBlur = state.settings.blur_mature_content && nsfwLevel > NSFW_LEVELS.PG13;
if (shouldBlur) {
card.classList.add('nsfw-content');
@@ -203,7 +203,7 @@ class RecipeCard {
return;
}
- fetch(`/api/recipe/${recipeId}/syntax`)
+ fetch(`/api/lm/recipe/${recipeId}/syntax`)
.then(response => response.json())
.then(data => {
if (data.success && data.syntax) {
@@ -299,7 +299,7 @@ class RecipeCard {
deleteBtn.disabled = true;
// Call API to delete the recipe
- fetch(`/api/recipe/${recipeId}`, {
+ fetch(`/api/lm/recipe/${recipeId}`, {
method: 'DELETE',
headers: {
'Content-Type': 'application/json'
@@ -341,7 +341,7 @@ class RecipeCard {
showToast('toast.recipes.preparingForSharing', {}, 'info');
// Call the API to process the image with metadata
- fetch(`/api/recipe/${recipeId}/share`)
+ fetch(`/api/lm/recipe/${recipeId}/share`)
.then(response => {
if (!response.ok) {
throw new Error('Failed to prepare recipe for sharing');
diff --git a/static/js/components/RecipeModal.js b/static/js/components/RecipeModal.js
index ffbfa2aa..68d61eb9 100644
--- a/static/js/components/RecipeModal.js
+++ b/static/js/components/RecipeModal.js
@@ -784,7 +784,7 @@ class RecipeModal {
try {
// Fetch recipe syntax from backend
- const response = await fetch(`/api/recipe/${this.recipeId}/syntax`);
+ const response = await fetch(`/api/lm/recipe/${this.recipeId}/syntax`);
if (!response.ok) {
throw new Error(`Failed to get recipe syntax: ${response.statusText}`);
@@ -830,9 +830,9 @@ class RecipeModal {
// Determine which endpoint to use based on available data
if (lora.modelVersionId) {
- endpoint = `/api/loras/civitai/model/version/${lora.modelVersionId}`;
+ endpoint = `/api/lm/loras/civitai/model/version/${lora.modelVersionId}`;
} else if (lora.hash) {
- endpoint = `/api/loras/civitai/model/hash/${lora.hash}`;
+ endpoint = `/api/lm/loras/civitai/model/hash/${lora.hash}`;
} else {
console.error("Missing both hash and modelVersionId for lora:", lora);
return null;
@@ -1003,7 +1003,7 @@ class RecipeModal {
state.loadingManager.showSimpleLoading('Reconnecting LoRA...');
// Call API to reconnect the LoRA
- const response = await fetch('/api/recipe/lora/reconnect', {
+ const response = await fetch('/api/lm/recipe/lora/reconnect', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
diff --git a/static/js/components/alphabet/AlphabetBar.js b/static/js/components/alphabet/AlphabetBar.js
index 82113758..2d358156 100644
--- a/static/js/components/alphabet/AlphabetBar.js
+++ b/static/js/components/alphabet/AlphabetBar.js
@@ -46,7 +46,7 @@ export class AlphabetBar {
*/
async fetchLetterCounts() {
try {
- const response = await fetch('/api/loras/letter-counts');
+ const response = await fetch('/api/lm/loras/letter-counts');
if (!response.ok) {
throw new Error(`Failed to fetch letter counts: ${response.statusText}`);
diff --git a/static/js/components/initialization.js b/static/js/components/initialization.js
index e7b6818f..9a547a86 100644
--- a/static/js/components/initialization.js
+++ b/static/js/components/initialization.js
@@ -169,7 +169,7 @@ class InitializationManager {
*/
pollProgress() {
const checkProgress = () => {
- fetch('/api/init-status')
+ fetch('/api/lm/init-status')
.then(response => response.json())
.then(data => {
this.handleProgressUpdate(data);
diff --git a/static/js/components/shared/ModelCard.js b/static/js/components/shared/ModelCard.js
index 16bdf45d..17226bc0 100644
--- a/static/js/components/shared/ModelCard.js
+++ b/static/js/components/shared/ModelCard.js
@@ -1,4 +1,4 @@
-import { showToast, openCivitai, copyToClipboard, copyLoraSyntax, sendLoraToWorkflow, openExampleImagesFolder } from '../../utils/uiHelpers.js';
+import { showToast, openCivitai, copyToClipboard, copyLoraSyntax, sendLoraToWorkflow, openExampleImagesFolder, buildLoraSyntax } from '../../utils/uiHelpers.js';
import { state, getCurrentPageState } from '../../state/index.js';
import { showModelModal } from './ModelModal.js';
import { toggleShowcase } from './showcase/ShowcaseView.js';
@@ -155,8 +155,7 @@ async function toggleFavorite(card) {
function handleSendToWorkflow(card, replaceMode, modelType) {
if (modelType === MODEL_TYPES.LORA) {
const usageTips = JSON.parse(card.dataset.usage_tips || '{}');
- const strength = usageTips.strength || 1;
- const loraSyntax = ``;
+ const loraSyntax = buildLoraSyntax(card.dataset.file_name, usageTips);
sendLoraToWorkflow(loraSyntax, replaceMode, 'lora');
} else {
// Checkpoint send functionality - to be implemented
@@ -186,7 +185,7 @@ async function handleExampleImagesAccess(card, modelType) {
const modelHash = card.dataset.sha256;
try {
- const response = await fetch(`/api/has-example-images?model_hash=${modelHash}`);
+ const response = await fetch(`/api/lm/has-example-images?model_hash=${modelHash}`);
const data = await response.json();
if (data.has_images) {
@@ -216,13 +215,6 @@ function handleCardClick(card, modelType) {
}
async function showModelModalFromCard(card, modelType) {
- // Get the appropriate preview versions map
- const previewVersionsKey = modelType;
- const previewVersions = state.pages[previewVersionsKey]?.previewVersions || new Map();
- const version = previewVersions.get(card.dataset.filepath);
- const previewUrl = card.dataset.preview_url || '/loras_static/images/no-preview.png';
- const versionedPreviewUrl = version ? `${previewUrl}?t=${version}` : previewUrl;
-
// Create model metadata object
const modelMeta = {
sha256: card.dataset.sha256,
@@ -235,7 +227,6 @@ async function showModelModalFromCard(card, modelType) {
from_civitai: card.dataset.from_civitai === 'true',
base_model: card.dataset.base_model,
notes: card.dataset.notes || '',
- preview_url: versionedPreviewUrl,
favorite: card.dataset.favorite === 'true',
// Parse civitai metadata from the card's dataset
civitai: JSON.parse(card.dataset.meta || '{}'),
@@ -414,7 +405,7 @@ export function createModelCard(model, modelType) {
card.dataset.nsfwLevel = nsfwLevel;
// Determine if the preview should be blurred based on NSFW level and user settings
- const shouldBlur = state.settings.blurMatureContent && nsfwLevel > NSFW_LEVELS.PG13;
+ const shouldBlur = state.settings.blur_mature_content && nsfwLevel > NSFW_LEVELS.PG13;
if (shouldBlur) {
card.classList.add('nsfw-content');
}
@@ -442,9 +433,20 @@ export function createModelCard(model, modelType) {
}
// Check if autoplayOnHover is enabled for video previews
- const autoplayOnHover = state.global?.settings?.autoplayOnHover || false;
+ const autoplayOnHover = state.global?.settings?.autoplay_on_hover || false;
const isVideo = previewUrl.endsWith('.mp4');
- const videoAttrs = autoplayOnHover ? 'controls muted loop' : 'controls autoplay muted loop';
+ const videoAttrs = [
+ 'controls',
+ 'muted',
+ 'loop',
+ 'playsinline',
+ 'preload="none"',
+ `data-src="${versionedPreviewUrl}"`
+ ];
+
+ if (!autoplayOnHover) {
+ videoAttrs.push('data-autoplay="true"');
+ }
// Get favorite status from model data
const isFavorite = model.favorite === true;
@@ -482,9 +484,7 @@ export function createModelCard(model, modelType) {
card.innerHTML = `
${isVideo ?
- `
-
- ` :
+ `
` :
`
`
}
+
+
diff --git a/templates/components/modals/settings_modal.html b/templates/components/modals/settings_modal.html
index 81b31228..b38196b1 100644
--- a/templates/components/modals/settings_modal.html
+++ b/templates/components/modals/settings_modal.html
@@ -255,6 +255,7 @@
{{ t('settings.downloadPathTemplates.templateOptions.baseModelFirstTag') }}
{{ t('settings.downloadPathTemplates.templateOptions.baseModelAuthor') }}
{{ t('settings.downloadPathTemplates.templateOptions.authorFirstTag') }}
+ {{ t('settings.downloadPathTemplates.templateOptions.baseModelAuthorFirstTag') }}
{{ t('settings.downloadPathTemplates.templateOptions.customTemplate') }}
@@ -281,6 +282,7 @@
{{ t('settings.downloadPathTemplates.templateOptions.baseModelFirstTag') }}
{{ t('settings.downloadPathTemplates.templateOptions.baseModelAuthor') }}
{{ t('settings.downloadPathTemplates.templateOptions.authorFirstTag') }}
+ {{ t('settings.downloadPathTemplates.templateOptions.baseModelAuthorFirstTag') }}
{{ t('settings.downloadPathTemplates.templateOptions.customTemplate') }}
@@ -307,6 +309,7 @@
{{ t('settings.downloadPathTemplates.templateOptions.baseModelFirstTag') }}
{{ t('settings.downloadPathTemplates.templateOptions.baseModelAuthor') }}
{{ t('settings.downloadPathTemplates.templateOptions.authorFirstTag') }}
+ {{ t('settings.downloadPathTemplates.templateOptions.baseModelAuthorFirstTag') }}
{{ t('settings.downloadPathTemplates.templateOptions.customTemplate') }}
@@ -446,6 +449,129 @@
+
+
+
{{ t('settings.sections.proxySettings') }}
+
+
+
+
+ {{ t('settings.proxySettings.enableProxy') }}
+
+
+
+
+
+
+
+
+
+ {{ t('settings.proxySettings.enableProxyHelp') }}
+
+
+
+
+
+
+
+ {{ t('settings.proxySettings.proxyType') }}
+
+
+
+ HTTP
+ HTTPS
+ SOCKS4
+ SOCKS5
+
+
+
+
+ {{ t('settings.proxySettings.proxyTypeHelp') }}
+
+
+
+
+
+
+ {{ t('settings.proxySettings.proxyHost') }}
+
+
+
+
+ {{ t('settings.proxySettings.proxyHostHelp') }}
+
+
+
+
+
+
+ {{ t('settings.proxySettings.proxyPort') }}
+
+
+
+
+ {{ t('settings.proxySettings.proxyPortHelp') }}
+
+
+
+
+
+
+ {{ t('settings.proxySettings.proxyUsername') }}
+
+
+
+
+ {{ t('settings.proxySettings.proxyUsernameHelp') }}
+
+
+
+
+
+
+ {{ t('settings.proxySettings.proxyPassword') }}
+
+
+
+
+ {{ t('settings.proxySettings.proxyPasswordHelp') }}
+
+
+
+
+
{{ t('settings.sections.misc') }}
@@ -467,6 +593,7 @@
+
diff --git a/test_i18n.py b/test_i18n.py
deleted file mode 100644
index f05ed6c9..00000000
--- a/test_i18n.py
+++ /dev/null
@@ -1,998 +0,0 @@
-#!/usr/bin/env python3
-"""
-Test script to verify the updated i18n system works correctly.
-This tests both JavaScript loading and Python server-side functionality.
-"""
-
-import os
-import sys
-import json
-import re
-import glob
-from typing import Set, Dict, List, Tuple, Any
-
-# Add the parent directory to the path so we can import the modules
-sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
-
-def test_json_files_exist():
- """Test that all JSON locale files exist and are valid JSON."""
- print("Testing JSON locale files...")
- return test_json_structure_validation()
-
-def test_locale_files_structural_consistency():
- """Test that all locale files have identical structure, line counts, and formatting."""
- print("\nTesting locale files structural consistency...")
-
- locales_dir = os.path.join(os.path.dirname(__file__), 'locales')
- if not os.path.exists(locales_dir):
- print("❌ Locales directory does not exist!")
- return False
-
- # Get all locale files
- locale_files = []
- for file in os.listdir(locales_dir):
- if file.endswith('.json'):
- locale_files.append(file)
-
- if not locale_files:
- print("❌ No locale files found!")
- return False
-
- # Use en.json as the reference
- reference_file = 'en.json'
- if reference_file not in locale_files:
- print(f"❌ Reference file {reference_file} not found!")
- return False
-
- locale_files.remove(reference_file)
- locale_files.insert(0, reference_file) # Put reference first
-
- success = True
-
- # Load and parse the reference file
- reference_path = os.path.join(locales_dir, reference_file)
- try:
- with open(reference_path, 'r', encoding='utf-8') as f:
- reference_lines = f.readlines()
- reference_content = ''.join(reference_lines)
-
- reference_data = json.loads(reference_content)
- reference_structure = get_json_structure(reference_data)
-
- print(f"📋 Reference file {reference_file}:")
- print(f" Lines: {len(reference_lines)}")
- print(f" Keys: {len(get_all_translation_keys(reference_data))}")
-
- except Exception as e:
- print(f"❌ Error reading reference file {reference_file}: {e}")
- return False
-
- # Compare each locale file with the reference
- for locale_file in locale_files[1:]: # Skip reference file
- locale_path = os.path.join(locales_dir, locale_file)
- locale_name = locale_file.replace('.json', '')
-
- try:
- with open(locale_path, 'r', encoding='utf-8') as f:
- locale_lines = f.readlines()
- locale_content = ''.join(locale_lines)
-
- locale_data = json.loads(locale_content)
- locale_structure = get_json_structure(locale_data)
-
- # Test 1: Line count consistency
- if len(locale_lines) != len(reference_lines):
- print(f"❌ {locale_name}: Line count mismatch!")
- print(f" Reference: {len(reference_lines)} lines")
- print(f" {locale_name}: {len(locale_lines)} lines")
- success = False
- continue
-
- # Test 2: Structural consistency (key order and nesting)
- structure_issues = compare_json_structures(reference_structure, locale_structure)
- if structure_issues:
- print(f"❌ {locale_name}: Structure mismatch!")
- for issue in structure_issues[:5]: # Show first 5 issues
- print(f" - {issue}")
- if len(structure_issues) > 5:
- print(f" ... and {len(structure_issues) - 5} more issues")
- success = False
- continue
-
- # Test 3: Line-by-line format consistency (excluding translation values)
- format_issues = compare_line_formats(reference_lines, locale_lines, locale_name)
- if format_issues:
- print(f"❌ {locale_name}: Format mismatch!")
- for issue in format_issues[:5]: # Show first 5 issues
- print(f" - {issue}")
- if len(format_issues) > 5:
- print(f" ... and {len(format_issues) - 5} more issues")
- success = False
- continue
-
- # Test 4: Key completeness
- reference_keys = get_all_translation_keys(reference_data)
- locale_keys = get_all_translation_keys(locale_data)
-
- missing_keys = reference_keys - locale_keys
- extra_keys = locale_keys - reference_keys
-
- if missing_keys or extra_keys:
- print(f"❌ {locale_name}: Key mismatch!")
- if missing_keys:
- print(f" Missing {len(missing_keys)} keys")
- if extra_keys:
- print(f" Extra {len(extra_keys)} keys")
- success = False
- continue
-
- print(f"✅ {locale_name}: Structure and format consistent")
-
- except json.JSONDecodeError as e:
- print(f"❌ {locale_name}: Invalid JSON syntax: {e}")
- success = False
- except Exception as e:
- print(f"❌ {locale_name}: Error during validation: {e}")
- success = False
-
- if success:
- print(f"\n✅ All {len(locale_files)} locale files have consistent structure and formatting")
-
- return success
-
-def get_json_structure(data: Any, path: str = '') -> Dict[str, Any]:
- """
- Extract the structural information from JSON data.
- Returns a dictionary describing the structure without the actual values.
- """
- if isinstance(data, dict):
- structure = {}
- for key, value in data.items():
- current_path = f"{path}.{key}" if path else key
- if isinstance(value, dict):
- structure[key] = get_json_structure(value, current_path)
- elif isinstance(value, list):
- structure[key] = {'_type': 'array', '_length': len(value)}
- if value: # If array is not empty, analyze first element
- structure[key]['_element_type'] = get_json_structure(value[0], f"{current_path}[0]")
- else:
- structure[key] = {'_type': type(value).__name__}
- return structure
- elif isinstance(data, list):
- return {'_type': 'array', '_length': len(data)}
- else:
- return {'_type': type(data).__name__}
-
-def compare_json_structures(ref_structure: Dict[str, Any], locale_structure: Dict[str, Any], path: str = '') -> List[str]:
- """
- Compare two JSON structures and return a list of differences.
- """
- issues = []
-
- # Check for missing keys in locale
- for key in ref_structure:
- current_path = f"{path}.{key}" if path else key
- if key not in locale_structure:
- issues.append(f"Missing key: {current_path}")
- elif isinstance(ref_structure[key], dict) and '_type' not in ref_structure[key]:
- # It's a nested object, recurse
- if isinstance(locale_structure[key], dict) and '_type' not in locale_structure[key]:
- issues.extend(compare_json_structures(ref_structure[key], locale_structure[key], current_path))
- else:
- issues.append(f"Structure mismatch at {current_path}: expected object, got {type(locale_structure[key])}")
- elif ref_structure[key] != locale_structure[key]:
- issues.append(f"Type mismatch at {current_path}: expected {ref_structure[key]}, got {locale_structure[key]}")
-
- # Check for extra keys in locale
- for key in locale_structure:
- current_path = f"{path}.{key}" if path else key
- if key not in ref_structure:
- issues.append(f"Extra key: {current_path}")
-
- return issues
-
-def extract_line_structure(line: str) -> Dict[str, str]:
- """
- Extract structural elements from a JSON line.
- Returns indentation, key (if present), and structural characters.
- """
- # Get indentation (leading whitespace)
- indentation = len(line) - len(line.lstrip())
-
- # Remove leading/trailing whitespace for analysis
- stripped_line = line.strip()
-
- # Extract key if this is a key-value line
- key_match = re.match(r'^"([^"]+)"\s*:\s*', stripped_line)
- key = key_match.group(1) if key_match else ''
-
- # Extract structural characters (everything except the actual translation value)
- if key:
- # For key-value lines, extract everything except the value
- # Handle string values in quotes with better escaping support
- value_pattern = r'^"[^"]+"\s*:\s*("(?:[^"\\]|\\.)*")(.*?)$'
- value_match = re.match(value_pattern, stripped_line)
- if value_match:
- # Preserve the structure but replace the actual string content
- structural_chars = f'"{key}": "VALUE"{value_match.group(2)}'
- else:
- # Handle non-string values (objects, arrays, booleans, numbers)
- colon_pos = stripped_line.find(':')
- if colon_pos != -1:
- after_colon = stripped_line[colon_pos + 1:].strip()
- if after_colon.startswith('"'):
- # String value - find the end quote with proper escaping
- end_quote = find_closing_quote(after_colon, 1)
- if end_quote != -1:
- structural_chars = f'"{key}": "VALUE"{after_colon[end_quote + 1:]}'
- else:
- structural_chars = f'"{key}": "VALUE"'
- elif after_colon.startswith('{'):
- # Object value
- structural_chars = f'"{key}": {{'
- elif after_colon.startswith('['):
- # Array value
- structural_chars = f'"{key}": ['
- else:
- # Other values (numbers, booleans, null)
- # Replace the actual value with a placeholder
- value_end = find_value_end(after_colon)
- structural_chars = f'"{key}": VALUE{after_colon[value_end:]}'
- else:
- structural_chars = stripped_line
- else:
- # For non key-value lines (brackets, etc.), keep as-is
- structural_chars = stripped_line
-
- return {
- 'indentation': str(indentation),
- 'key': key,
- 'structural_chars': structural_chars
- }
-
-def find_value_end(text: str) -> int:
- """
- Find the end of a non-string JSON value (number, boolean, null).
- """
- for i, char in enumerate(text):
- if char in ',}]':
- return i
- return len(text)
-
-def find_closing_quote(text: str, start: int) -> int:
- """
- Find the position of the closing quote, handling escaped quotes properly.
- """
- i = start
- while i < len(text):
- if text[i] == '"':
- # Count preceding backslashes
- backslash_count = 0
- j = i - 1
- while j >= 0 and text[j] == '\\':
- backslash_count += 1
- j -= 1
-
- # If even number of backslashes (including 0), the quote is not escaped
- if backslash_count % 2 == 0:
- return i
- i += 1
- return -1
-
-def compare_line_formats(ref_lines: List[str], locale_lines: List[str], locale_name: str) -> List[str]:
- """
- Compare line-by-line formatting between reference and locale files.
- Only checks structural elements (indentation, brackets, commas) and ignores translation values.
- """
- issues = []
-
- for i, (ref_line, locale_line) in enumerate(zip(ref_lines, locale_lines)):
- line_num = i + 1
-
- # Skip empty lines and lines with only whitespace
- if not ref_line.strip() and not locale_line.strip():
- continue
-
- # Extract structural elements from each line
- ref_structure = extract_line_structure(ref_line)
- locale_structure = extract_line_structure(locale_line)
-
- # Compare structural elements with more tolerance
- structure_issues = []
-
- # Check indentation (must be exact)
- if ref_structure['indentation'] != locale_structure['indentation']:
- structure_issues.append(f"indentation ({ref_structure['indentation']} vs {locale_structure['indentation']})")
-
- # Check keys (must be exact for structural consistency)
- if ref_structure['key'] != locale_structure['key']:
- structure_issues.append(f"key ('{ref_structure['key']}' vs '{locale_structure['key']}')")
-
- # Check structural characters with improved normalization
- ref_normalized = normalize_structural_chars(ref_structure['structural_chars'])
- locale_normalized = normalize_structural_chars(locale_structure['structural_chars'])
-
- if ref_normalized != locale_normalized:
- # Additional check: if both lines have the same key and similar structure,
- # this might be a false positive due to translation content differences
- if (ref_structure['key'] and locale_structure['key'] and
- ref_structure['key'] == locale_structure['key']):
-
- # Check if the difference is only in the translation value
- ref_has_string_value = '"VALUE"' in ref_normalized
- locale_has_string_value = '"VALUE"' in locale_normalized
-
- if ref_has_string_value and locale_has_string_value:
- # Both have string values, check if structure around value is same
- ref_structure_only = re.sub(r'"VALUE"', '"X"', ref_normalized)
- locale_structure_only = re.sub(r'"VALUE"', '"X"', locale_normalized)
-
- if ref_structure_only == locale_structure_only:
- # Structure is actually the same, skip this as false positive
- continue
-
- structure_issues.append(f"structure ('{ref_normalized}' vs '{locale_normalized}')")
-
- if structure_issues:
- issues.append(f"Line {line_num}: {', '.join(structure_issues)}")
-
- return issues
-
-def normalize_structural_chars(structural_chars: str) -> str:
- """
- Normalize structural characters for comparison by replacing variable content
- with placeholders while preserving the actual structure.
- """
- # Normalize the structural characters more carefully
- normalized = structural_chars
-
- # Replace quoted strings with a consistent placeholder, handling escapes
- # This regex matches strings while properly handling escaped quotes
- string_pattern = r'"(?:[^"\\]|\\.)*"(?=\s*[,}\]:}]|$)'
-
- # Find all string matches and replace with placeholder
- strings = re.findall(string_pattern, normalized)
- for string_match in strings:
- # Only replace if this looks like a translation value, not a key
- if ':' in normalized:
- # Check if this string comes after a colon (likely a value)
- parts = normalized.split(':', 1)
- if len(parts) == 2 and string_match in parts[1]:
- normalized = normalized.replace(string_match, '"VALUE"', 1)
-
- # Normalize whitespace around structural characters
- normalized = re.sub(r'\s*:\s*', ': ', normalized)
- normalized = re.sub(r'\s*,\s*', ', ', normalized)
- normalized = re.sub(r'\s*{\s*', '{ ', normalized)
- normalized = re.sub(r'\s*}\s*', ' }', normalized)
-
- return normalized.strip()
-
-def test_locale_files_formatting_consistency():
- """Test that all locale files have identical formatting (whitespace, indentation, etc.)."""
- print("\nTesting locale files formatting consistency...")
-
- locales_dir = os.path.join(os.path.dirname(__file__), 'locales')
- expected_locales = ['en', 'zh-CN', 'zh-TW', 'ja', 'ru', 'de', 'fr', 'es', 'ko']
-
- # Read reference file (en.json)
- reference_path = os.path.join(locales_dir, 'en.json')
- try:
- with open(reference_path, 'r', encoding='utf-8') as f:
- reference_lines = f.readlines()
- except Exception as e:
- print(f"❌ Error reading reference file: {e}")
- return False
-
- success = True
-
- # Compare each locale file
- for locale in expected_locales[1:]: # Skip 'en' as it's the reference
- locale_path = os.path.join(locales_dir, f'{locale}.json')
-
- if not os.path.exists(locale_path):
- print(f"❌ {locale}.json does not exist!")
- success = False
- continue
-
- try:
- with open(locale_path, 'r', encoding='utf-8') as f:
- locale_lines = f.readlines()
-
- # Compare line count
- if len(locale_lines) != len(reference_lines):
- print(f"❌ {locale}.json: Line count differs from reference")
- print(f" Reference: {len(reference_lines)} lines")
- print(f" {locale}: {len(locale_lines)} lines")
- success = False
- continue
-
- # Compare formatting with improved algorithm
- formatting_issues = compare_line_formats(reference_lines, locale_lines, locale)
-
- if formatting_issues:
- print(f"❌ {locale}.json: Formatting issues found")
- # Show only the first few issues to avoid spam
- shown_issues = 0
- for issue in formatting_issues:
- if shown_issues < 3: # Reduced from 5 to 3
- print(f" - {issue}")
- shown_issues += 1
- else:
- break
-
- if len(formatting_issues) > 3:
- print(f" ... and {len(formatting_issues) - 3} more issues")
-
- # Provide debug info for first issue to help identify false positives
- if formatting_issues:
- first_issue = formatting_issues[0]
- line_match = re.match(r'Line (\d+):', first_issue)
- if line_match:
- line_num = int(line_match.group(1)) - 1 # Convert to 0-based
- if 0 <= line_num < len(reference_lines):
- print(f" Debug - Reference line {line_num + 1}: {repr(reference_lines[line_num].rstrip())}")
- print(f" Debug - {locale} line {line_num + 1}: {repr(locale_lines[line_num].rstrip())}")
-
- success = False
- else:
- print(f"✅ {locale}.json: Formatting consistent with reference")
-
- except Exception as e:
- print(f"❌ Error validating {locale}.json: {e}")
- success = False
-
- if success:
- print("✅ All locale files have consistent formatting")
- else:
- print("💡 Note: Some formatting differences may be false positives due to translation content.")
- print(" If translations are correct but structure appears different, the test may need refinement.")
-
- return success
-
-def test_locale_key_ordering():
- """Test that all locale files maintain the same key ordering as the reference."""
- print("\nTesting locale files key ordering...")
-
- locales_dir = os.path.join(os.path.dirname(__file__), 'locales')
- expected_locales = ['en', 'zh-CN', 'zh-TW', 'ja', 'ru', 'de', 'fr', 'es', 'ko']
-
- # Load reference file
- reference_path = os.path.join(locales_dir, 'en.json')
- try:
- with open(reference_path, 'r', encoding='utf-8') as f:
- reference_data = json.load(f, object_pairs_hook=lambda x: x) # Preserve order
-
- reference_key_order = get_key_order(reference_data)
- except Exception as e:
- print(f"❌ Error reading reference file: {e}")
- return False
-
- success = True
-
- for locale in expected_locales[1:]: # Skip 'en' as it's the reference
- locale_path = os.path.join(locales_dir, f'{locale}.json')
-
- if not os.path.exists(locale_path):
- continue
-
- try:
- with open(locale_path, 'r', encoding='utf-8') as f:
- locale_data = json.load(f, object_pairs_hook=lambda x: x) # Preserve order
-
- locale_key_order = get_key_order(locale_data)
-
- if reference_key_order != locale_key_order:
- print(f"❌ {locale}.json: Key ordering differs from reference")
-
- # Find the first difference
- for i, (ref_key, locale_key) in enumerate(zip(reference_key_order, locale_key_order)):
- if ref_key != locale_key:
- print(f" First difference at position {i}: '{ref_key}' vs '{locale_key}'")
- break
-
- success = False
- else:
- print(f"✅ {locale}.json: Key ordering matches reference")
-
- except Exception as e:
- print(f"❌ Error validating {locale}.json key ordering: {e}")
- success = False
-
- return success
-
-def get_key_order(data: Any, path: str = '') -> List[str]:
- """
- Extract the order of keys from nested JSON data.
- Returns a list of all keys in their order of appearance.
- """
- keys = []
-
- if isinstance(data, list):
- # Handle list of key-value pairs (from object_pairs_hook)
- for key, value in data:
- current_path = f"{path}.{key}" if path else key
- keys.append(current_path)
- if isinstance(value, list): # Nested object as list of pairs
- keys.extend(get_key_order(value, current_path))
- elif isinstance(data, dict):
- for key, value in data.items():
- current_path = f"{path}.{key}" if path else key
- keys.append(current_path)
- if isinstance(value, (dict, list)):
- keys.extend(get_key_order(value, current_path))
-
- return keys
-
-def test_server_i18n():
- """Test the Python server-side i18n system."""
- print("\nTesting Python server-side i18n...")
-
- try:
- from py.services.server_i18n import ServerI18nManager
-
- # Create a new instance to test
- i18n = ServerI18nManager()
-
- # Test that translations loaded
- available_locales = i18n.get_available_locales()
- if not available_locales:
- print("❌ No locales loaded in server i18n!")
- return False
-
- print(f"✅ Loaded {len(available_locales)} locales: {', '.join(available_locales)}")
-
- # Test English translations
- i18n.set_locale('en')
- test_key = 'common.status.loading'
- translation = i18n.get_translation(test_key)
- if translation == test_key:
- print(f"❌ Translation not found for key '{test_key}'")
- return False
-
- print(f"✅ English translation for '{test_key}': '{translation}'")
-
- # Test Chinese translations
- i18n.set_locale('zh-CN')
- translation_cn = i18n.get_translation(test_key)
- if translation_cn == test_key:
- print(f"❌ Chinese translation not found for key '{test_key}'")
- return False
-
- print(f"✅ Chinese translation for '{test_key}': '{translation_cn}'")
-
- # Test parameter interpolation
- param_key = 'common.itemCount'
- translation_with_params = i18n.get_translation(param_key, count=42)
- if '{count}' in translation_with_params:
- print(f"❌ Parameter interpolation failed for key '{param_key}'")
- return False
-
- print(f"✅ Parameter interpolation for '{param_key}': '{translation_with_params}'")
-
- print("✅ Server-side i18n system working correctly")
- return True
-
- except Exception as e:
- print(f"❌ Error testing server i18n: {e}")
- import traceback
- traceback.print_exc()
- return False
-
-def test_translation_completeness():
- """Test that all languages have the same translation keys."""
- print("\nTesting translation completeness...")
-
- locales_dir = os.path.join(os.path.dirname(__file__), 'locales')
-
- # Load English as reference
- with open(os.path.join(locales_dir, 'en.json'), 'r', encoding='utf-8') as f:
- en_data = json.load(f)
-
- en_keys = get_all_translation_keys(en_data)
- print(f"English has {len(en_keys)} translation keys")
-
- # Check other languages
- locales = ['zh-CN', 'zh-TW', 'ja', 'ru', 'de', 'fr', 'es', 'ko']
-
- for locale in locales:
- with open(os.path.join(locales_dir, f'{locale}.json'), 'r', encoding='utf-8') as f:
- locale_data = json.load(f)
-
- locale_keys = get_all_translation_keys(locale_data)
-
- missing_keys = en_keys - locale_keys
- extra_keys = locale_keys - en_keys
-
- if missing_keys:
- print(f"❌ {locale} missing keys: {len(missing_keys)}")
- # Print first few missing keys
- for key in sorted(missing_keys)[:5]:
- print(f" - {key}")
- if len(missing_keys) > 5:
- print(f" ... and {len(missing_keys) - 5} more")
-
- if extra_keys:
- print(f"⚠️ {locale} has extra keys: {len(extra_keys)}")
-
- if not missing_keys and not extra_keys:
- print(f"✅ {locale} has complete translations ({len(locale_keys)} keys)")
-
- return True
-
-
-def extract_i18n_keys_from_js(file_path: str) -> Set[str]:
- """Extract translation keys from JavaScript files."""
- keys = set()
-
- try:
- with open(file_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- # Remove comments to avoid false positives
- # Remove single-line comments
- content = re.sub(r'//.*$', '', content, flags=re.MULTILINE)
- # Remove multi-line comments
- content = re.sub(r'/\*.*?\*/', '', content, flags=re.DOTALL)
-
- # Pattern for translate() function calls - more specific
- # Matches: translate('key.name', ...) or translate("key.name", ...)
- # Must have opening parenthesis immediately after translate
- translate_pattern = r"\btranslate\s*\(\s*['\"]([a-zA-Z0-9._-]+)['\"]"
- translate_matches = re.findall(translate_pattern, content)
-
- # Filter out single words that are likely not translation keys
- # Translation keys should typically have dots or be in specific namespaces
- filtered_translate = [key for key in translate_matches if '.' in key or key in [
- 'loading', 'error', 'success', 'warning', 'info', 'cancel', 'save', 'delete'
- ]]
- keys.update(filtered_translate)
-
- # Pattern for showToast() function calls - more specific
- # Matches: showToast('key.name', ...) or showToast("key.name", ...)
- showtoast_pattern = r"\bshowToast\s*\(\s*['\"]([a-zA-Z0-9._-]+)['\"]"
- showtoast_matches = re.findall(showtoast_pattern, content)
-
- # Filter showToast matches as well
- filtered_showtoast = [key for key in showtoast_matches if '.' in key or key in [
- 'loading', 'error', 'success', 'warning', 'info', 'cancel', 'save', 'delete'
- ]]
- keys.update(filtered_showtoast)
-
- # Additional patterns for other i18n function calls you might have
- # Pattern for t() function calls (if used in JavaScript)
- t_pattern = r"\bt\s*\(\s*['\"]([a-zA-Z0-9._-]+)['\"]"
- t_matches = re.findall(t_pattern, content)
- filtered_t = [key for key in t_matches if '.' in key or key in [
- 'loading', 'error', 'success', 'warning', 'info', 'cancel', 'save', 'delete'
- ]]
- keys.update(filtered_t)
-
- except Exception as e:
- print(f"⚠️ Error reading {file_path}: {e}")
-
- return keys
-
-
-def extract_i18n_keys_from_html(file_path: str) -> Set[str]:
- """Extract translation keys from HTML template files."""
- keys = set()
-
- try:
- with open(file_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- # Remove HTML comments to avoid false positives
- content = re.sub(r'', '', content, flags=re.DOTALL)
-
- # Pattern for t() function calls in Jinja2 templates
- # Matches: {{ t('key.name') }} or {% ... t('key.name') ... %}
- # More specific pattern that ensures we're in template context
- t_pattern = r"(?:\{\{|\{%)[^}]*\bt\s*\(\s*['\"]([a-zA-Z0-9._-]+)['\"][^}]*(?:\}\}|%\})"
- t_matches = re.findall(t_pattern, content)
-
- # Filter HTML matches
- filtered_t = [key for key in t_matches if '.' in key or key in [
- 'loading', 'error', 'success', 'warning', 'info', 'cancel', 'save', 'delete'
- ]]
- keys.update(filtered_t)
-
- # Also check for translate() calls in script tags within HTML
- script_pattern = r''
- script_matches = re.findall(script_pattern, content, flags=re.DOTALL)
- for script_content in script_matches:
- # Apply JavaScript extraction to script content
- translate_pattern = r"\btranslate\s*\(\s*['\"]([a-zA-Z0-9._-]+)['\"]"
- script_translate_matches = re.findall(translate_pattern, script_content)
- filtered_script = [key for key in script_translate_matches if '.' in key]
- keys.update(filtered_script)
-
- except Exception as e:
- print(f"⚠️ Error reading {file_path}: {e}")
-
- return keys
-
-
-def get_all_translation_keys(data: dict, prefix: str = '', include_containers: bool = False) -> Set[str]:
- """
- Recursively collect translation keys.
- By default only leaf keys (where the value is NOT a dict) are returned so that
- structural/container nodes (e.g. 'common', 'common.actions') are not treated
- as real translation entries and won't appear in the 'unused' list.
-
- Set include_containers=True to also include container/object nodes.
- """
- keys: Set[str] = set()
- if not isinstance(data, dict):
- return keys
- for key, value in data.items():
- full_key = f"{prefix}.{key}" if prefix else key
- if isinstance(value, dict):
- # Recurse first
- keys.update(get_all_translation_keys(value, full_key, include_containers))
- # Optionally include container nodes
- if include_containers:
- keys.add(full_key)
- else:
- # Leaf node: actual translatable value
- keys.add(full_key)
- return keys
-
-
-def test_static_code_analysis():
- """Test static code analysis to detect missing translation keys."""
- # print("\nTesting static code analysis for translation keys...")
-
- # Load English translations as reference
- locales_dir = os.path.join(os.path.dirname(__file__), 'locales')
- with open(os.path.join(locales_dir, 'en.json'), 'r', encoding='utf-8') as f:
- en_data = json.load(f)
-
- available_keys = get_all_translation_keys(en_data)
- # print(f"Available translation keys in en.json: {len(available_keys)}")
-
- # Known false positives to exclude from analysis
- # These are typically HTML attributes, CSS classes, or other non-translation strings
- false_positives = {
- 'checkpoint', 'civitai_api_key', 'div', 'embedding', 'lora', 'show_only_sfw',
- 'model', 'type', 'name', 'value', 'id', 'class', 'style', 'src', 'href',
- 'data', 'width', 'height', 'size', 'format', 'version', 'url', 'path',
- 'file', 'folder', 'image', 'text', 'number', 'boolean', 'array', 'object', 'non.existent.key'
- }
-
- # Special translation keys used in uiHelpers.js but not detected by regex
- uihelpers_special_keys = {
- 'uiHelpers.workflow.loraAdded',
- 'uiHelpers.workflow.loraReplaced',
- 'uiHelpers.workflow.loraFailedToSend',
- 'uiHelpers.workflow.recipeAdded',
- 'uiHelpers.workflow.recipeReplaced',
- 'uiHelpers.workflow.recipeFailedToSend',
- }
-
- # Extract keys from JavaScript files
- js_dir = os.path.join(os.path.dirname(__file__), 'static', 'js')
- js_files = []
- if os.path.exists(js_dir):
- # Recursively find all JS files
- for root, dirs, files in os.walk(js_dir):
- for file in files:
- if file.endswith('.js'):
- js_files.append(os.path.join(root, file))
-
- js_keys = set()
- js_files_with_keys = []
- for js_file in js_files:
- file_keys = extract_i18n_keys_from_js(js_file)
- # Filter out false positives
- file_keys = file_keys - false_positives
- js_keys.update(file_keys)
- if file_keys:
- rel_path = os.path.relpath(js_file, os.path.dirname(__file__))
- js_files_with_keys.append((rel_path, len(file_keys)))
- # print(f" Found {len(file_keys)} keys in {rel_path}")
-
- # print(f"Total unique keys found in JavaScript files: {len(js_keys)}")
-
- # Extract keys from HTML template files
- templates_dir = os.path.join(os.path.dirname(__file__), 'templates')
- html_files = []
- if os.path.exists(templates_dir):
- html_files = glob.glob(os.path.join(templates_dir, '*.html'))
- # Also check for HTML files in subdirectories
- html_files.extend(glob.glob(os.path.join(templates_dir, '**', '*.html'), recursive=True))
-
- html_keys = set()
- html_files_with_keys = []
- for html_file in html_files:
- file_keys = extract_i18n_keys_from_html(html_file)
- # Filter out false positives
- file_keys = file_keys - false_positives
- html_keys.update(file_keys)
- if file_keys:
- rel_path = os.path.relpath(html_file, os.path.dirname(__file__))
- html_files_with_keys.append((rel_path, len(file_keys)))
- # print(f" Found {len(file_keys)} keys in {rel_path}")
-
- # print(f"Total unique keys found in HTML templates: {len(html_keys)}")
-
- # Combine all used keys
- all_used_keys = js_keys.union(html_keys)
- # Add special keys from uiHelpers.js
- all_used_keys.update(uihelpers_special_keys)
- # print(f"Total unique keys used in code: {len(all_used_keys)}")
-
- # Check for missing keys
- missing_keys = all_used_keys - available_keys
- unused_keys = available_keys - all_used_keys
-
- success = True
-
- if missing_keys:
- print(f"\n❌ Found {len(missing_keys)} missing translation keys:")
- for key in sorted(missing_keys):
- print(f" - {key}")
- success = False
-
- # Group missing keys by category for better analysis
- key_categories = {}
- for key in missing_keys:
- category = key.split('.')[0] if '.' in key else 'root'
- if category not in key_categories:
- key_categories[category] = []
- key_categories[category].append(key)
-
- print(f"\n Missing keys by category:")
- for category, keys in sorted(key_categories.items()):
- print(f" {category}: {len(keys)} keys")
-
- # Provide helpful suggestion
- print(f"\n💡 If these are false positives, add them to the false_positives set in test_static_code_analysis()")
- else:
- print("\n✅ All translation keys used in code are available in en.json")
-
- if unused_keys:
- print(f"\n⚠️ Found {len(unused_keys)} unused translation keys in en.json:")
- # Only show first 20 to avoid cluttering output
- for key in sorted(unused_keys)[:20]:
- print(f" - {key}")
- if len(unused_keys) > 20:
- print(f" ... and {len(unused_keys) - 20} more")
-
- # Group unused keys by category for better analysis
- unused_categories = {}
- for key in unused_keys:
- category = key.split('.')[0] if '.' in key else 'root'
- if category not in unused_categories:
- unused_categories[category] = []
- unused_categories[category].append(key)
-
- print(f"\n Unused keys by category:")
- for category, keys in sorted(unused_categories.items()):
- print(f" {category}: {len(keys)} keys")
-
- # Summary statistics
- # print(f"\n📊 Static Code Analysis Summary:")
- # print(f" JavaScript files analyzed: {len(js_files)}")
- # print(f" JavaScript files with translations: {len(js_files_with_keys)}")
- # print(f" HTML template files analyzed: {len(html_files)}")
- # print(f" HTML template files with translations: {len(html_files_with_keys)}")
- # print(f" Translation keys in en.json: {len(available_keys)}")
- # print(f" Translation keys used in code: {len(all_used_keys)}")
- # print(f" Usage coverage: {len(all_used_keys)/len(available_keys)*100:.1f}%")
-
- return success
-
-
-def test_json_structure_validation():
- """Test JSON file structure and syntax validation."""
- print("\nTesting JSON file structure and syntax validation...")
-
- locales_dir = os.path.join(os.path.dirname(__file__), 'locales')
- if not os.path.exists(locales_dir):
- print("❌ Locales directory does not exist!")
- return False
-
- expected_locales = ['en', 'zh-CN', 'zh-TW', 'ja', 'ru', 'de', 'fr', 'es', 'ko']
- success = True
-
- for locale in expected_locales:
- file_path = os.path.join(locales_dir, f'{locale}.json')
- if not os.path.exists(file_path):
- print(f"❌ {locale}.json does not exist!")
- success = False
- continue
-
- try:
- with open(file_path, 'r', encoding='utf-8') as f:
- data = json.load(f)
-
- # Check for valid JSON structure
- if not isinstance(data, dict):
- print(f"❌ {locale}.json root must be an object/dictionary")
- success = False
- continue
-
- # Check that required sections exist
- required_sections = ['common', 'header', 'loras', 'recipes', 'modals']
- missing_sections = []
- for section in required_sections:
- if section not in data:
- missing_sections.append(section)
-
- if missing_sections:
- print(f"❌ {locale}.json missing required sections: {', '.join(missing_sections)}")
- success = False
-
- # Check for empty values
- empty_values = []
- def check_empty_values(obj, path=''):
- if isinstance(obj, dict):
- for key, value in obj.items():
- current_path = f"{path}.{key}" if path else key
- if isinstance(value, dict):
- check_empty_values(value, current_path)
- elif isinstance(value, str) and not value.strip():
- empty_values.append(current_path)
- elif value is None:
- empty_values.append(current_path)
-
- check_empty_values(data)
-
- if empty_values:
- print(f"⚠️ {locale}.json has {len(empty_values)} empty translation values:")
- for path in empty_values[:5]: # Show first 5
- print(f" - {path}")
- if len(empty_values) > 5:
- print(f" ... and {len(empty_values) - 5} more")
-
- print(f"✅ {locale}.json structure is valid")
-
- except json.JSONDecodeError as e:
- print(f"❌ {locale}.json has invalid JSON syntax: {e}")
- success = False
- except Exception as e:
- print(f"❌ Error validating {locale}.json: {e}")
- success = False
-
- return success
-
-def main():
- """Run all tests."""
- print("🚀 Testing updated i18n system...\n")
-
- success = True
-
- # Test JSON files structure and syntax
- if not test_json_files_exist():
- success = False
-
- # Test comprehensive structural consistency
- if not test_locale_files_structural_consistency():
- success = False
-
- # Test formatting consistency
- if not test_locale_files_formatting_consistency():
- success = False
-
- # Test key ordering
- if not test_locale_key_ordering():
- success = False
-
- # Test server i18n
- if not test_server_i18n():
- success = False
-
- # Test translation completeness
- if not test_translation_completeness():
- success = False
-
- # Test static code analysis
- if not test_static_code_analysis():
- success = False
-
- print(f"\n{'🎉 All tests passed!' if success else '❌ Some tests failed!'}")
- return success
-
-if __name__ == '__main__':
- main()
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 00000000..58263c8a
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,222 @@
+import types
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional, Sequence
+import asyncio
+import inspect
+from unittest import mock
+import sys
+
+import pytest
+
+# Mock ComfyUI modules before any imports from the main project
+server_mock = types.SimpleNamespace()
+server_mock.PromptServer = mock.MagicMock()
+sys.modules['server'] = server_mock
+
+folder_paths_mock = types.SimpleNamespace()
+folder_paths_mock.get_folder_paths = mock.MagicMock(return_value=[])
+folder_paths_mock.folder_names_and_paths = {}
+sys.modules['folder_paths'] = folder_paths_mock
+
+# Mock other ComfyUI modules that might be imported
+comfy_mock = types.SimpleNamespace()
+comfy_mock.utils = types.SimpleNamespace()
+comfy_mock.model_management = types.SimpleNamespace()
+comfy_mock.comfy_types = types.SimpleNamespace()
+comfy_mock.comfy_types.IO = mock.MagicMock()
+sys.modules['comfy'] = comfy_mock
+sys.modules['comfy.utils'] = comfy_mock.utils
+sys.modules['comfy.model_management'] = comfy_mock.model_management
+sys.modules['comfy.comfy_types'] = comfy_mock.comfy_types
+
+execution_mock = types.SimpleNamespace()
+execution_mock.PromptExecutor = mock.MagicMock()
+sys.modules['execution'] = execution_mock
+
+# Mock ComfyUI nodes module
+nodes_mock = types.SimpleNamespace()
+nodes_mock.LoraLoader = mock.MagicMock()
+nodes_mock.SaveImage = mock.MagicMock()
+nodes_mock.NODE_CLASS_MAPPINGS = {}
+sys.modules['nodes'] = nodes_mock
+
+
+def pytest_pyfunc_call(pyfuncitem):
+ """Allow bare async tests to run without pytest.mark.asyncio."""
+ test_function = pyfuncitem.function
+ if inspect.iscoroutinefunction(test_function):
+ func = pyfuncitem.obj
+ signature = inspect.signature(func)
+ accepted_kwargs: Dict[str, Any] = {}
+ for name, parameter in signature.parameters.items():
+ if parameter.kind is inspect.Parameter.VAR_POSITIONAL:
+ continue
+ if parameter.kind is inspect.Parameter.VAR_KEYWORD:
+ accepted_kwargs = dict(pyfuncitem.funcargs)
+ break
+ if name in pyfuncitem.funcargs:
+ accepted_kwargs[name] = pyfuncitem.funcargs[name]
+
+ original_policy = asyncio.get_event_loop_policy()
+ policy = pyfuncitem.funcargs.get("event_loop_policy")
+ if policy is not None and policy is not original_policy:
+ asyncio.set_event_loop_policy(policy)
+ try:
+ asyncio.run(func(**accepted_kwargs))
+ finally:
+ if policy is not None and policy is not original_policy:
+ asyncio.set_event_loop_policy(original_policy)
+ return True
+ return None
+
+
+@dataclass
+class MockHashIndex:
+ """Minimal hash index stub mirroring the scanner contract."""
+
+ removed_paths: List[str] = field(default_factory=list)
+
+ def remove_by_path(self, path: str) -> None:
+ self.removed_paths.append(path)
+
+
+class MockCache:
+    """Cache object exposing the attributes the scanner contract expects."""
+
+ def __init__(self, items: Optional[Sequence[Dict[str, Any]]] = None):
+ self.raw_data: List[Dict[str, Any]] = list(items or [])
+ self.resort_calls = 0
+
+ async def resort(self) -> None:
+ self.resort_calls += 1
+        # Callers expect the coroutine interface but do not
+        # rely on the return value.
+
+
+class MockScanner:
+ """Scanner double that exposes the attributes used by route utilities."""
+
+ def __init__(self, cache: Optional[MockCache] = None, hash_index: Optional[MockHashIndex] = None):
+ self._cache = cache or MockCache()
+ self._hash_index = hash_index or MockHashIndex()
+ self._tags_count: Dict[str, int] = {}
+ self._excluded_models: List[str] = []
+ self.updated_models: List[Dict[str, Any]] = []
+ self.preview_updates: List[Dict[str, Any]] = []
+ self.bulk_deleted: List[Sequence[str]] = []
+
+ async def get_cached_data(self, force_refresh: bool = False):
+ return self._cache
+
+ async def update_single_model_cache(self, original_path: str, new_path: str, metadata: Dict[str, Any]) -> bool:
+ self.updated_models.append({
+ "original_path": original_path,
+ "new_path": new_path,
+ "metadata": metadata,
+ })
+ for item in self._cache.raw_data:
+ if item.get("file_path") == original_path:
+ item.update(metadata)
+ return True
+
+ async def update_preview_in_cache(self, model_path: str, preview_path: str, nsfw_level: int) -> bool:
+ self.preview_updates.append({
+ "model_path": model_path,
+ "preview_path": preview_path,
+ "nsfw_level": nsfw_level,
+ })
+ for item in self._cache.raw_data:
+ if item.get("file_path") == model_path:
+ item["preview_url"] = preview_path
+ item["preview_nsfw_level"] = nsfw_level
+ return True
+
+ async def bulk_delete_models(self, file_paths: Sequence[str]) -> Dict[str, Any]:
+ self.bulk_deleted.append(tuple(file_paths))
+ self._cache.raw_data = [item for item in self._cache.raw_data if item.get("file_path") not in file_paths]
+ await self._cache.resort()
+ for path in file_paths:
+ self._hash_index.remove_by_path(path)
+ return {"success": True, "deleted": list(file_paths)}
+
+
+class MockModelService:
+ """Service stub consumed by the shared routes."""
+
+ def __init__(self, scanner: MockScanner):
+ self.scanner = scanner
+ self.model_type = "test-model"
+ self.paginated_items: List[Dict[str, Any]] = []
+ self.formatted: List[Dict[str, Any]] = []
+
+ async def get_paginated_data(self, **params: Any) -> Dict[str, Any]:
+ items = [dict(item) for item in self.paginated_items]
+ total = len(items)
+ page = params.get("page", 1)
+ page_size = params.get("page_size", 20)
+ return {
+ "items": items,
+ "total": total,
+ "page": page,
+ "page_size": page_size,
+ "total_pages": max(1, (total + page_size - 1) // page_size),
+ }
+
+ async def format_response(self, item: Dict[str, Any]) -> Dict[str, Any]:
+ formatted = {**item, "formatted": True}
+ self.formatted.append(formatted)
+ return formatted
+
+ # Convenience helpers used by assorted routes. They are no-ops for the
+ # smoke tests but document the expected surface area of the real services.
+ def get_model_roots(self) -> List[str]:
+ return ["."]
+
+ async def scan_models(self, *_, **__): # pragma: no cover - behaviour exercised via mocks
+ return None
+
+ async def get_model_notes(self, *_args, **_kwargs): # pragma: no cover
+ return None
+
+ async def get_model_preview_url(self, *_args, **_kwargs): # pragma: no cover
+ return ""
+
+ async def get_model_civitai_url(self, *_args, **_kwargs): # pragma: no cover
+ return {"civitai_url": ""}
+
+ async def get_model_metadata(self, *_args, **_kwargs): # pragma: no cover
+ return {}
+
+ async def get_model_description(self, *_args, **_kwargs): # pragma: no cover
+ return ""
+
+ async def get_relative_paths(self, *_args, **_kwargs): # pragma: no cover
+ return []
+
+ def has_hash(self, *_args, **_kwargs): # pragma: no cover
+ return False
+
+ def get_path_by_hash(self, *_args, **_kwargs): # pragma: no cover
+ return ""
+
+
+@pytest.fixture
+def mock_hash_index() -> MockHashIndex:
+ return MockHashIndex()
+
+
+@pytest.fixture
+def mock_cache() -> MockCache:
+ return MockCache()
+
+
+@pytest.fixture
+def mock_scanner(mock_cache: MockCache, mock_hash_index: MockHashIndex) -> MockScanner:
+ return MockScanner(cache=mock_cache, hash_index=mock_hash_index)
+
+
+@pytest.fixture
+def mock_service(mock_scanner: MockScanner) -> MockModelService:
+ return MockModelService(scanner=mock_scanner)
+
+
diff --git a/tests/frontend/components/contextMenu.interactions.test.js b/tests/frontend/components/contextMenu.interactions.test.js
new file mode 100644
index 00000000..7dd18ad5
--- /dev/null
+++ b/tests/frontend/components/contextMenu.interactions.test.js
@@ -0,0 +1,319 @@
+import { describe, it, beforeEach, afterEach, expect, vi } from 'vitest';
+
+const showToastMock = vi.fn();
+const copyToClipboardMock = vi.fn();
+const getNSFWLevelNameMock = vi.fn((level) => {
+ if (level >= 16) return 'XXX';
+ if (level >= 8) return 'X';
+ if (level >= 4) return 'R';
+ if (level >= 2) return 'PG13';
+ if (level >= 1) return 'PG';
+ return 'Unknown';
+});
+const copyLoraSyntaxMock = vi.fn();
+const sendLoraToWorkflowMock = vi.fn();
+const buildLoraSyntaxMock = vi.fn((fileName) => `lora:${fileName}`);
+const openExampleImagesFolderMock = vi.fn();
+
+const modalManagerMock = {
+ showModal: vi.fn(),
+ closeModal: vi.fn(),
+ registerModal: vi.fn(),
+ getModal: vi.fn(() => ({ element: { style: { display: 'none' } }, isOpen: false })),
+ isAnyModalOpen: vi.fn(),
+};
+
+const loadingManagerStub = {
+ showSimpleLoading: vi.fn(),
+ hide: vi.fn(),
+ show: vi.fn(),
+};
+
+const stateStub = {
+ global: { settings: {}, loadingManager: loadingManagerStub },
+ loadingManager: loadingManagerStub,
+ virtualScroller: { updateSingleItem: vi.fn() },
+};
+
+const saveModelMetadataMock = vi.fn();
+const downloadExampleImagesApiMock = vi.fn();
+const replaceModelPreviewMock = vi.fn();
+const refreshSingleModelMetadataMock = vi.fn();
+const resetAndReloadMock = vi.fn();
+
+const getModelApiClientMock = vi.fn(() => ({
+ saveModelMetadata: saveModelMetadataMock,
+ downloadExampleImages: downloadExampleImagesApiMock,
+ replaceModelPreview: replaceModelPreviewMock,
+ refreshSingleModelMetadata: refreshSingleModelMetadataMock,
+}));
+
+const updateRecipeMetadataMock = vi.fn(() => Promise.resolve({ success: true }));
+
+vi.mock('../../../static/js/utils/uiHelpers.js', () => ({
+ showToast: showToastMock,
+ copyToClipboard: copyToClipboardMock,
+ getNSFWLevelName: getNSFWLevelNameMock,
+ copyLoraSyntax: copyLoraSyntaxMock,
+ sendLoraToWorkflow: sendLoraToWorkflowMock,
+ buildLoraSyntax: buildLoraSyntaxMock,
+ openExampleImagesFolder: openExampleImagesFolderMock,
+}));
+
+vi.mock('../../../static/js/managers/ModalManager.js', () => ({
+ modalManager: modalManagerMock,
+}));
+
+vi.mock('../../../static/js/utils/storageHelpers.js', () => ({
+ setSessionItem: vi.fn(),
+ removeSessionItem: vi.fn(),
+ getSessionItem: vi.fn(),
+}));
+
+vi.mock('../../../static/js/api/modelApiFactory.js', () => ({
+ getModelApiClient: getModelApiClientMock,
+ resetAndReload: resetAndReloadMock,
+}));
+
+vi.mock('../../../static/js/state/index.js', () => ({
+ state: stateStub,
+}));
+
+vi.mock('../../../static/js/utils/modalUtils.js', () => ({
+ showExcludeModal: vi.fn(),
+ showDeleteModal: vi.fn(),
+}));
+
+vi.mock('../../../static/js/managers/MoveManager.js', () => ({
+ moveManager: { showMoveModal: vi.fn() },
+}));
+
+vi.mock('../../../static/js/api/recipeApi.js', () => ({
+ updateRecipeMetadata: updateRecipeMetadataMock,
+}));
+
+async function flushAsyncTasks() {
+ await Promise.resolve();
+ await new Promise((resolve) => setTimeout(resolve, 0));
+}
+
+describe('Interaction-level regression coverage', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ vi.useRealTimers();
+ document.body.innerHTML = '';
+ stateStub.global.settings = {};
+ saveModelMetadataMock.mockResolvedValue(undefined);
+ downloadExampleImagesApiMock.mockResolvedValue(undefined);
+ updateRecipeMetadataMock.mockResolvedValue({ success: true });
+ global.modalManager = modalManagerMock;
+ });
+
+ afterEach(() => {
+ vi.useRealTimers();
+ document.body.innerHTML = '';
+ delete window.exampleImagesManager;
+ delete global.fetch;
+ delete global.modalManager;
+ });
+
+ it('opens the NSFW selector from the LoRA context menu and persists the new rating', async () => {
+ document.body.innerHTML = `
+
+
+ `;
+
+ const card = document.createElement('div');
+ card.className = 'model-card';
+ card.dataset.filepath = '/models/test.safetensors';
+ card.dataset.meta = JSON.stringify({ preview_nsfw_level: 1 });
+ document.body.appendChild(card);
+
+ const { LoraContextMenu } = await import('../../../static/js/components/ContextMenu/LoraContextMenu.js');
+ const helpers = await import('../../../static/js/utils/uiHelpers.js');
+ expect(helpers.showToast).toBe(showToastMock);
+ const contextMenu = new LoraContextMenu();
+
+ contextMenu.showMenu(120, 140, card);
+
+ const nsfwMenuItem = document.querySelector('#loraContextMenu .context-menu-item[data-action="set-nsfw"]');
+ nsfwMenuItem.dispatchEvent(new Event('click', { bubbles: true }));
+
+ const selector = document.getElementById('nsfwLevelSelector');
+ expect(selector.style.display).toBe('block');
+ expect(selector.dataset.cardPath).toBe('/models/test.safetensors');
+ expect(document.getElementById('currentNSFWLevel').textContent).toBe('PG');
+
+ const levelButton = selector.querySelector('.nsfw-level-btn[data-level="4"]');
+ levelButton.dispatchEvent(new Event('click', { bubbles: true }));
+
+ expect(saveModelMetadataMock).toHaveBeenCalledWith('/models/test.safetensors', { preview_nsfw_level: 4 });
+ expect(saveModelMetadataMock).toHaveBeenCalledTimes(1);
+ await saveModelMetadataMock.mock.results[0].value;
+ await flushAsyncTasks();
+ expect(selector.style.display).toBe('none');
+ expect(document.getElementById('loraContextMenu').style.display).toBe('none');
+ });
+
+ it('wires recipe modal title editing to update metadata and UI state', async () => {
+ document.body.innerHTML = `
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ `;
+
+ const { RecipeModal } = await import('../../../static/js/components/RecipeModal.js');
+ const recipeModal = new RecipeModal();
+
+ const recipe = {
+ id: 'recipe-1',
+ file_path: '/recipes/test.json',
+ title: 'Original Title',
+ tags: ['tag1', 'tag2', 'tag3', 'tag4', 'tag5', 'tag6'],
+ file_url: '',
+ preview_url: '',
+ source_path: '',
+ gen_params: {
+ prompt: 'Prompt text',
+ negative_prompt: 'Negative prompt',
+ steps: '30',
+ },
+ loras: [],
+ };
+
+ recipeModal.showRecipeDetails(recipe);
+ await new Promise((resolve) => setTimeout(resolve, 60));
+ await flushAsyncTasks();
+
+ expect(modalManagerMock.showModal).toHaveBeenCalledWith('recipeModal');
+
+ const editIcon = document.querySelector('#recipeModalTitle .edit-icon');
+ editIcon.dispatchEvent(new Event('click', { bubbles: true }));
+
+ const titleInput = document.querySelector('#recipeTitleEditor .title-input');
+ titleInput.value = 'Updated Title';
+
+ recipeModal.saveTitleEdit();
+
+ expect(updateRecipeMetadataMock).toHaveBeenCalledWith('/recipes/test.json', { title: 'Updated Title' });
+ expect(updateRecipeMetadataMock).toHaveBeenCalledTimes(1);
+ await updateRecipeMetadataMock.mock.results[0].value;
+ await flushAsyncTasks();
+
+ const titleContainer = document.getElementById('recipeModalTitle');
+ expect(titleContainer.querySelector('.content-text').textContent).toBe('Updated Title');
+ expect(titleContainer.querySelector('#recipeTitleEditor').classList.contains('active')).toBe(false);
+ expect(recipeModal.currentRecipe.title).toBe('Updated Title');
+ });
+
+ it('processes global context menu actions for downloads and cleanup', async () => {
+ document.body.innerHTML = `
+
+ `;
+
+ const { GlobalContextMenu } = await import('../../../static/js/components/ContextMenu/GlobalContextMenu.js');
+ const menu = new GlobalContextMenu();
+
+ stateStub.global.settings.example_images_path = '/tmp/examples';
+ window.exampleImagesManager = {
+ handleDownloadButton: vi.fn().mockResolvedValue(undefined),
+ };
+
+ menu.showMenu(100, 200);
+ const downloadItem = document.querySelector('[data-action="download-example-images"]');
+ downloadItem.dispatchEvent(new Event('click', { bubbles: true }));
+ expect(downloadItem.classList.contains('disabled')).toBe(true);
+
+ expect(window.exampleImagesManager.handleDownloadButton).toHaveBeenCalledTimes(1);
+ await window.exampleImagesManager.handleDownloadButton.mock.results[0].value;
+ await flushAsyncTasks();
+ expect(downloadItem.classList.contains('disabled')).toBe(false);
+ expect(document.getElementById('globalContextMenu').style.display).toBe('none');
+
+ global.fetch = vi.fn().mockResolvedValue({
+ ok: true,
+ json: async () => ({ success: true, moved_total: 2 }),
+ });
+
+ menu.showMenu(240, 320);
+ const cleanupItem = document.querySelector('[data-action="cleanup-example-images-folders"]');
+ cleanupItem.dispatchEvent(new Event('click', { bubbles: true }));
+ expect(cleanupItem.classList.contains('disabled')).toBe(true);
+
+ expect(global.fetch).toHaveBeenCalledWith('/api/lm/cleanup-example-image-folders', { method: 'POST' });
+ expect(global.fetch).toHaveBeenCalledTimes(1);
+ const responsePromise = global.fetch.mock.results[0].value;
+ const response = await responsePromise;
+ await response.json();
+ await flushAsyncTasks();
+ expect(cleanupItem.classList.contains('disabled')).toBe(false);
+ expect(menu._cleanupInProgress).toBe(false);
+ });
+});
diff --git a/tests/frontend/components/pageControls.filtering.test.js b/tests/frontend/components/pageControls.filtering.test.js
new file mode 100644
index 00000000..bdbe4121
--- /dev/null
+++ b/tests/frontend/components/pageControls.filtering.test.js
@@ -0,0 +1,367 @@
+import { describe, it, beforeEach, afterEach, expect, vi } from 'vitest';
+
+const loadMoreWithVirtualScrollMock = vi.fn();
+const refreshModelsMock = vi.fn();
+const fetchCivitaiMetadataMock = vi.fn();
+const resetAndReloadMock = vi.fn();
+const getModelApiClientMock = vi.fn();
+const apiClientMock = {
+ loadMoreWithVirtualScroll: loadMoreWithVirtualScrollMock,
+ refreshModels: refreshModelsMock,
+ fetchCivitaiMetadata: fetchCivitaiMetadataMock,
+};
+
+const showToastMock = vi.fn();
+const updatePanelPositionsMock = vi.fn();
+const downloadManagerMock = {
+ showDownloadModal: vi.fn(),
+};
+
+const sidebarManagerMock = {
+ initialize: vi.fn(async () => {
+ sidebarManagerMock.isInitialized = true;
+ }),
+ refresh: vi.fn(async () => {}),
+ cleanup: vi.fn(),
+ isInitialized: false,
+};
+
+const createAlphabetBarMock = vi.fn(() => ({ destroy: vi.fn() }));
+
+getModelApiClientMock.mockReturnValue(apiClientMock);
+
+vi.mock('../../../static/js/api/modelApiFactory.js', () => ({
+ getModelApiClient: getModelApiClientMock,
+ resetAndReload: resetAndReloadMock,
+}));
+
+vi.mock('../../../static/js/utils/uiHelpers.js', () => ({
+ showToast: showToastMock,
+ updatePanelPositions: updatePanelPositionsMock,
+}));
+
+vi.mock('../../../static/js/managers/DownloadManager.js', () => ({
+ downloadManager: downloadManagerMock,
+}));
+
+vi.mock('../../../static/js/components/SidebarManager.js', () => ({
+ sidebarManager: sidebarManagerMock,
+}));
+
+vi.mock('../../../static/js/components/alphabet/index.js', () => ({
+ createAlphabetBar: createAlphabetBarMock,
+}));
+
+beforeEach(() => {
+ vi.resetModules();
+ vi.clearAllMocks();
+
+ loadMoreWithVirtualScrollMock.mockResolvedValue(undefined);
+ refreshModelsMock.mockResolvedValue(undefined);
+ fetchCivitaiMetadataMock.mockResolvedValue(undefined);
+ resetAndReloadMock.mockResolvedValue(undefined);
+ getModelApiClientMock.mockReturnValue(apiClientMock);
+
+ sidebarManagerMock.isInitialized = false;
+ sidebarManagerMock.initialize.mockImplementation(async () => {
+ sidebarManagerMock.isInitialized = true;
+ });
+
+ global.fetch = vi.fn().mockResolvedValue({
+ ok: true,
+ json: async () => ({ success: true, base_models: [] }),
+ });
+});
+
+afterEach(() => {
+ delete window.modelDuplicatesManager;
+ delete global.fetch;
+ vi.useRealTimers();
+});
+
+function renderControlsDom(pageKey) {
+ document.body.dataset.page = pageKey;
+ document.body.innerHTML = `
+
+
+
+
+
+ `;
+}
+
+describe('SearchManager filtering scenarios', () => {
+ it.each([
+ ['loras'],
+ ['checkpoints'],
+ ])('updates filters and reloads results for %s page', async (pageKey) => {
+ vi.useFakeTimers();
+
+ renderControlsDom(pageKey);
+ const stateModule = await import('../../../static/js/state/index.js');
+ stateModule.initPageState(pageKey);
+ const { getCurrentPageState } = stateModule;
+ const { SearchManager } = await import('../../../static/js/managers/SearchManager.js');
+
+ new SearchManager({ page: pageKey, searchDelay: 0 });
+
+ const input = document.getElementById('searchInput');
+ input.value = 'flux';
+ input.dispatchEvent(new Event('input', { bubbles: true }));
+
+ await vi.runAllTimersAsync();
+
+ expect(getCurrentPageState().filters.search).toBe('flux');
+ expect(loadMoreWithVirtualScrollMock).toHaveBeenCalledWith(true, false);
+ expect(loadMoreWithVirtualScrollMock).toHaveBeenCalledTimes(1);
+ });
+});
+
+describe('FilterManager tag and base model filters', () => {
+ it.each([
+ ['loras'],
+ ['checkpoints'],
+ ])('toggles tag chips and persists filters for %s page', async (pageKey) => {
+ renderControlsDom(pageKey);
+ const stateModule = await import('../../../static/js/state/index.js');
+ stateModule.initPageState(pageKey);
+ const { getCurrentPageState } = stateModule;
+ const { FilterManager } = await import('../../../static/js/managers/FilterManager.js');
+
+ const manager = new FilterManager({ page: pageKey });
+ manager.createTagFilterElements([{ tag: 'style', count: 5 }]);
+
+ const tagChip = document.querySelector('.filter-tag.tag-filter');
+ expect(tagChip).not.toBeNull();
+
+ tagChip.dispatchEvent(new Event('click', { bubbles: true }));
+ await vi.waitFor(() => expect(loadMoreWithVirtualScrollMock).toHaveBeenCalledTimes(1));
+
+ expect(getCurrentPageState().filters.tags).toEqual(['style']);
+ expect(tagChip.classList.contains('active')).toBe(true);
+ expect(document.getElementById('activeFiltersCount').textContent).toBe('1');
+ expect(document.getElementById('activeFiltersCount').style.display).toBe('inline-flex');
+
+ const storageKey = `lora_manager_${pageKey}_filters`;
+ const storedFilters = JSON.parse(localStorage.getItem(storageKey));
+ expect(storedFilters.tags).toEqual(['style']);
+
+ loadMoreWithVirtualScrollMock.mockClear();
+
+ tagChip.dispatchEvent(new Event('click', { bubbles: true }));
+ await vi.waitFor(() => expect(loadMoreWithVirtualScrollMock).toHaveBeenCalledTimes(1));
+
+ expect(getCurrentPageState().filters.tags).toEqual([]);
+ expect(document.getElementById('activeFiltersCount').style.display).toBe('none');
+ });
+
+ it.each([
+ ['loras'],
+ ['checkpoints'],
+ ])('toggles base model chips and reloads %s results', async (pageKey) => {
+ global.fetch = vi.fn().mockResolvedValue({
+ ok: true,
+ json: async () => ({
+ success: true,
+ base_models: [{ name: 'SDXL', count: 2 }],
+ }),
+ });
+
+ renderControlsDom(pageKey);
+ const stateModule = await import('../../../static/js/state/index.js');
+ stateModule.initPageState(pageKey);
+ const { getCurrentPageState } = stateModule;
+ const { FilterManager } = await import('../../../static/js/managers/FilterManager.js');
+
+ const manager = new FilterManager({ page: pageKey });
+
+ await vi.waitFor(() => {
+ const chip = document.querySelector('[data-base-model="SDXL"]');
+ expect(chip).not.toBeNull();
+ });
+
+ const baseModelChip = document.querySelector('[data-base-model="SDXL"]');
+
+ baseModelChip.dispatchEvent(new Event('click', { bubbles: true }));
+ await vi.waitFor(() => expect(loadMoreWithVirtualScrollMock).toHaveBeenCalledTimes(1));
+
+ expect(getCurrentPageState().filters.baseModel).toEqual(['SDXL']);
+ expect(baseModelChip.classList.contains('active')).toBe(true);
+
+ const storageKey = `lora_manager_${pageKey}_filters`;
+ const storedFilters = JSON.parse(localStorage.getItem(storageKey));
+ expect(storedFilters.baseModel).toEqual(['SDXL']);
+
+ loadMoreWithVirtualScrollMock.mockClear();
+
+ baseModelChip.dispatchEvent(new Event('click', { bubbles: true }));
+ await vi.waitFor(() => expect(loadMoreWithVirtualScrollMock).toHaveBeenCalledTimes(1));
+
+ expect(getCurrentPageState().filters.baseModel).toEqual([]);
+ expect(baseModelChip.classList.contains('active')).toBe(false);
+ });
+});
+
+describe('PageControls favorites, sorting, and duplicates scenarios', () => {
+ it('persists favorites toggle for LoRAs and triggers reload', async () => {
+ renderControlsDom('loras');
+ const stateModule = await import('../../../static/js/state/index.js');
+ stateModule.initPageState('loras');
+ const { LorasControls } = await import('../../../static/js/components/controls/LorasControls.js');
+
+ const controls = new LorasControls();
+
+ await controls.toggleFavoritesOnly();
+
+ expect(sessionStorage.getItem('lora_manager_show_favorites_only_loras')).toBe('true');
+ expect(stateModule.getCurrentPageState().showFavoritesOnly).toBe(true);
+ expect(document.getElementById('favoriteFilterBtn').classList.contains('active')).toBe(true);
+ expect(resetAndReloadMock).toHaveBeenCalledWith(true);
+
+ resetAndReloadMock.mockClear();
+
+ await controls.toggleFavoritesOnly();
+
+ expect(sessionStorage.getItem('lora_manager_show_favorites_only_loras')).toBe('false');
+ expect(stateModule.getCurrentPageState().showFavoritesOnly).toBe(false);
+ expect(document.getElementById('favoriteFilterBtn').classList.contains('active')).toBe(false);
+ expect(resetAndReloadMock).toHaveBeenCalledWith(true);
+ });
+
+ it('persists favorites toggle for checkpoints and triggers reload', async () => {
+ renderControlsDom('checkpoints');
+ const stateModule = await import('../../../static/js/state/index.js');
+ stateModule.initPageState('checkpoints');
+ const { CheckpointsControls } = await import('../../../static/js/components/controls/CheckpointsControls.js');
+
+ const controls = new CheckpointsControls();
+
+ await controls.toggleFavoritesOnly();
+
+ expect(sessionStorage.getItem('lora_manager_show_favorites_only_checkpoints')).toBe('true');
+ expect(stateModule.getCurrentPageState().showFavoritesOnly).toBe(true);
+ expect(document.getElementById('favoriteFilterBtn').classList.contains('active')).toBe(true);
+ expect(resetAndReloadMock).toHaveBeenCalledWith(true);
+
+ resetAndReloadMock.mockClear();
+
+ await controls.toggleFavoritesOnly();
+
+ expect(sessionStorage.getItem('lora_manager_show_favorites_only_checkpoints')).toBe('false');
+ expect(stateModule.getCurrentPageState().showFavoritesOnly).toBe(false);
+ expect(document.getElementById('favoriteFilterBtn').classList.contains('active')).toBe(false);
+ expect(resetAndReloadMock).toHaveBeenCalledWith(true);
+ });
+
+ it('saves sort selection and reloads models', async () => {
+ renderControlsDom('loras');
+ const stateModule = await import('../../../static/js/state/index.js');
+ stateModule.initPageState('loras');
+ const { LorasControls } = await import('../../../static/js/components/controls/LorasControls.js');
+
+ new LorasControls();
+
+ const sortSelect = document.getElementById('sortSelect');
+ sortSelect.value = 'date:asc';
+ sortSelect.dispatchEvent(new Event('change', { bubbles: true }));
+
+ await vi.waitFor(() => expect(resetAndReloadMock).toHaveBeenCalledTimes(1));
+ expect(localStorage.getItem('lora_manager_loras_sort')).toBe('date:asc');
+ expect(stateModule.getCurrentPageState().sortBy).toBe('date:asc');
+ });
+
+ it('converts legacy sort preference on initialization', async () => {
+ localStorage.setItem('loras_sort', 'date');
+
+ renderControlsDom('loras');
+ const stateModule = await import('../../../static/js/state/index.js');
+ stateModule.initPageState('loras');
+ const { LorasControls } = await import('../../../static/js/components/controls/LorasControls.js');
+
+ new LorasControls();
+
+ const sortSelect = document.getElementById('sortSelect');
+ expect(sortSelect.value).toBe('date:desc');
+ expect(stateModule.getCurrentPageState().sortBy).toBe('date:desc');
+ });
+
+ it('updates duplicate badge after refresh and toggles duplicate mode from controls', async () => {
+ renderControlsDom('checkpoints');
+ const stateModule = await import('../../../static/js/state/index.js');
+ stateModule.initPageState('checkpoints');
+ const { CheckpointsControls } = await import('../../../static/js/components/controls/CheckpointsControls.js');
+
+ const controls = new CheckpointsControls();
+
+ const toggleDuplicateMode = vi.fn();
+ const updateDuplicatesBadgeAfterRefresh = vi.fn();
+ window.modelDuplicatesManager = {
+ toggleDuplicateMode,
+ updateDuplicatesBadgeAfterRefresh,
+ };
+
+ await controls.refreshModels(true);
+ expect(refreshModelsMock).toHaveBeenCalledWith(true);
+ expect(updateDuplicatesBadgeAfterRefresh).toHaveBeenCalledTimes(1);
+
+ const duplicateButton = document.querySelector('[data-action="find-duplicates"]');
+ duplicateButton.click();
+ expect(toggleDuplicateMode).toHaveBeenCalledTimes(1);
+ });
+});
diff --git a/tests/frontend/core/appCore.test.js b/tests/frontend/core/appCore.test.js
new file mode 100644
index 00000000..094200f1
--- /dev/null
+++ b/tests/frontend/core/appCore.test.js
@@ -0,0 +1,274 @@
+import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
+import { renderTemplate, resetDom } from '../utils/domFixtures.js';
+
+const loadingManagerInstance = { showSimpleLoading: vi.fn(), hide: vi.fn() };
+const exampleImagesManagerInitialize = vi.fn();
+const exampleImagesManagerInstance = { initialize: exampleImagesManagerInitialize };
+const bulkContextMenuInstance = { menu: 'bulk-context' };
+const headerManagerInstance = { type: 'header-manager' };
+
+vi.mock('../../../static/js/managers/LoadingManager.js', () => ({
+ LoadingManager: vi.fn(() => loadingManagerInstance),
+}));
+
+vi.mock('../../../static/js/managers/ModalManager.js', () => ({
+ modalManager: { initialize: vi.fn() },
+}));
+
+vi.mock('../../../static/js/managers/UpdateService.js', () => ({
+ updateService: { initialize: vi.fn() },
+}));
+
+vi.mock('../../../static/js/components/Header.js', () => ({
+ HeaderManager: vi.fn(() => headerManagerInstance),
+}));
+
+vi.mock('../../../static/js/managers/SettingsManager.js', () => ({
+ settingsManager: {
+ waitForInitialization: vi.fn().mockResolvedValue(undefined),
+ },
+}));
+
+vi.mock('../../../static/js/managers/MoveManager.js', () => ({
+ moveManager: { initialize: vi.fn() },
+}));
+
+vi.mock('../../../static/js/managers/BulkManager.js', () => ({
+ bulkManager: {
+ initialize: vi.fn(),
+ setBulkContextMenu: vi.fn(),
+ },
+}));
+
+vi.mock('../../../static/js/managers/ExampleImagesManager.js', () => ({
+ ExampleImagesManager: vi.fn(() => exampleImagesManagerInstance),
+}));
+
+vi.mock('../../../static/js/managers/HelpManager.js', () => ({
+ helpManager: {
+ initialize: vi.fn(),
+ },
+}));
+
+vi.mock('../../../static/js/managers/BannerService.js', () => ({
+ bannerService: {
+ initialize: vi.fn(),
+ isBannerVisible: vi.fn().mockReturnValue(false),
+ },
+}));
+
+vi.mock('../../../static/js/utils/uiHelpers.js', () => ({
+ initTheme: vi.fn(),
+ initBackToTop: vi.fn(),
+ showToast: vi.fn(),
+}));
+
+vi.mock('../../../static/js/i18n/index.js', () => ({
+ i18n: {
+ waitForReady: vi.fn().mockResolvedValue(undefined),
+ getCurrentLocale: vi.fn().mockReturnValue('en'),
+ },
+}));
+
+vi.mock('../../../static/js/managers/OnboardingManager.js', () => ({
+ onboardingManager: {
+ start: vi.fn(),
+ },
+}));
+
+vi.mock('../../../static/js/components/ContextMenu/BulkContextMenu.js', () => ({
+ BulkContextMenu: vi.fn(() => bulkContextMenuInstance),
+}));
+
+vi.mock('../../../static/js/utils/eventManagementInit.js', () => ({
+ initializeEventManagement: vi.fn(),
+}));
+
+vi.mock('../../../static/js/utils/infiniteScroll.js', () => ({
+ initializeInfiniteScroll: vi.fn(),
+}));
+
+vi.mock('../../../static/js/components/ContextMenu/index.js', () => ({
+ createPageContextMenu: vi.fn((pageType) => ({ pageType })),
+ createGlobalContextMenu: vi.fn(() => ({ type: 'global' })),
+}));
+
+import { appCore } from '../../../static/js/core.js';
+import { state } from '../../../static/js/state/index.js';
+import { LoadingManager } from '../../../static/js/managers/LoadingManager.js';
+import { modalManager } from '../../../static/js/managers/ModalManager.js';
+import { updateService } from '../../../static/js/managers/UpdateService.js';
+import { settingsManager } from '../../../static/js/managers/SettingsManager.js';
+import { moveManager } from '../../../static/js/managers/MoveManager.js';
+import { bulkManager } from '../../../static/js/managers/BulkManager.js';
+import { ExampleImagesManager } from '../../../static/js/managers/ExampleImagesManager.js';
+import { helpManager } from '../../../static/js/managers/HelpManager.js';
+import { bannerService } from '../../../static/js/managers/BannerService.js';
+import { initTheme, initBackToTop } from '../../../static/js/utils/uiHelpers.js';
+import { onboardingManager } from '../../../static/js/managers/OnboardingManager.js';
+import { BulkContextMenu } from '../../../static/js/components/ContextMenu/BulkContextMenu.js';
+import { HeaderManager } from '../../../static/js/components/Header.js';
+import { initializeEventManagement } from '../../../static/js/utils/eventManagementInit.js';
+import { initializeInfiniteScroll } from '../../../static/js/utils/infiniteScroll.js';
+import { createPageContextMenu, createGlobalContextMenu } from '../../../static/js/components/ContextMenu/index.js';
+
+const SUPPORTED_PAGES = ['loras', 'recipes', 'checkpoints', 'embeddings'];
+
+describe('AppCore page orchestration', () => {
+ beforeEach(() => {
+ resetDom();
+ delete window.pageContextMenu;
+ delete window.globalContextMenuInstance;
+ vi.clearAllMocks();
+ });
+
+ afterEach(() => {
+ vi.restoreAllMocks();
+ });
+
+ it.each(SUPPORTED_PAGES)('initializes page features for %s pages', (pageType) => {
+ renderTemplate('loras.html', { dataset: { page: pageType } });
+ const contextSpy = vi.spyOn(appCore, 'initializeContextMenus');
+
+ appCore.initializePageFeatures();
+
+ expect(contextSpy).toHaveBeenCalledWith(pageType);
+ expect(initializeInfiniteScroll).toHaveBeenCalledWith(pageType);
+ });
+
+ it('skips initialization when page type is unsupported', () => {
+ renderTemplate('statistics.html', { dataset: { page: 'statistics' } });
+ const contextSpy = vi.spyOn(appCore, 'initializeContextMenus');
+
+ appCore.initializePageFeatures();
+
+ expect(contextSpy).not.toHaveBeenCalled();
+ expect(initializeInfiniteScroll).not.toHaveBeenCalled();
+ });
+
+ it('creates page and global context menus on first initialization', () => {
+ const pageMenu = { menu: 'page' };
+ const globalMenu = { menu: 'global' };
+ createPageContextMenu.mockReturnValueOnce(pageMenu);
+ createGlobalContextMenu.mockReturnValueOnce(globalMenu);
+
+ appCore.initializeContextMenus('loras');
+
+ expect(createPageContextMenu).toHaveBeenCalledWith('loras');
+ expect(window.pageContextMenu).toBe(pageMenu);
+ expect(createGlobalContextMenu).toHaveBeenCalledTimes(1);
+ expect(window.globalContextMenuInstance).toBe(globalMenu);
+ });
+
+ it('reuses the existing global context menu instance on subsequent calls', () => {
+ const existingGlobalMenu = { menu: 'existing' };
+ window.globalContextMenuInstance = existingGlobalMenu;
+
+ appCore.initializeContextMenus('loras');
+
+ expect(createGlobalContextMenu).not.toHaveBeenCalled();
+ expect(window.globalContextMenuInstance).toBe(existingGlobalMenu);
+ });
+});
+
+describe('AppCore initialization flow', () => {
+ beforeEach(() => {
+ vi.useFakeTimers();
+ vi.clearAllMocks();
+ resetDom();
+ document.body.className = '';
+ appCore.initialized = false;
+ state.loadingManager = undefined;
+ state.currentPageType = 'loras';
+ state.global.settings.card_info_display = 'always';
+ delete window.modalManager;
+ delete window.settingsManager;
+ delete window.exampleImagesManager;
+ delete window.helpManager;
+ delete window.moveManager;
+ delete window.bulkManager;
+ delete window.headerManager;
+ delete window.i18n;
+ delete window.pageContextMenu;
+ delete window.globalContextMenuInstance;
+ });
+
+ afterEach(async () => {
+ await vi.runAllTimersAsync();
+ vi.clearAllTimers();
+ vi.useRealTimers();
+ });
+
+ it('initializes core managers and global references', async () => {
+ state.global.settings.card_info_display = 'hover';
+
+ const result = await appCore.initialize();
+
+ expect(result).toBe(appCore);
+ expect(window.i18n).toBeDefined();
+ expect(settingsManager.waitForInitialization).toHaveBeenCalledTimes(1);
+ expect(LoadingManager).toHaveBeenCalledTimes(1);
+ expect(state.loadingManager).toBe(loadingManagerInstance);
+ expect(modalManager.initialize).toHaveBeenCalledTimes(1);
+ expect(updateService.initialize).toHaveBeenCalledTimes(1);
+ expect(bannerService.initialize).toHaveBeenCalledTimes(1);
+ expect(window.modalManager).toBe(modalManager);
+ expect(window.settingsManager).toBe(settingsManager);
+ expect(window.moveManager).toBe(moveManager);
+ expect(window.bulkManager).toBe(bulkManager);
+ expect(HeaderManager).toHaveBeenCalledTimes(1);
+ expect(window.headerManager).toBe(headerManagerInstance);
+ expect(initTheme).toHaveBeenCalledTimes(1);
+ expect(initBackToTop).toHaveBeenCalledTimes(1);
+ expect(bulkManager.initialize).toHaveBeenCalledTimes(1);
+ expect(BulkContextMenu).toHaveBeenCalledTimes(1);
+ expect(bulkManager.setBulkContextMenu).toHaveBeenCalledWith(bulkContextMenuInstance);
+ expect(ExampleImagesManager).toHaveBeenCalledTimes(1);
+ expect(window.exampleImagesManager).toBe(exampleImagesManagerInstance);
+ expect(exampleImagesManagerInitialize).toHaveBeenCalledTimes(1);
+ expect(helpManager.initialize).toHaveBeenCalledTimes(1);
+ expect(document.body.classList.contains('hover-reveal')).toBe(true);
+ expect(initializeEventManagement).toHaveBeenCalledTimes(1);
+ expect(onboardingManager.start).not.toHaveBeenCalled();
+
+ await vi.runAllTimersAsync();
+
+ expect(onboardingManager.start).toHaveBeenCalledTimes(1);
+ expect(bannerService.isBannerVisible).toHaveBeenCalledWith('version-mismatch');
+ });
+
+ it('does not reinitialize once initialized', async () => {
+ await appCore.initialize();
+ await vi.runAllTimersAsync();
+
+ vi.clearAllMocks();
+
+ const result = await appCore.initialize();
+
+ expect(result).toBeUndefined();
+ expect(LoadingManager).not.toHaveBeenCalled();
+ expect(modalManager.initialize).not.toHaveBeenCalled();
+ expect(updateService.initialize).not.toHaveBeenCalled();
+ expect(ExampleImagesManager).not.toHaveBeenCalled();
+ expect(onboardingManager.start).not.toHaveBeenCalled();
+ });
+
+ it('skips bulk setup when viewing recipes', async () => {
+ state.currentPageType = 'recipes';
+
+ await appCore.initialize();
+
+ expect(bulkManager.initialize).not.toHaveBeenCalled();
+ expect(BulkContextMenu).not.toHaveBeenCalled();
+ expect(bulkManager.setBulkContextMenu).not.toHaveBeenCalled();
+ });
+
+ it('suppresses onboarding when version mismatch banner is visible', async () => {
+ bannerService.isBannerVisible.mockReturnValueOnce(true);
+
+ await appCore.initialize();
+ await vi.runAllTimersAsync();
+
+ expect(onboardingManager.start).not.toHaveBeenCalled();
+ });
+});
diff --git a/tests/frontend/pages/checkpointsPage.test.js b/tests/frontend/pages/checkpointsPage.test.js
new file mode 100644
index 00000000..ea982e57
--- /dev/null
+++ b/tests/frontend/pages/checkpointsPage.test.js
@@ -0,0 +1,100 @@
+import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
+import { renderCheckpointsPage } from '../utils/pageFixtures.js';
+
+const CHECKPOINT_TYPE = 'checkpoints';
+
+vi.mock('../../../static/js/api/apiConfig.js', () => ({
+ MODEL_TYPES: {
+ CHECKPOINT: CHECKPOINT_TYPE,
+ },
+}));
+
+const initializeAppMock = vi.fn();
+const initializePageFeaturesMock = vi.fn();
+const createPageControlsMock = vi.fn();
+const confirmDeleteMock = vi.fn();
+const closeDeleteModalMock = vi.fn();
+const confirmExcludeMock = vi.fn();
+const closeExcludeModalMock = vi.fn();
+const duplicatesManagerMock = vi.fn();
+
+vi.mock('../../../static/js/core.js', () => ({
+ appCore: {
+ initialize: initializeAppMock,
+ initializePageFeatures: initializePageFeaturesMock,
+ },
+}));
+
+vi.mock('../../../static/js/components/controls/index.js', () => ({
+ createPageControls: createPageControlsMock,
+}));
+
+vi.mock('../../../static/js/utils/modalUtils.js', () => ({
+ confirmDelete: confirmDeleteMock,
+ closeDeleteModal: closeDeleteModalMock,
+ confirmExclude: confirmExcludeMock,
+ closeExcludeModal: closeExcludeModalMock,
+}));
+
+vi.mock('../../../static/js/components/ModelDuplicatesManager.js', () => ({
+ ModelDuplicatesManager: duplicatesManagerMock,
+}));
+
+describe('CheckpointsPageManager', () => {
+ let CheckpointsPageManager;
+ let initializeCheckpointsPage;
+ let duplicatesManagerInstance;
+
+ beforeEach(async () => {
+ vi.clearAllMocks();
+
+ duplicatesManagerInstance = {
+ checkDuplicatesCount: vi.fn(),
+ };
+
+ duplicatesManagerMock.mockReturnValue(duplicatesManagerInstance);
+ createPageControlsMock.mockReturnValue({ destroy: vi.fn() });
+ initializeAppMock.mockResolvedValue(undefined);
+
+ renderCheckpointsPage();
+
+ ({ CheckpointsPageManager, initializeCheckpointsPage } = await import('../../../static/js/checkpoints.js'));
+ });
+
+ afterEach(() => {
+ delete window.confirmDelete;
+ delete window.closeDeleteModal;
+ delete window.confirmExclude;
+ delete window.closeExcludeModal;
+ delete window.modelDuplicatesManager;
+ });
+
+ it('wires duplicates manager and exposes globals during construction', () => {
+ const manager = new CheckpointsPageManager();
+
+ expect(createPageControlsMock).toHaveBeenCalledWith(CHECKPOINT_TYPE);
+ expect(duplicatesManagerMock).toHaveBeenCalledWith(manager, CHECKPOINT_TYPE);
+
+ expect(window.confirmDelete).toBe(confirmDeleteMock);
+ expect(window.closeDeleteModal).toBe(closeDeleteModalMock);
+ expect(window.confirmExclude).toBe(confirmExcludeMock);
+ expect(window.closeExcludeModal).toBe(closeExcludeModalMock);
+ expect(window.modelDuplicatesManager).toBe(duplicatesManagerInstance);
+ });
+
+ it('initializes shared page features', async () => {
+ const manager = new CheckpointsPageManager();
+
+ await manager.initialize();
+
+ expect(initializePageFeaturesMock).toHaveBeenCalledTimes(1);
+ });
+
+ it('boots the page when DOMContentLoaded handler runs', async () => {
+ const manager = await initializeCheckpointsPage();
+
+ expect(initializeAppMock).toHaveBeenCalledTimes(1);
+ expect(manager).toBeInstanceOf(CheckpointsPageManager);
+ expect(window.modelDuplicatesManager).toBe(duplicatesManagerInstance);
+ });
+});
diff --git a/tests/frontend/pages/embeddingsPage.test.js b/tests/frontend/pages/embeddingsPage.test.js
new file mode 100644
index 00000000..4d0f754f
--- /dev/null
+++ b/tests/frontend/pages/embeddingsPage.test.js
@@ -0,0 +1,99 @@
+import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
+import { renderEmbeddingsPage } from '../utils/pageFixtures.js';
+
+const initializeAppMock = vi.fn();
+const initializePageFeaturesMock = vi.fn();
+const createPageControlsMock = vi.fn();
+const confirmDeleteMock = vi.fn();
+const closeDeleteModalMock = vi.fn();
+const confirmExcludeMock = vi.fn();
+const closeExcludeModalMock = vi.fn();
+const duplicatesManagerMock = vi.fn();
+
+vi.mock('../../../static/js/core.js', () => ({
+ appCore: {
+ initialize: initializeAppMock,
+ initializePageFeatures: initializePageFeaturesMock,
+ },
+}));
+
+vi.mock('../../../static/js/components/controls/index.js', () => ({
+ createPageControls: createPageControlsMock,
+}));
+
+vi.mock('../../../static/js/utils/modalUtils.js', () => ({
+ confirmDelete: confirmDeleteMock,
+ closeDeleteModal: closeDeleteModalMock,
+ confirmExclude: confirmExcludeMock,
+ closeExcludeModal: closeExcludeModalMock,
+}));
+
+vi.mock('../../../static/js/api/apiConfig.js', () => ({
+ MODEL_TYPES: {
+ EMBEDDING: 'embeddings',
+ },
+}));
+
+vi.mock('../../../static/js/components/ModelDuplicatesManager.js', () => ({
+ ModelDuplicatesManager: duplicatesManagerMock,
+}));
+
+describe('EmbeddingsPageManager', () => {
+ let EmbeddingsPageManager;
+ let initializeEmbeddingsPage;
+ let duplicatesManagerInstance;
+
+ beforeEach(async () => {
+ vi.resetModules();
+ vi.clearAllMocks();
+
+ duplicatesManagerInstance = {
+ checkDuplicatesCount: vi.fn(),
+ };
+
+ duplicatesManagerMock.mockReturnValue(duplicatesManagerInstance);
+ createPageControlsMock.mockReturnValue({ destroy: vi.fn() });
+ initializeAppMock.mockResolvedValue(undefined);
+
+ renderEmbeddingsPage();
+
+ ({ EmbeddingsPageManager, initializeEmbeddingsPage } = await import('../../../static/js/embeddings.js'));
+ });
+
+ afterEach(() => {
+ delete window.confirmDelete;
+ delete window.closeDeleteModal;
+ delete window.confirmExclude;
+ delete window.closeExcludeModal;
+ delete window.modelDuplicatesManager;
+ });
+
+ it('wires page controls and exposes modal helpers during construction', () => {
+ const manager = new EmbeddingsPageManager();
+
+ expect(createPageControlsMock).toHaveBeenCalledWith('embeddings');
+ expect(duplicatesManagerMock).toHaveBeenCalledWith(manager, 'embeddings');
+
+ expect(window.confirmDelete).toBe(confirmDeleteMock);
+ expect(window.closeDeleteModal).toBe(closeDeleteModalMock);
+ expect(window.confirmExclude).toBe(confirmExcludeMock);
+ expect(window.closeExcludeModal).toBe(closeExcludeModalMock);
+ expect(window.modelDuplicatesManager).toBe(duplicatesManagerInstance);
+ });
+
+ it('initializes shared page features', async () => {
+ const manager = new EmbeddingsPageManager();
+
+ await manager.initialize();
+
+ expect(initializePageFeaturesMock).toHaveBeenCalledTimes(1);
+ });
+
+ it('boots the embeddings page through the initializer', async () => {
+ const manager = await initializeEmbeddingsPage();
+
+ expect(initializeAppMock).toHaveBeenCalledTimes(1);
+ expect(manager).toBeInstanceOf(EmbeddingsPageManager);
+ expect(window.modelDuplicatesManager).toBe(duplicatesManagerInstance);
+ });
+});
diff --git a/tests/frontend/pages/lorasPage.test.js b/tests/frontend/pages/lorasPage.test.js
new file mode 100644
index 00000000..1768fa35
--- /dev/null
+++ b/tests/frontend/pages/lorasPage.test.js
@@ -0,0 +1,109 @@
+import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
+import { renderLorasPage } from '../utils/pageFixtures.js';
+
+const initializeAppMock = vi.fn();
+const initializePageFeaturesMock = vi.fn();
+const updateCardsForBulkModeMock = vi.fn();
+const createPageControlsMock = vi.fn();
+const confirmDeleteMock = vi.fn();
+const closeDeleteModalMock = vi.fn();
+const confirmExcludeMock = vi.fn();
+const closeExcludeModalMock = vi.fn();
+const state = {};
+const duplicatesManagerMock = vi.fn();
+
+vi.mock('../../../static/js/core.js', () => ({
+ appCore: {
+ initialize: initializeAppMock,
+ initializePageFeatures: initializePageFeaturesMock,
+ },
+}));
+
+vi.mock('../../../static/js/state/index.js', () => ({
+ state,
+}));
+
+vi.mock('../../../static/js/components/shared/ModelCard.js', () => ({
+ updateCardsForBulkMode: updateCardsForBulkModeMock,
+}));
+
+vi.mock('../../../static/js/components/controls/index.js', () => ({
+ createPageControls: createPageControlsMock,
+}));
+
+vi.mock('../../../static/js/utils/modalUtils.js', () => ({
+ confirmDelete: confirmDeleteMock,
+ closeDeleteModal: closeDeleteModalMock,
+ confirmExclude: confirmExcludeMock,
+ closeExcludeModal: closeExcludeModalMock,
+}));
+
+vi.mock('../../../static/js/components/ModelDuplicatesManager.js', () => ({
+ ModelDuplicatesManager: duplicatesManagerMock,
+}));
+
+describe('LoraPageManager', () => {
+ let LoraPageManager;
+ let initializeLoraPage;
+ let duplicatesManagerInstance;
+
+ beforeEach(async () => {
+ vi.clearAllMocks();
+
+ state.bulkMode = undefined;
+ state.selectedLoras = undefined;
+
+ duplicatesManagerInstance = {
+ checkDuplicatesCount: vi.fn(),
+ };
+
+ duplicatesManagerMock.mockReturnValue(duplicatesManagerInstance);
+ createPageControlsMock.mockReturnValue({ destroy: vi.fn() });
+ initializeAppMock.mockResolvedValue(undefined);
+
+ renderLorasPage();
+
+ ({ LoraPageManager, initializeLoraPage } = await import('../../../static/js/loras.js'));
+ });
+
+ afterEach(() => {
+ delete window.confirmDelete;
+ delete window.closeDeleteModal;
+ delete window.confirmExclude;
+ delete window.closeExcludeModal;
+ delete window.modelDuplicatesManager;
+ });
+
+ it('configures state and exposes globals during construction', () => {
+ const manager = new LoraPageManager();
+
+ expect(state.bulkMode).toBe(false);
+ expect(state.selectedLoras).toBeInstanceOf(Set);
+ expect(createPageControlsMock).toHaveBeenCalledWith('loras');
+ expect(duplicatesManagerMock).toHaveBeenCalledWith(manager);
+
+ expect(window.confirmDelete).toBe(confirmDeleteMock);
+ expect(window.closeDeleteModal).toBe(closeDeleteModalMock);
+ expect(window.confirmExclude).toBe(confirmExcludeMock);
+ expect(window.closeExcludeModal).toBe(closeExcludeModalMock);
+ expect(window.modelDuplicatesManager).toBe(duplicatesManagerInstance);
+ });
+
+ it('initializes cards and page features', async () => {
+ const manager = new LoraPageManager();
+
+ await manager.initialize();
+
+ expect(updateCardsForBulkModeMock).toHaveBeenCalledWith(false);
+ expect(initializePageFeaturesMock).toHaveBeenCalledTimes(1);
+ });
+
+ it('boots the page when DOMContentLoaded handler runs', async () => {
+ const manager = await initializeLoraPage();
+
+ expect(initializeAppMock).toHaveBeenCalledTimes(1);
+ expect(manager).toBeInstanceOf(LoraPageManager);
+ expect(updateCardsForBulkModeMock).toHaveBeenCalledWith(false);
+ expect(window.modelDuplicatesManager).toBe(duplicatesManagerInstance);
+ });
+});
diff --git a/tests/frontend/pages/recipesPage.test.js b/tests/frontend/pages/recipesPage.test.js
new file mode 100644
index 00000000..74add4e0
--- /dev/null
+++ b/tests/frontend/pages/recipesPage.test.js
@@ -0,0 +1,209 @@
+import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
+import { renderRecipesPage } from '../utils/pageFixtures.js';
+
+const initializeAppMock = vi.fn();
+const initializePageFeaturesMock = vi.fn();
+const getCurrentPageStateMock = vi.fn();
+const getSessionItemMock = vi.fn();
+const removeSessionItemMock = vi.fn();
+const RecipeContextMenuMock = vi.fn();
+const refreshVirtualScrollMock = vi.fn();
+const refreshRecipesMock = vi.fn();
+
+let importManagerInstance;
+let recipeModalInstance;
+let duplicatesManagerInstance;
+
+const ImportManagerMock = vi.fn(() => importManagerInstance);
+const RecipeModalMock = vi.fn(() => recipeModalInstance);
+const DuplicatesManagerMock = vi.fn(() => duplicatesManagerInstance);
+
+vi.mock('../../../static/js/core.js', () => ({
+ appCore: {
+ initialize: initializeAppMock,
+ initializePageFeatures: initializePageFeaturesMock,
+ },
+}));
+
+vi.mock('../../../static/js/managers/ImportManager.js', () => ({
+ ImportManager: ImportManagerMock,
+}));
+
+vi.mock('../../../static/js/components/RecipeModal.js', () => ({
+ RecipeModal: RecipeModalMock,
+}));
+
+vi.mock('../../../static/js/state/index.js', () => ({
+ getCurrentPageState: getCurrentPageStateMock,
+}));
+
+vi.mock('../../../static/js/utils/storageHelpers.js', () => ({
+ getSessionItem: getSessionItemMock,
+ removeSessionItem: removeSessionItemMock,
+}));
+
+vi.mock('../../../static/js/components/ContextMenu/index.js', () => ({
+ RecipeContextMenu: RecipeContextMenuMock,
+}));
+
+vi.mock('../../../static/js/components/DuplicatesManager.js', () => ({
+ DuplicatesManager: DuplicatesManagerMock,
+}));
+
+vi.mock('../../../static/js/utils/infiniteScroll.js', () => ({
+ refreshVirtualScroll: refreshVirtualScrollMock,
+}));
+
+vi.mock('../../../static/js/api/recipeApi.js', () => ({
+ refreshRecipes: refreshRecipesMock,
+}));
+
+describe('RecipeManager', () => {
+ let RecipeManager;
+ let pageState;
+
+ beforeEach(async () => {
+ vi.resetModules();
+ vi.clearAllMocks();
+
+ importManagerInstance = {
+ showImportModal: vi.fn(),
+ };
+ recipeModalInstance = {
+ showRecipeDetails: vi.fn(),
+ };
+ duplicatesManagerInstance = {
+ findDuplicates: vi.fn(),
+ selectLatestDuplicates: vi.fn(),
+ deleteSelectedDuplicates: vi.fn(),
+ confirmDeleteDuplicates: vi.fn(),
+ exitDuplicateMode: vi.fn(),
+ };
+
+ pageState = {
+ sortBy: 'date',
+ searchOptions: undefined,
+ customFilter: undefined,
+ duplicatesMode: false,
+ };
+
+ getCurrentPageStateMock.mockImplementation(() => pageState);
+ initializeAppMock.mockResolvedValue(undefined);
+ initializePageFeaturesMock.mockResolvedValue(undefined);
+ refreshVirtualScrollMock.mockReset();
+ refreshVirtualScrollMock.mockImplementation(() => {});
+ refreshRecipesMock.mockResolvedValue('refreshed');
+
+ getSessionItemMock.mockImplementation((key) => {
+ const map = {
+ lora_to_recipe_filterLoraName: 'Flux Dream',
+ lora_to_recipe_filterLoraHash: 'abc123',
+ viewRecipeId: '42',
+ };
+ return map[key] ?? null;
+ });
+ removeSessionItemMock.mockImplementation(() => {});
+
+ renderRecipesPage();
+
+ ({ RecipeManager } = await import('../../../static/js/recipes.js'));
+ });
+
+ afterEach(() => {
+ delete window.recipeManager;
+ delete window.importManager;
+ });
+
+ it('initializes page controls, restores filters, and wires sort interactions', async () => {
+ const sortSelectElement = document.createElement('select');
+ sortSelectElement.id = 'sortSelect';
+ sortSelectElement.innerHTML = `
+ <option value="date">Date</option>
+ <option value="name">Name</option>
+ `;
+ document.body.appendChild(sortSelectElement);
+
+ const manager = new RecipeManager();
+ await manager.initialize();
+
+ expect(ImportManagerMock).toHaveBeenCalledTimes(1);
+ expect(RecipeModalMock).toHaveBeenCalledTimes(1);
+ expect(DuplicatesManagerMock).toHaveBeenCalledWith(manager);
+ expect(RecipeContextMenuMock).toHaveBeenCalledTimes(1);
+
+ expect(window.recipeManager).toBe(manager);
+ expect(window.importManager).toBe(importManagerInstance);
+
+ expect(pageState.searchOptions).toEqual({
+ title: true,
+ tags: true,
+ loraName: true,
+ loraModel: true,
+ });
+
+ expect(pageState.customFilter).toEqual({
+ active: true,
+ loraName: 'Flux Dream',
+ loraHash: 'abc123',
+ recipeId: '42',
+ });
+
+ const indicator = document.getElementById('customFilterIndicator');
+ expect(indicator.classList.contains('hidden')).toBe(false);
+
+ const clearButton = indicator.querySelector('.clear-filter');
+ clearButton.dispatchEvent(new Event('click', { bubbles: true }));
+
+ expect(removeSessionItemMock).toHaveBeenCalledWith('lora_to_recipe_filterLoraName');
+ expect(removeSessionItemMock).toHaveBeenCalledWith('lora_to_recipe_filterLoraHash');
+ expect(removeSessionItemMock).toHaveBeenCalledWith('viewRecipeId');
+ expect(pageState.customFilter.active).toBe(false);
+ expect(indicator.classList.contains('hidden')).toBe(true);
+ expect(refreshVirtualScrollMock).toHaveBeenCalledTimes(1);
+
+ const sortSelect = document.getElementById('sortSelect');
+ sortSelect.value = 'name';
+ sortSelect.dispatchEvent(new Event('change', { bubbles: true }));
+
+ expect(pageState.sortBy).toBe('name');
+ expect(refreshVirtualScrollMock).toHaveBeenCalledTimes(2);
+ expect(initializePageFeaturesMock).toHaveBeenCalledTimes(1);
+ });
+
+ it('skips loading when duplicates mode is active and refreshes otherwise', async () => {
+ const manager = new RecipeManager();
+
+ pageState.duplicatesMode = true;
+ await manager.loadRecipes();
+ expect(refreshVirtualScrollMock).not.toHaveBeenCalled();
+
+ pageState.duplicatesMode = false;
+ await manager.loadRecipes();
+ expect(refreshVirtualScrollMock).toHaveBeenCalledTimes(1);
+ });
+
+ it('proxies duplicate management and refresh helpers', async () => {
+ const manager = new RecipeManager();
+
+ await manager.findDuplicateRecipes();
+ expect(duplicatesManagerInstance.findDuplicates).toHaveBeenCalledTimes(1);
+
+ manager.selectLatestDuplicates();
+ expect(duplicatesManagerInstance.selectLatestDuplicates).toHaveBeenCalledTimes(1);
+
+ manager.deleteSelectedDuplicates();
+ expect(duplicatesManagerInstance.deleteSelectedDuplicates).toHaveBeenCalledTimes(1);
+
+ manager.confirmDeleteDuplicates();
+ expect(duplicatesManagerInstance.confirmDeleteDuplicates).toHaveBeenCalledTimes(1);
+
+ const grid = document.getElementById('recipeGrid');
+ grid.innerHTML = '<div>content</div>';
+ manager.exitDuplicateMode();
+ expect(grid.innerHTML).toBe('');
+ expect(duplicatesManagerInstance.exitDuplicateMode).toHaveBeenCalledTimes(1);
+
+ await manager.refreshRecipes();
+ expect(refreshRecipesMock).toHaveBeenCalledTimes(1);
+ });
+});
diff --git a/tests/frontend/setup.js b/tests/frontend/setup.js
new file mode 100644
index 00000000..b6862e67
--- /dev/null
+++ b/tests/frontend/setup.js
@@ -0,0 +1,16 @@
+import { afterEach, beforeEach } from 'vitest';
+import { resetDom } from './utils/domFixtures.js';
+
+beforeEach(() => {
+ // Ensure storage is clean before each test to avoid cross-test pollution
+ localStorage.clear();
+ sessionStorage.clear();
+
+ // Reset DOM state for modules that rely on body attributes
+ resetDom();
+});
+
+afterEach(() => {
+ // Clean any dynamically attached globals by tests
+ resetDom();
+});
diff --git a/tests/frontend/state/index.test.js b/tests/frontend/state/index.test.js
new file mode 100644
index 00000000..50e5e6e8
--- /dev/null
+++ b/tests/frontend/state/index.test.js
@@ -0,0 +1,53 @@
+import { describe, it, expect, beforeEach } from 'vitest';
+import { createDefaultSettings, getCurrentPageState, initPageState, setCurrentPageType, state } from '../../../static/js/state/index.js';
+import { MODEL_TYPES } from '../../../static/js/api/apiConfig.js';
+import { DEFAULT_PATH_TEMPLATES } from '../../../static/js/utils/constants.js';
+
+describe('state module', () => {
+ beforeEach(() => {
+ // Reset to default page before each assertion
+ state.currentPageType = MODEL_TYPES.LORA;
+ });
+
+ it('creates default settings with immutable template copies', () => {
+ const defaultSettings = createDefaultSettings();
+
+ expect(defaultSettings).toMatchObject({
+ civitai_api_key: '',
+ language: 'en',
+ blur_mature_content: true
+ });
+
+ expect(defaultSettings.download_path_templates).toEqual(DEFAULT_PATH_TEMPLATES);
+
+ // ensure nested objects are new references so tests can safely mutate
+ expect(defaultSettings.download_path_templates).not.toBe(DEFAULT_PATH_TEMPLATES);
+ expect(defaultSettings.base_model_path_mappings).toEqual({});
+ expect(Object.isFrozen(defaultSettings)).toBe(false);
+ });
+
+ it('switches current page type when valid', () => {
+ const didSwitch = setCurrentPageType(MODEL_TYPES.CHECKPOINT);
+
+ expect(didSwitch).toBe(true);
+ expect(state.currentPageType).toBe(MODEL_TYPES.CHECKPOINT);
+ expect(getCurrentPageState()).toBe(state.pages[MODEL_TYPES.CHECKPOINT]);
+ });
+
+ it('rejects switching to an unknown page type', () => {
+ state.currentPageType = MODEL_TYPES.LORA;
+
+ const didSwitch = setCurrentPageType('invalid-page');
+
+ expect(didSwitch).toBe(false);
+ expect(state.currentPageType).toBe(MODEL_TYPES.LORA);
+ });
+
+ it('initializes and returns state for a known page', () => {
+ const pageState = initPageState(MODEL_TYPES.EMBEDDING);
+
+ expect(pageState).toBeDefined();
+ expect(pageState).toBe(state.pages[MODEL_TYPES.EMBEDDING]);
+ expect(state.currentPageType).toBe(MODEL_TYPES.EMBEDDING);
+ });
+});
diff --git a/tests/frontend/utils/domFixtures.js b/tests/frontend/utils/domFixtures.js
new file mode 100644
index 00000000..ea903473
--- /dev/null
+++ b/tests/frontend/utils/domFixtures.js
@@ -0,0 +1,68 @@
+import fs from 'node:fs';
+import path from 'node:path';
+
+const TEMPLATE_ROOT = path.resolve(process.cwd(), 'templates');
+
+/**
+ * Reads an HTML template from the templates directory and returns its markup.
+ * @param {string} relativePath - Path relative to the templates directory.
+ * @returns {string}
+ */
+export function readTemplate(relativePath) {
+ const filePath = path.join(TEMPLATE_ROOT, relativePath);
+ return fs.readFileSync(filePath, 'utf-8');
+}
+
+/**
+ * Injects the provided HTML markup into the supplied container (defaults to document.body).
+ * @param {string} html
+ * @param {Element} [container=document.body]
+ * @returns {Element}
+ */
+export function mountMarkup(html, container = document.body) {
+ container.innerHTML = html;
+ return container;
+}
+
+/**
+ * Loads a template file and mounts it into the DOM, returning the container used.
+ * @param {string} relativePath - Template path relative to templates directory.
+ * @param {{
+ * container?: Element,
+ * dataset?: Record<string, string>,
+ * beforeMount?: (options: { container: Element }) => void,
+ * afterMount?: (options: { container: Element }) => void
+ * }} [options]
+ * @returns {Element}
+ */
+export function renderTemplate(relativePath, options = {}) {
+ const { container = document.body, dataset = {}, beforeMount, afterMount } = options;
+ if (beforeMount) {
+ beforeMount({ container });
+ }
+
+ const html = readTemplate(relativePath);
+ const target = mountMarkup(html, container);
+
+ Object.entries(dataset).forEach(([key, value]) => {
+ target.dataset[key] = value;
+ });
+
+ if (afterMount) {
+ afterMount({ container: target });
+ }
+
+ return target;
+}
+
+/**
+ * Utility to reset the DOM to a clean state. Useful when tests modify the structure
+ * beyond what the shared Vitest setup clears.
+ * @param {Element} [container=document.body]
+ */
+export function resetDom(container = document.body) {
+ container.innerHTML = '';
+ if (container === document.body) {
+ document.body.removeAttribute('data-page');
+ }
+}
diff --git a/tests/frontend/utils/pageFixtures.js b/tests/frontend/utils/pageFixtures.js
new file mode 100644
index 00000000..465ea8b3
--- /dev/null
+++ b/tests/frontend/utils/pageFixtures.js
@@ -0,0 +1,49 @@
+import { renderTemplate } from './domFixtures.js';
+
+/**
+ * Renders the LoRAs page template with expected dataset attributes.
+ * @returns {Element}
+ */
+export function renderLorasPage() {
+ return renderTemplate('loras.html', {
+ dataset: {
+ page: 'loras',
+ },
+ });
+}
+
+/**
+ * Renders the Checkpoints page template with expected dataset attributes.
+ * @returns {Element}
+ */
+export function renderCheckpointsPage() {
+ return renderTemplate('checkpoints.html', {
+ dataset: {
+ page: 'checkpoints',
+ },
+ });
+}
+
+/**
+ * Renders the Embeddings page template with expected dataset attributes.
+ * @returns {Element}
+ */
+export function renderEmbeddingsPage() {
+ return renderTemplate('embeddings.html', {
+ dataset: {
+ page: 'embeddings',
+ },
+ });
+}
+
+/**
+ * Renders the Recipes page template with expected dataset attributes.
+ * @returns {Element}
+ */
+export function renderRecipesPage() {
+ return renderTemplate('recipes.html', {
+ dataset: {
+ page: 'recipes',
+ },
+ });
+}
diff --git a/tests/frontend/utils/storageHelpers.test.js b/tests/frontend/utils/storageHelpers.test.js
new file mode 100644
index 00000000..87acbcc7
--- /dev/null
+++ b/tests/frontend/utils/storageHelpers.test.js
@@ -0,0 +1,111 @@
+import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
+import * as storageHelpers from '../../../static/js/utils/storageHelpers.js';
+
+const {
+ getStorageItem,
+ setStorageItem,
+ removeStorageItem,
+ getSessionItem,
+ setSessionItem,
+ removeSessionItem,
+} = storageHelpers;
+
+const createFakeStorage = () => {
+ const store = new Map();
+ return {
+ getItem: vi.fn((key) => (store.has(key) ? store.get(key) : null)),
+ setItem: vi.fn((key, value) => {
+ store.set(key, value);
+ }),
+ removeItem: vi.fn((key) => {
+ store.delete(key);
+ }),
+ clear: vi.fn(() => {
+ store.clear();
+ }),
+ key: vi.fn((index) => Array.from(store.keys())[index] ?? null),
+ get length() {
+ return store.size;
+ },
+ _store: store
+ };
+};
+
+let localStorageMock;
+let sessionStorageMock;
+let consoleLogMock;
+
+beforeEach(() => {
+ localStorageMock = createFakeStorage();
+ sessionStorageMock = createFakeStorage();
+ vi.stubGlobal('localStorage', localStorageMock);
+ vi.stubGlobal('sessionStorage', sessionStorageMock);
+ consoleLogMock = vi.spyOn(console, 'log').mockImplementation(() => {});
+});
+
+afterEach(() => {
+ vi.unstubAllGlobals();
+ vi.restoreAllMocks();
+});
+
+describe('storageHelpers namespace utilities', () => {
+ it('returns parsed JSON for prefixed localStorage items', () => {
+ localStorage.setItem('lora_manager_preferences', JSON.stringify({ theme: 'dark' }));
+
+ const result = getStorageItem('preferences');
+
+ expect(result).toEqual({ theme: 'dark' });
+ expect(localStorage.getItem).toHaveBeenCalledWith('lora_manager_preferences');
+ });
+
+ it('falls back to legacy keys and migrates them to the namespace', () => {
+ localStorage.setItem('legacy_key', 'value');
+
+ const value = getStorageItem('legacy_key');
+
+ expect(value).toBe('value');
+ expect(localStorage.getItem('lora_manager_legacy_key')).toBe('value');
+ });
+
+ it('serializes objects when setting prefixed localStorage values', () => {
+ const data = { ids: [1, 2, 3] };
+
+ setStorageItem('data', data);
+
+ expect(localStorage.setItem).toHaveBeenCalledWith('lora_manager_data', JSON.stringify(data));
+ expect(localStorage.getItem('lora_manager_data')).toEqual(JSON.stringify(data));
+ });
+
+ it('removes both prefixed and legacy localStorage entries', () => {
+ localStorage.setItem('lora_manager_temp', '123');
+ localStorage.setItem('temp', '456');
+
+ removeStorageItem('temp');
+
+ expect(localStorage.getItem('lora_manager_temp')).toBeNull();
+ expect(localStorage.getItem('temp')).toBeNull();
+ });
+
+ it('returns parsed JSON for session storage items', () => {
+ sessionStorage.setItem('lora_manager_session', JSON.stringify({ page: 'loras' }));
+
+ const session = getSessionItem('session');
+
+ expect(session).toEqual({ page: 'loras' });
+ });
+
+ it('stores primitives in session storage directly', () => {
+ setSessionItem('token', 'abc123');
+
+ expect(sessionStorage.setItem).toHaveBeenCalledWith('lora_manager_token', 'abc123');
+ expect(sessionStorage.getItem('lora_manager_token')).toBe('abc123');
+ });
+
+ it('removes session storage entries by namespace', () => {
+ sessionStorage.setItem('lora_manager_flag', '1');
+
+ removeSessionItem('flag');
+
+ expect(sessionStorage.getItem('lora_manager_flag')).toBeNull();
+ });
+});
diff --git a/tests/i18n/test_i18n.py b/tests/i18n/test_i18n.py
new file mode 100644
index 00000000..41631287
--- /dev/null
+++ b/tests/i18n/test_i18n.py
@@ -0,0 +1,247 @@
+"""Regression tests for localization data and usage.
+
+These tests validate three key aspects of the localisation setup:
+
+* Every locale file is valid JSON and contains the expected sections.
+* All locales expose the same translation keys as the English reference.
+* Static JavaScript/HTML sources only reference available translation keys.
+"""
+
+from __future__ import annotations
+
+import json
+import re
+from pathlib import Path
+from typing import Dict, Iterable, Set
+
+import pytest
+
+ROOT_DIR = Path(__file__).resolve().parents[2]
+LOCALES_DIR = ROOT_DIR / "locales"
+STATIC_JS_DIR = ROOT_DIR / "static" / "js"
+TEMPLATES_DIR = ROOT_DIR / "templates"
+
+EXPECTED_LOCALES = (
+ "en",
+ "zh-CN",
+ "zh-TW",
+ "ja",
+ "ru",
+ "de",
+ "fr",
+ "es",
+ "ko",
+)
+
+REQUIRED_SECTIONS = {"common", "header", "loras", "recipes", "modals"}
+
+SINGLE_WORD_TRANSLATION_KEYS = {
+ "loading",
+ "error",
+ "success",
+ "warning",
+ "info",
+ "cancel",
+ "save",
+ "delete",
+}
+
+FALSE_POSITIVES = {
+ "checkpoint",
+ "civitai_api_key",
+ "div",
+ "embedding",
+ "lora",
+ "show_only_sfw",
+ "model",
+ "type",
+ "name",
+ "value",
+ "id",
+ "class",
+ "style",
+ "src",
+ "href",
+ "data",
+ "width",
+ "height",
+ "size",
+ "format",
+ "version",
+ "url",
+ "path",
+ "file",
+ "folder",
+ "image",
+ "text",
+ "number",
+ "boolean",
+ "array",
+ "object",
+ "non.existent.key",
+}
+
+SPECIAL_UI_HELPER_KEYS = {
+ "uiHelpers.workflow.loraAdded",
+ "uiHelpers.workflow.loraReplaced",
+ "uiHelpers.workflow.loraFailedToSend",
+ "uiHelpers.workflow.recipeAdded",
+ "uiHelpers.workflow.recipeReplaced",
+ "uiHelpers.workflow.recipeFailedToSend",
+}
+
+JS_TRANSLATION_PATTERNS = (
+ r"\btranslate\s*\(\s*['\"]([a-zA-Z0-9._-]+)['\"]",
+ r"\bshowToast\s*\(\s*['\"]([a-zA-Z0-9._-]+)['\"]",
+ r"\bt\s*\(\s*['\"]([a-zA-Z0-9._-]+)['\"]",
+)
+
+HTML_TRANSLATION_PATTERN = (
+ r"(?:\{\{|\{%)[^}]*\bt\s*\(\s*['\"]([a-zA-Z0-9._-]+)['\"][^}]*(?:\}\}|%\})"
+)
+
+
+@pytest.fixture(scope="module")
+def loaded_locales() -> Dict[str, dict]:
+ """Load locale JSON once per test module."""
+ locales: Dict[str, dict] = {}
+
+ for locale in EXPECTED_LOCALES:
+ path = LOCALES_DIR / f"{locale}.json"
+ if not path.exists():
+ pytest.fail(f"Locale file {path.name} is missing", pytrace=False)
+
+ try:
+ data = json.loads(path.read_text(encoding="utf-8"))
+ except json.JSONDecodeError as exc: # pragma: no cover - explicit failure message
+ pytest.fail(f"Locale file {path.name} contains invalid JSON: {exc}", pytrace=False)
+
+ if not isinstance(data, dict):
+ pytest.fail(
+ f"Locale file {path.name} must contain a JSON object at the top level",
+ pytrace=False,
+ )
+
+ locales[locale] = data
+
+ return locales
+
+
+@pytest.fixture(scope="module")
+def english_translation_keys(loaded_locales: Dict[str, dict]) -> Set[str]:
+ return collect_translation_keys(loaded_locales["en"])
+
+
+@pytest.fixture(scope="module")
+def static_code_translation_keys() -> Set[str]:
+ return gather_static_translation_keys()
+
+
+def collect_translation_keys(data: dict, prefix: str = "") -> Set[str]:
+ """Recursively collect translation keys from a locale dictionary."""
+ keys: Set[str] = set()
+
+ for key, value in data.items():
+ full_key = f"{prefix}.{key}" if prefix else key
+ if isinstance(value, dict):
+ keys.update(collect_translation_keys(value, full_key))
+ else:
+ keys.add(full_key)
+
+ return keys
+
+
+def gather_static_translation_keys() -> Set[str]:
+ """Collect translation keys referenced in static JavaScript and HTML templates."""
+ keys: Set[str] = set()
+
+ if STATIC_JS_DIR.exists():
+ for file_path in STATIC_JS_DIR.rglob("*.js"):
+ keys.update(filter_translation_keys(extract_i18n_keys_from_js(file_path)))
+
+ if TEMPLATES_DIR.exists():
+ for file_path in TEMPLATES_DIR.rglob("*.html"):
+ keys.update(filter_translation_keys(extract_i18n_keys_from_html(file_path)))
+
+ keys.update(SPECIAL_UI_HELPER_KEYS)
+
+ return keys
+
+
+def filter_translation_keys(raw_keys: Iterable[str]) -> Set[str]:
+ """Filter out obvious false positives and non-translation identifiers."""
+ filtered: Set[str] = set()
+ for key in raw_keys:
+ if key in FALSE_POSITIVES:
+ continue
+ if "." not in key and key not in SINGLE_WORD_TRANSLATION_KEYS:
+ continue
+ filtered.add(key)
+ return filtered
+
+
+def extract_i18n_keys_from_js(file_path: Path) -> Set[str]:
+ """Extract translation keys from JavaScript sources."""
+ content = file_path.read_text(encoding="utf-8")
+ # Remove single-line and multi-line comments to avoid false positives.
+ content = re.sub(r"//.*$", "", content, flags=re.MULTILINE)
+ content = re.sub(r"/\*.*?\*/", "", content, flags=re.DOTALL)
+
+ matches: Set[str] = set()
+ for pattern in JS_TRANSLATION_PATTERNS:
+ matches.update(re.findall(pattern, content))
+ return matches
+
+
+def extract_i18n_keys_from_html(file_path: Path) -> Set[str]:
+ """Extract translation keys from HTML templates."""
+ content = file_path.read_text(encoding="utf-8")
+ content = re.sub(r"", "", content, flags=re.DOTALL)
+
+ matches: Set[str] = set(re.findall(HTML_TRANSLATION_PATTERN, content))
+
+ # Inspect inline script tags as JavaScript.
+ for script_body in re.findall(r"", content, flags=re.DOTALL):
+ for pattern in JS_TRANSLATION_PATTERNS:
+ matches.update(re.findall(pattern, script_body))
+
+ return matches
+
+
+@pytest.mark.parametrize("locale", EXPECTED_LOCALES)
+def test_locale_files_have_expected_structure(locale: str, loaded_locales: Dict[str, dict]) -> None:
+ """Every locale must contain the required sections."""
+ data = loaded_locales[locale]
+ missing_sections = sorted(REQUIRED_SECTIONS - data.keys())
+ assert not missing_sections, f"{locale} locale is missing sections: {missing_sections}"
+
+
+@pytest.mark.parametrize("locale", EXPECTED_LOCALES[1:])
+def test_locale_keys_match_english(
+ locale: str, loaded_locales: Dict[str, dict], english_translation_keys: Set[str]
+) -> None:
+ """Locales must expose the same translation keys as English."""
+ locale_keys = collect_translation_keys(loaded_locales[locale])
+
+ missing_keys = sorted(english_translation_keys - locale_keys)
+ extra_keys = sorted(locale_keys - english_translation_keys)
+
+ assert not missing_keys, (
+ f"{locale} is missing translation keys: {missing_keys[:10]}"
+ + ("..." if len(missing_keys) > 10 else "")
+ )
+ assert not extra_keys, (
+ f"{locale} defines unexpected translation keys: {extra_keys[:10]}"
+ + ("..." if len(extra_keys) > 10 else "")
+ )
+
+
+def test_static_sources_only_use_existing_translations(
+ english_translation_keys: Set[str], static_code_translation_keys: Set[str]
+) -> None:
+ """Static code must not reference unknown translation keys."""
+ missing_keys = sorted(static_code_translation_keys - english_translation_keys)
+ assert not missing_keys, (
+ "Static sources reference missing translation keys: "
+ f"{missing_keys[:20]}" + ("..." if len(missing_keys) > 20 else "")
+ )
diff --git a/tests/routes/test_base_model_routes_smoke.py b/tests/routes/test_base_model_routes_smoke.py
new file mode 100644
index 00000000..7f7e0c9a
--- /dev/null
+++ b/tests/routes/test_base_model_routes_smoke.py
@@ -0,0 +1,390 @@
+import asyncio
+import importlib.util
+import json
+import os
+import sys
+from pathlib import Path
+
+import types
+
+folder_paths_stub = types.SimpleNamespace(get_folder_paths=lambda *_: [])
+sys.modules.setdefault("folder_paths", folder_paths_stub)
+
+import pytest
+from aiohttp import FormData, web
+from aiohttp.test_utils import TestClient, TestServer
+
+REPO_ROOT = Path(__file__).resolve().parents[2]
+PY_PACKAGE_PATH = REPO_ROOT / "py"
+
+spec = importlib.util.spec_from_file_location(
+ "py_local",
+ PY_PACKAGE_PATH / "__init__.py",
+ submodule_search_locations=[str(PY_PACKAGE_PATH)],
+)
+py_local = importlib.util.module_from_spec(spec)
+assert spec.loader is not None # for mypy/static analyzers
+spec.loader.exec_module(py_local)
+sys.modules.setdefault("py_local", py_local)
+
+from py_local.routes.base_model_routes import BaseModelRoutes
+from py_local.services.model_file_service import AutoOrganizeResult
+from py_local.services.service_registry import ServiceRegistry
+from py_local.services.websocket_manager import ws_manager
+from py_local.utils.exif_utils import ExifUtils
+from py_local.config import config
+
+
+class DummyRoutes(BaseModelRoutes):
+ template_name = "dummy.html"
+
+ def setup_specific_routes(self, registrar, prefix: str) -> None: # pragma: no cover - no extra routes in smoke tests
+ return None
+
+
+async def create_test_client(service) -> TestClient:
+ routes = DummyRoutes(service)
+ app = web.Application()
+ routes.setup_routes(app, "test-models")
+
+ server = TestServer(app)
+ client = TestClient(server)
+ await client.start_server()
+ return client
+
+
+@pytest.fixture(autouse=True)
+def reset_ws_manager_state():
+ ws_manager.cleanup_auto_organize_progress()
+ ws_manager._download_progress.clear()
+ yield
+ ws_manager.cleanup_auto_organize_progress()
+ ws_manager._download_progress.clear()
+
+
+@pytest.fixture
+def download_manager_stub():
+ class FakeDownloadManager:
+ def __init__(self):
+ self.calls = []
+ self.error = None
+ self.cancelled = []
+ self.active_downloads = {}
+
+ async def download_from_civitai(self, **kwargs):
+ self.calls.append(kwargs)
+ if self.error is not None:
+ raise self.error
+ await kwargs["progress_callback"](42)
+ return {"success": True, "path": "/tmp/model.safetensors"}
+
+ async def cancel_download(self, download_id):
+ self.cancelled.append(download_id)
+ return {"success": True, "download_id": download_id}
+
+ async def get_active_downloads(self):
+ return self.active_downloads
+
+ stub = FakeDownloadManager()
+ previous = ServiceRegistry._services.get("download_manager")
+ asyncio.run(ServiceRegistry.register_service("download_manager", stub))
+ try:
+ yield stub
+ finally:
+ if previous is not None:
+ ServiceRegistry._services["download_manager"] = previous
+ else:
+ ServiceRegistry._services.pop("download_manager", None)
+
+
+def test_list_models_returns_formatted_items(mock_service, mock_scanner):
+ mock_service.paginated_items = [{"file_path": "/tmp/demo.safetensors", "name": "Demo"}]
+
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ response = await client.get("/api/lm/test-models/list")
+ payload = await response.json()
+
+ assert response.status == 200
+ assert payload["items"] == [{"file_path": "/tmp/demo.safetensors", "name": "Demo", "formatted": True}]
+ assert payload["total"] == 1
+ assert mock_service.formatted == payload["items"]
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+
+
+def test_routes_return_service_not_ready_when_unattached():
+ async def scenario():
+ client = await create_test_client(None)
+ try:
+ response = await client.get("/api/lm/test-models/list")
+ payload = await response.json()
+
+ assert response.status == 503
+ assert payload == {"success": False, "error": "Service not ready"}
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+
+
+def test_delete_model_updates_cache_and_hash_index(mock_service, mock_scanner, tmp_path: Path):
+ model_path = tmp_path / "sample.safetensors"
+ model_path.write_bytes(b"model")
+ mock_scanner._cache.raw_data = [{"file_path": str(model_path)}]
+
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ response = await client.post(
+ "/api/lm/test-models/delete",
+ json={"file_path": str(model_path)},
+ )
+ payload = await response.json()
+
+ assert response.status == 200
+ assert payload["success"] is True
+ assert mock_scanner._cache.raw_data == []
+ assert mock_scanner._cache.resort_calls == 1
+ assert mock_scanner._hash_index.removed_paths == [str(model_path)]
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+ assert not model_path.exists()
+
+
+def test_replace_preview_writes_file_and_updates_cache(
+ mock_service,
+ mock_scanner,
+ monkeypatch: pytest.MonkeyPatch,
+ tmp_path: Path,
+):
+ model_path = tmp_path / "preview-model.safetensors"
+ model_path.write_bytes(b"model")
+ metadata_path = tmp_path / "preview-model.metadata.json"
+ metadata_path.write_text(json.dumps({"file_path": str(model_path)}))
+
+ mock_scanner._cache.raw_data = [{"file_path": str(model_path)}]
+
+ monkeypatch.setattr(
+ ExifUtils,
+ "optimize_image",
+ staticmethod(lambda image_data, **_: (image_data, ".webp")),
+ )
+ monkeypatch.setattr(
+ config,
+ "get_preview_static_url",
+ lambda preview_path: f"/static/{Path(preview_path).name}",
+ )
+
+ form = FormData()
+ form.add_field("preview_file", b"binary-data", filename="preview.png", content_type="image/png")
+ form.add_field("model_path", str(model_path))
+ form.add_field("nsfw_level", "2")
+
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ response = await client.post("/api/lm/test-models/replace-preview", data=form)
+ payload = await response.json()
+
+ expected_preview = str((tmp_path / "preview-model.webp")).replace(os.sep, "/")
+ assert response.status == 200
+ assert payload["success"] is True
+ assert payload["preview_url"] == "/static/preview-model.webp"
+ assert Path(expected_preview).exists()
+ assert mock_scanner.preview_updates[-1]["preview_path"] == expected_preview
+ assert mock_scanner._cache.raw_data[0]["preview_url"] == expected_preview
+ assert mock_scanner._cache.raw_data[0]["preview_nsfw_level"] == 2
+
+ updated_metadata = json.loads(metadata_path.read_text())
+ assert updated_metadata["preview_url"] == expected_preview
+ assert updated_metadata["preview_nsfw_level"] == 2
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+
+
+def test_download_model_invokes_download_manager(
+ mock_service,
+ download_manager_stub,
+ tmp_path: Path,
+):
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ response = await client.post(
+ "/api/lm/download-model",
+ json={"model_id": 1, "model_root": str(tmp_path)},
+ )
+ payload = await response.json()
+
+ assert response.status == 200
+ assert payload["success"] is True
+ assert download_manager_stub.calls
+
+ call_args = download_manager_stub.calls[0]
+ assert call_args["model_id"] == 1
+ assert call_args["download_id"] == payload["download_id"]
+ progress = ws_manager.get_download_progress(payload["download_id"])
+ assert progress is not None
+ assert progress["progress"] == 42
+ assert "timestamp" in progress
+
+ progress_response = await client.get(
+ f"/api/lm/download-progress/{payload['download_id']}"
+ )
+ progress_payload = await progress_response.json()
+
+ assert progress_response.status == 200
+ assert progress_payload == {"success": True, "progress": 42}
+ ws_manager.cleanup_download_progress(payload["download_id"])
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+
+
+def test_download_model_requires_identifier(mock_service, download_manager_stub):
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ response = await client.post(
+ "/api/lm/download-model",
+ json={"model_root": "/tmp"},
+ )
+ payload = await response.json()
+
+ assert response.status == 400
+ assert payload["success"] is False
+ assert "Missing required" in payload["error"]
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+
+
+def test_download_model_maps_validation_errors(mock_service, download_manager_stub):
+ download_manager_stub.error = ValueError("Invalid relative path")
+
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ response = await client.post(
+ "/api/lm/download-model",
+ json={"model_version_id": 123},
+ )
+ payload = await response.json()
+
+ assert response.status == 400
+ assert payload == {"success": False, "error": "Invalid relative path"}
+ assert ws_manager._download_progress == {}
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+
+
+def test_download_model_maps_early_access_errors(mock_service, download_manager_stub):
+ download_manager_stub.error = RuntimeError("401 early access")
+
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ response = await client.post(
+ "/api/lm/download-model",
+ json={"model_id": 4},
+ )
+ payload = await response.json()
+
+ assert response.status == 401
+ assert payload == {
+ "success": False,
+ "error": "Early Access Restriction: This model requires purchase. Please buy early access on Civitai.com.",
+ }
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+
+
+def test_auto_organize_progress_returns_latest_snapshot(mock_service):
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ await ws_manager.broadcast_auto_organize_progress({"status": "processing", "percent": 50})
+
+ response = await client.get("/api/lm/test-models/auto-organize-progress")
+ payload = await response.json()
+
+ assert response.status == 200
+ assert payload == {"success": True, "progress": {"status": "processing", "percent": 50}}
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+
+
+def test_auto_organize_route_emits_progress(mock_service, monkeypatch: pytest.MonkeyPatch):
+ async def fake_auto_organize(self, file_paths=None, progress_callback=None):
+ result = AutoOrganizeResult()
+ result.total = 1
+ result.processed = 1
+ result.success_count = 1
+ result.skipped_count = 0
+ result.failure_count = 0
+ result.operation_type = "bulk"
+ if progress_callback is not None:
+ await progress_callback.on_progress({"type": "auto_organize_progress", "status": "started"})
+ await progress_callback.on_progress({"type": "auto_organize_progress", "status": "completed"})
+ return result
+
+ monkeypatch.setattr(
+ py_local.services.model_file_service.ModelFileService,
+ "auto_organize_models",
+ fake_auto_organize,
+ )
+
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ response = await client.post("/api/lm/test-models/auto-organize", json={"file_paths": []})
+ payload = await response.json()
+
+ assert response.status == 200
+ assert payload["success"] is True
+
+ progress = ws_manager.get_auto_organize_progress()
+ assert progress is not None
+ assert progress["status"] == "completed"
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
+
+
+def test_auto_organize_conflict_when_running(mock_service):
+ async def scenario():
+ client = await create_test_client(mock_service)
+ try:
+ await ws_manager.broadcast_auto_organize_progress(
+ {"type": "auto_organize_progress", "status": "started"}
+ )
+
+ response = await client.post("/api/lm/test-models/auto-organize")
+ payload = await response.json()
+
+ assert response.status == 409
+ assert payload == {
+ "success": False,
+ "error": "Auto-organize is already running. Please wait for it to complete.",
+ }
+ finally:
+ await client.close()
+
+ asyncio.run(scenario())
diff --git a/tests/routes/test_embedding_routes.py b/tests/routes/test_embedding_routes.py
new file mode 100644
index 00000000..fc1782a0
--- /dev/null
+++ b/tests/routes/test_embedding_routes.py
@@ -0,0 +1,50 @@
+import json
+
+import pytest
+
+from py.routes.embedding_routes import EmbeddingRoutes
+
+
+class DummyRequest:
+ def __init__(self, *, match_info=None):
+ self.match_info = match_info or {}
+
+
+class StubEmbeddingService:
+ def __init__(self):
+ self.info = {}
+
+ async def get_model_info_by_name(self, name):
+ value = self.info.get(name)
+ if isinstance(value, Exception):
+ raise value
+ return value
+
+
+@pytest.fixture
+def routes():
+ handler = EmbeddingRoutes()
+ handler.service = StubEmbeddingService()
+ return handler
+
+
+async def test_get_embedding_info_success(routes):
+ routes.service.info["demo"] = {"name": "demo"}
+ response = await routes.get_embedding_info(DummyRequest(match_info={"name": "demo"}))
+ payload = json.loads(response.text)
+ assert payload == {"name": "demo"}
+
+
+async def test_get_embedding_info_missing(routes):
+ response = await routes.get_embedding_info(DummyRequest(match_info={"name": "missing"}))
+ payload = json.loads(response.text)
+ assert response.status == 404
+ assert payload == {"error": "Embedding not found"}
+
+
+async def test_get_embedding_info_error(routes):
+ routes.service.info["demo"] = RuntimeError("boom")
+ response = await routes.get_embedding_info(DummyRequest(match_info={"name": "demo"}))
+ payload = json.loads(response.text)
+ assert response.status == 500
+ assert payload == {"error": "boom"}
diff --git a/tests/routes/test_example_images_routes.py b/tests/routes/test_example_images_routes.py
new file mode 100644
index 00000000..1cc57eb0
--- /dev/null
+++ b/tests/routes/test_example_images_routes.py
@@ -0,0 +1,474 @@
+from __future__ import annotations
+
+import json
+from contextlib import asynccontextmanager
+from dataclasses import dataclass
+from typing import Any, Dict, List, Tuple
+
+from aiohttp import web
+from aiohttp.test_utils import TestClient, TestServer
+import pytest
+
+from py.routes.example_images_route_registrar import ROUTE_DEFINITIONS
+from py.routes.example_images_routes import ExampleImagesRoutes
+from py.routes.handlers.example_images_handlers import (
+ ExampleImagesDownloadHandler,
+ ExampleImagesFileHandler,
+ ExampleImagesHandlerSet,
+ ExampleImagesManagementHandler,
+)
+
+
+@dataclass
+class ExampleImagesHarness:
+ """Container exposing the aiohttp client and stubbed collaborators."""
+
+ client: TestClient
+ download_manager: "StubDownloadManager"
+ processor: "StubExampleImagesProcessor"
+ file_manager: "StubExampleImagesFileManager"
+ cleanup_service: "StubExampleImagesCleanupService"
+ controller: ExampleImagesRoutes
+
+
+class StubDownloadManager:
+ def __init__(self) -> None:
+ self.calls: List[Tuple[str, Any]] = []
+
+ async def start_download(self, payload: Any) -> dict:
+ self.calls.append(("start_download", payload))
+ return {"operation": "start_download", "payload": payload}
+
+ async def get_status(self, request: web.Request) -> dict:
+ self.calls.append(("get_status", dict(request.query)))
+ return {"operation": "get_status"}
+
+ async def pause_download(self, request: web.Request) -> dict:
+ self.calls.append(("pause_download", None))
+ return {"operation": "pause_download"}
+
+ async def resume_download(self, request: web.Request) -> dict:
+ self.calls.append(("resume_download", None))
+ return {"operation": "resume_download"}
+
+ async def start_force_download(self, payload: Any) -> dict:
+ self.calls.append(("start_force_download", payload))
+ return {"operation": "start_force_download", "payload": payload}
+
+
+class StubExampleImagesProcessor:
+ def __init__(self) -> None:
+ self.calls: List[Tuple[str, Any]] = []
+
+ async def import_images(self, model_hash: str, files: List[str]) -> dict:
+ payload = {"model_hash": model_hash, "file_paths": files}
+ self.calls.append(("import_images", payload))
+ return {"operation": "import_images", "payload": payload}
+
+ async def delete_custom_image(self, request: web.Request) -> web.StreamResponse:
+ payload = await request.json()
+ self.calls.append(("delete_custom_image", payload))
+ return web.json_response({"operation": "delete_custom_image", "payload": payload})
+
+
+class StubExampleImagesFileManager:
+ def __init__(self) -> None:
+ self.calls: List[Tuple[str, Any]] = []
+
+ async def open_folder(self, request: web.Request) -> web.StreamResponse:
+ payload = await request.json()
+ self.calls.append(("open_folder", payload))
+ return web.json_response({"operation": "open_folder", "payload": payload})
+
+ async def get_files(self, request: web.Request) -> web.StreamResponse:
+ self.calls.append(("get_files", dict(request.query)))
+ return web.json_response({"operation": "get_files", "query": dict(request.query)})
+
+ async def has_images(self, request: web.Request) -> web.StreamResponse:
+ self.calls.append(("has_images", dict(request.query)))
+ return web.json_response({"operation": "has_images", "query": dict(request.query)})
+
+
+class StubExampleImagesCleanupService:
+ def __init__(self) -> None:
+ self.calls: List[Dict[str, Any]] = []
+ self.result: Dict[str, Any] = {
+ "success": True,
+ "moved_total": 0,
+ "moved_empty_folders": 0,
+ "moved_orphaned_folders": 0,
+ }
+
+ async def cleanup_example_image_folders(self) -> Dict[str, Any]:
+ self.calls.append({})
+ return self.result
+
+
+class StubWebSocketManager:
+ def __init__(self) -> None:
+ self.broadcast_calls: List[Dict[str, Any]] = []
+
+ async def broadcast(self, payload: Dict[str, Any]) -> None:
+ self.broadcast_calls.append(payload)
+
+
+@asynccontextmanager
+async def example_images_app() -> ExampleImagesHarness:
+ """Yield an ExampleImagesRoutes app wired with stubbed collaborators."""
+
+ download_manager = StubDownloadManager()
+ processor = StubExampleImagesProcessor()
+ file_manager = StubExampleImagesFileManager()
+ cleanup_service = StubExampleImagesCleanupService()
+ ws_manager = StubWebSocketManager()
+
+ controller = ExampleImagesRoutes(
+ ws_manager=ws_manager,
+ download_manager=download_manager,
+ processor=processor,
+ file_manager=file_manager,
+ cleanup_service=cleanup_service,
+ )
+
+ app = web.Application()
+ controller.register(app)
+
+ server = TestServer(app)
+ client = TestClient(server)
+ await client.start_server()
+
+ try:
+ yield ExampleImagesHarness(
+ client=client,
+ download_manager=download_manager,
+ processor=processor,
+ file_manager=file_manager,
+ cleanup_service=cleanup_service,
+ controller=controller,
+ )
+ finally:
+ await client.close()
+
+
+async def test_setup_routes_registers_all_definitions():
+ async with example_images_app() as harness:
+ registered = {
+ (route.method, route.resource.canonical)
+ for route in harness.client.app.router.routes()
+ if route.resource.canonical
+ }
+
+ expected = {(definition.method, definition.path) for definition in ROUTE_DEFINITIONS}
+
+ assert expected <= registered
+
+
+@pytest.mark.parametrize(
+ "endpoint, payload",
+ [
+ ("/api/lm/download-example-images", {"model_types": ["lora"], "optimize": False}),
+ ("/api/lm/force-download-example-images", {"model_hashes": ["abc123"]}),
+ ],
+)
+async def test_download_routes_delegate_to_manager(endpoint, payload):
+ async with example_images_app() as harness:
+ response = await harness.client.post(endpoint, json=payload)
+ body = await response.json()
+
+ assert response.status == 200
+ assert body["payload"] == payload
+ assert body["operation"].startswith("start")
+
+ expected_call = body["operation"], payload
+ assert expected_call in harness.download_manager.calls
+
+
+async def test_status_route_returns_manager_payload():
+ async with example_images_app() as harness:
+ response = await harness.client.get(
+ "/api/lm/example-images-status", params={"detail": "true"}
+ )
+ body = await response.json()
+
+ assert response.status == 200
+ assert body == {"operation": "get_status"}
+ assert harness.download_manager.calls == [("get_status", {"detail": "true"})]
+
+
+async def test_pause_and_resume_routes_delegate():
+ async with example_images_app() as harness:
+ pause_response = await harness.client.post("/api/lm/pause-example-images")
+ resume_response = await harness.client.post("/api/lm/resume-example-images")
+
+ assert pause_response.status == 200
+ assert await pause_response.json() == {"operation": "pause_download"}
+ assert resume_response.status == 200
+ assert await resume_response.json() == {"operation": "resume_download"}
+
+ assert harness.download_manager.calls[-2:] == [
+ ("pause_download", None),
+ ("resume_download", None),
+ ]
+
+
+async def test_import_route_delegates_to_processor():
+ payload = {"model_hash": "abc123", "file_paths": ["/path/image.png"]}
+ async with example_images_app() as harness:
+ response = await harness.client.post(
+ "/api/lm/import-example-images", json=payload
+ )
+ body = await response.json()
+
+ assert response.status == 200
+ assert body == {"operation": "import_images", "payload": payload}
+ expected_call = ("import_images", payload)
+ assert expected_call in harness.processor.calls
+
+
+async def test_delete_route_delegates_to_processor():
+ payload = {"model_hash": "abc123", "short_id": "xyz"}
+ async with example_images_app() as harness:
+ response = await harness.client.post(
+ "/api/lm/delete-example-image", json=payload
+ )
+ body = await response.json()
+
+ assert response.status == 200
+ assert body == {"operation": "delete_custom_image", "payload": payload}
+ assert harness.processor.calls == [("delete_custom_image", payload)]
+
+
+async def test_file_routes_delegate_to_file_manager():
+ open_payload = {"model_hash": "abc123"}
+ files_params = {"model_hash": "def456"}
+
+ async with example_images_app() as harness:
+ open_response = await harness.client.post(
+ "/api/lm/open-example-images-folder", json=open_payload
+ )
+ files_response = await harness.client.get(
+ "/api/lm/example-image-files", params=files_params
+ )
+ has_response = await harness.client.get(
+ "/api/lm/has-example-images", params=files_params
+ )
+
+ assert open_response.status == 200
+ assert files_response.status == 200
+ assert has_response.status == 200
+
+ assert await open_response.json() == {"operation": "open_folder", "payload": open_payload}
+ assert await files_response.json() == {
+ "operation": "get_files",
+ "query": files_params,
+ }
+ assert await has_response.json() == {
+ "operation": "has_images",
+ "query": files_params,
+ }
+
+ assert harness.file_manager.calls == [
+ ("open_folder", open_payload),
+ ("get_files", files_params),
+ ("has_images", files_params),
+ ]
+
+
+async def test_cleanup_route_delegates_to_service():
+ async with example_images_app() as harness:
+ harness.cleanup_service.result = {
+ "success": True,
+ "moved_total": 2,
+ "moved_empty_folders": 1,
+ "moved_orphaned_folders": 1,
+ }
+
+ response = await harness.client.post("/api/lm/cleanup-example-image-folders")
+ body = await response.json()
+
+ assert response.status == 200
+ assert body == harness.cleanup_service.result
+ assert len(harness.cleanup_service.calls) == 1
+
+
+@pytest.mark.asyncio
+async def test_download_handler_methods_delegate() -> None:
+ class Recorder:
+ def __init__(self) -> None:
+ self.calls: List[Tuple[str, Any]] = []
+
+ async def get_status(self, request) -> dict:
+ self.calls.append(("get_status", request))
+ return {"status": "ok"}
+
+ async def pause_download(self, request) -> dict:
+ self.calls.append(("pause_download", request))
+ return {"status": "paused"}
+
+ async def resume_download(self, request) -> dict:
+ self.calls.append(("resume_download", request))
+ return {"status": "running"}
+
+ async def start_force_download(self, payload) -> dict:
+ self.calls.append(("start_force_download", payload))
+ return {"status": "force", "payload": payload}
+
+ class StubDownloadUseCase:
+ def __init__(self) -> None:
+ self.payloads: List[Any] = []
+
+ async def execute(self, payload: dict) -> dict:
+ self.payloads.append(payload)
+ return {"status": "started", "payload": payload}
+
+ class DummyRequest:
+ def __init__(self, payload: dict) -> None:
+ self._payload = payload
+ self.query = {}
+
+ async def json(self) -> dict:
+ return self._payload
+
+ recorder = Recorder()
+ use_case = StubDownloadUseCase()
+ handler = ExampleImagesDownloadHandler(use_case, recorder)
+ request = DummyRequest({"foo": "bar"})
+
+ download_response = await handler.download_example_images(request)
+ assert json.loads(download_response.text) == {"status": "started", "payload": {"foo": "bar"}}
+ status_response = await handler.get_example_images_status(request)
+ assert json.loads(status_response.text) == {"status": "ok"}
+ pause_response = await handler.pause_example_images(request)
+ assert json.loads(pause_response.text) == {"status": "paused"}
+ resume_response = await handler.resume_example_images(request)
+ assert json.loads(resume_response.text) == {"status": "running"}
+ force_response = await handler.force_download_example_images(request)
+ assert json.loads(force_response.text) == {"status": "force", "payload": {"foo": "bar"}}
+
+ assert use_case.payloads == [{"foo": "bar"}]
+ assert recorder.calls == [
+ ("get_status", request),
+ ("pause_download", request),
+ ("resume_download", request),
+ ("start_force_download", {"foo": "bar"}),
+ ]
+
+
+@pytest.mark.asyncio
+async def test_management_handler_methods_delegate() -> None:
+ class StubImportUseCase:
+ def __init__(self) -> None:
+ self.requests: List[Any] = []
+
+ async def execute(self, request: Any) -> dict:
+ self.requests.append(request)
+ return {"status": "imported"}
+
+ class Recorder:
+ def __init__(self) -> None:
+ self.calls: List[Tuple[str, Any]] = []
+
+ async def delete_custom_image(self, request) -> str:
+ self.calls.append(("delete_custom_image", request))
+ return "delete"
+
+ recorder = Recorder()
+ cleanup_service = StubExampleImagesCleanupService()
+ use_case = StubImportUseCase()
+ handler = ExampleImagesManagementHandler(use_case, recorder, cleanup_service)
+ request = object()
+
+ import_response = await handler.import_example_images(request)
+ assert json.loads(import_response.text) == {"status": "imported"}
+ assert await handler.delete_example_image(request) == "delete"
+ cleanup_service.result = {"success": True}
+ cleanup_response = await handler.cleanup_example_image_folders(request)
+ assert json.loads(cleanup_response.text) == {"success": True}
+ assert use_case.requests == [request]
+ assert recorder.calls == [("delete_custom_image", request)]
+ assert len(cleanup_service.calls) == 1
+
+
+@pytest.mark.asyncio
+async def test_file_handler_methods_delegate() -> None:
+ class Recorder:
+ def __init__(self) -> None:
+ self.calls: List[Tuple[str, Any]] = []
+
+ async def open_folder(self, request) -> str:
+ self.calls.append(("open_folder", request))
+ return "open"
+
+ async def get_files(self, request) -> str:
+ self.calls.append(("get_files", request))
+ return "files"
+
+ async def has_images(self, request) -> str:
+ self.calls.append(("has_images", request))
+ return "has"
+
+ recorder = Recorder()
+ handler = ExampleImagesFileHandler(recorder)
+ request = object()
+
+ assert await handler.open_example_images_folder(request) == "open"
+ assert await handler.get_example_image_files(request) == "files"
+ assert await handler.has_example_images(request) == "has"
+ assert recorder.calls == [
+ ("open_folder", request),
+ ("get_files", request),
+ ("has_images", request),
+ ]
+
+
+def test_handler_set_route_mapping_includes_all_handlers() -> None:
+ class DummyUseCase:
+ async def execute(self, payload):
+ return payload
+
+ class DummyManager:
+ async def get_status(self, request):
+ return {}
+
+ async def pause_download(self, request):
+ return {}
+
+ async def resume_download(self, request):
+ return {}
+
+ async def start_force_download(self, payload):
+ return payload
+
+ class DummyProcessor:
+ async def delete_custom_image(self, request):
+ return {}
+
+ download = ExampleImagesDownloadHandler(DummyUseCase(), DummyManager())
+ cleanup_service = StubExampleImagesCleanupService()
+ management = ExampleImagesManagementHandler(DummyUseCase(), DummyProcessor(), cleanup_service)
+ files = ExampleImagesFileHandler(object())
+ handler_set = ExampleImagesHandlerSet(
+ download=download,
+ management=management,
+ files=files,
+ )
+
+ mapping = handler_set.to_route_mapping()
+
+ expected_keys = {
+ "download_example_images",
+ "get_example_images_status",
+ "pause_example_images",
+ "resume_example_images",
+ "force_download_example_images",
+ "import_example_images",
+ "delete_example_image",
+ "cleanup_example_image_folders",
+ "open_example_images_folder",
+ "get_example_image_files",
+ "has_example_images",
+ }
+
+ assert mapping.keys() == expected_keys
+ for key in expected_keys:
+ assert callable(mapping[key])
diff --git a/tests/routes/test_lora_routes.py b/tests/routes/test_lora_routes.py
new file mode 100644
index 00000000..2b447987
--- /dev/null
+++ b/tests/routes/test_lora_routes.py
@@ -0,0 +1,213 @@
+import json
+from types import SimpleNamespace
+from unittest.mock import MagicMock
+
+import pytest
+
+from py.routes.lora_routes import LoraRoutes
+from server import PromptServer
+
+
+class DummyRequest:
+ def __init__(self, *, query=None, match_info=None, json_data=None):
+ self.query = query or {}
+ self.match_info = match_info or {}
+ self._json_data = json_data or {}
+
+ async def json(self):
+ return self._json_data
+
+
+class StubLoraService:
+ def __init__(self):
+ self.notes = {}
+ self.trigger_words = {}
+ self.usage_tips = {}
+ self.previews = {}
+ self.civitai = {}
+
+ async def get_lora_notes(self, name):
+ return self.notes.get(name)
+
+ async def get_lora_trigger_words(self, name):
+ return self.trigger_words.get(name, [])
+
+ async def get_lora_usage_tips_by_relative_path(self, path):
+ return self.usage_tips.get(path)
+
+ async def get_lora_preview_url(self, name):
+ return self.previews.get(name)
+
+ async def get_lora_civitai_url(self, name):
+ return self.civitai.get(name, {"civitai_url": ""})
+
+
+@pytest.fixture
+def routes():
+ handler = LoraRoutes()
+ handler.service = StubLoraService()
+ return handler
+
+
+async def test_get_lora_notes_success(routes):
+ routes.service.notes["demo"] = "Great notes"
+ request = DummyRequest(query={"name": "demo"})
+
+ response = await routes.get_lora_notes(request)
+ payload = json.loads(response.text)
+
+ assert payload == {"success": True, "notes": "Great notes"}
+
+
+async def test_get_lora_notes_missing_name(routes):
+ response = await routes.get_lora_notes(DummyRequest())
+ assert response.status == 400
+ assert response.text == "Lora file name is required"
+
+
+async def test_get_lora_notes_not_found(routes):
+ response = await routes.get_lora_notes(DummyRequest(query={"name": "missing"}))
+ payload = json.loads(response.text)
+ assert response.status == 404
+ assert payload == {"success": False, "error": "LoRA not found in cache"}
+
+
+async def test_get_lora_notes_error(routes, monkeypatch):
+ async def failing(*_args, **_kwargs):
+ raise RuntimeError("boom")
+
+ routes.service.get_lora_notes = failing
+
+ response = await routes.get_lora_notes(DummyRequest(query={"name": "demo"}))
+ payload = json.loads(response.text)
+
+ assert response.status == 500
+ assert payload["success"] is False
+ assert payload["error"] == "boom"
+
+
+async def test_get_lora_trigger_words_success(routes):
+ routes.service.trigger_words["demo"] = ["trigger"]
+ response = await routes.get_lora_trigger_words(DummyRequest(query={"name": "demo"}))
+ payload = json.loads(response.text)
+ assert payload == {"success": True, "trigger_words": ["trigger"]}
+
+
+async def test_get_lora_trigger_words_missing_name(routes):
+ response = await routes.get_lora_trigger_words(DummyRequest())
+ assert response.status == 400
+
+
+async def test_get_lora_trigger_words_error(routes):
+ async def failing(*_args, **_kwargs):
+ raise RuntimeError("fail")
+
+ routes.service.get_lora_trigger_words = failing
+
+ response = await routes.get_lora_trigger_words(DummyRequest(query={"name": "demo"}))
+ payload = json.loads(response.text)
+ assert response.status == 500
+ assert payload["success"] is False
+
+
+async def test_get_usage_tips_success(routes):
+ routes.service.usage_tips["path"] = "tips"
+ response = await routes.get_lora_usage_tips_by_path(DummyRequest(query={"relative_path": "path"}))
+ payload = json.loads(response.text)
+ assert payload == {"success": True, "usage_tips": "tips"}
+
+
+async def test_get_usage_tips_missing_param(routes):
+ response = await routes.get_lora_usage_tips_by_path(DummyRequest())
+ assert response.status == 400
+
+
+async def test_get_usage_tips_error(routes):
+ async def failing(*_args, **_kwargs):
+ raise RuntimeError("bad")
+
+ routes.service.get_lora_usage_tips_by_relative_path = failing
+ response = await routes.get_lora_usage_tips_by_path(DummyRequest(query={"relative_path": "path"}))
+ payload = json.loads(response.text)
+ assert response.status == 500
+ assert payload["success"] is False
+
+
+async def test_get_preview_url_success(routes):
+ routes.service.previews["demo"] = "http://preview"
+ response = await routes.get_lora_preview_url(DummyRequest(query={"name": "demo"}))
+ payload = json.loads(response.text)
+ assert payload == {"success": True, "preview_url": "http://preview"}
+
+
+async def test_get_preview_url_missing(routes):
+ response = await routes.get_lora_preview_url(DummyRequest())
+ assert response.status == 400
+
+
+async def test_get_preview_url_not_found(routes):
+ response = await routes.get_lora_preview_url(DummyRequest(query={"name": "missing"}))
+ payload = json.loads(response.text)
+ assert response.status == 404
+ assert payload["success"] is False
+
+
+async def test_get_civitai_url_success(routes):
+ routes.service.civitai["demo"] = {"civitai_url": "https://civitai.com"}
+ response = await routes.get_lora_civitai_url(DummyRequest(query={"name": "demo"}))
+ payload = json.loads(response.text)
+ assert payload == {"success": True, "civitai_url": "https://civitai.com"}
+
+
+async def test_get_civitai_url_missing(routes):
+ response = await routes.get_lora_civitai_url(DummyRequest())
+ assert response.status == 400
+
+
+async def test_get_civitai_url_not_found(routes):
+ response = await routes.get_lora_civitai_url(DummyRequest(query={"name": "missing"}))
+ payload = json.loads(response.text)
+ assert response.status == 404
+ assert payload["success"] is False
+
+
+async def test_get_civitai_url_error(routes):
+ async def failing(*_args, **_kwargs):
+ raise RuntimeError("oops")
+
+ routes.service.get_lora_civitai_url = failing
+ response = await routes.get_lora_civitai_url(DummyRequest(query={"name": "demo"}))
+ payload = json.loads(response.text)
+ assert response.status == 500
+ assert payload["success"] is False
+
+
+async def test_get_trigger_words_broadcasts(monkeypatch, routes):
+ send_mock = MagicMock()
+ PromptServer.instance = SimpleNamespace(send_sync=send_mock)
+
+ monkeypatch.setattr("py.routes.lora_routes.get_lora_info", lambda name: (f"path/{name}", [f"trigger-{name}"]))
+
+ request = DummyRequest(json_data={"lora_names": ["one"], "node_ids": ["node"]})
+
+ response = await routes.get_trigger_words(request)
+ payload = json.loads(response.text)
+
+ assert payload == {"success": True}
+ send_mock.assert_called_once_with(
+ "trigger_word_update",
+ {"id": "node", "message": "trigger-one"},
+ )
+
+
+async def test_get_trigger_words_error(monkeypatch, routes):
+ async def failing_json():
+ raise RuntimeError("bad json")
+
+ request = DummyRequest(json_data=None)
+ request.json = failing_json
+
+ response = await routes.get_trigger_words(request)
+ payload = json.loads(response.text)
+ assert response.status == 500
+ assert payload["success"] is False
diff --git a/tests/routes/test_recipe_route_scaffolding.py b/tests/routes/test_recipe_route_scaffolding.py
new file mode 100644
index 00000000..59765d36
--- /dev/null
+++ b/tests/routes/test_recipe_route_scaffolding.py
@@ -0,0 +1,236 @@
+"""Smoke tests for the recipe routing scaffolding.
+
+The cases keep the registrar/controller contract aligned with
+``docs/architecture/recipe_routes.md`` so future refactors can focus on handler
+logic.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import importlib.util
+import sys
+import types
+from collections import Counter
+from pathlib import Path
+from typing import Any, Awaitable, Callable, Dict
+
+import pytest
+from aiohttp import web
+
+
+REPO_ROOT = Path(__file__).resolve().parents[2]
+PY_PACKAGE_PATH = REPO_ROOT / "py"
+
+spec = importlib.util.spec_from_file_location(
+ "py_local",
+ PY_PACKAGE_PATH / "__init__.py",
+ submodule_search_locations=[str(PY_PACKAGE_PATH)],
+)
+py_local = importlib.util.module_from_spec(spec)
+assert spec.loader is not None
+spec.loader.exec_module(py_local)
+sys.modules.setdefault("py_local", py_local)
+
+base_routes_module = importlib.import_module("py_local.routes.base_recipe_routes")
+recipe_routes_module = importlib.import_module("py_local.routes.recipe_routes")
+registrar_module = importlib.import_module("py_local.routes.recipe_route_registrar")
+
+
+@pytest.fixture(autouse=True)
+def reset_service_registry(monkeypatch: pytest.MonkeyPatch):
+ """Ensure each test starts from a clean registry state."""
+
+ services_module = importlib.import_module("py_local.services.service_registry")
+ registry = services_module.ServiceRegistry
+ previous_services = dict(registry._services)
+ previous_locks = dict(registry._locks)
+ registry._services.clear()
+ registry._locks.clear()
+ try:
+ yield
+ finally:
+ registry._services = previous_services
+ registry._locks = previous_locks
+
+
+def _make_stub_scanner():
+ class _StubScanner:
+ def __init__(self):
+ self._cache = types.SimpleNamespace()
+
+ async def _lora_get_cached_data(): # pragma: no cover - smoke hook
+ return None
+
+ self._lora_scanner = types.SimpleNamespace(
+ get_cached_data=_lora_get_cached_data,
+ _hash_index=types.SimpleNamespace(_hash_to_path={}),
+ )
+
+ async def get_cached_data(self, force_refresh: bool = False):
+ return self._cache
+
+ return _StubScanner()
+
+
+def test_attach_dependencies_resolves_services_once(monkeypatch: pytest.MonkeyPatch):
+ base_module = base_routes_module
+ services_module = importlib.import_module("py_local.services.service_registry")
+ registry = services_module.ServiceRegistry
+ server_i18n = importlib.import_module("py_local.services.server_i18n").server_i18n
+
+ scanner = _make_stub_scanner()
+ civitai_client = object()
+ filter_calls = Counter()
+
+ async def fake_get_recipe_scanner():
+ return scanner
+
+ async def fake_get_civitai_client():
+ return civitai_client
+
+ def fake_create_filter():
+ filter_calls["create_filter"] += 1
+ return object()
+
+ monkeypatch.setattr(registry, "get_recipe_scanner", fake_get_recipe_scanner)
+ monkeypatch.setattr(registry, "get_civitai_client", fake_get_civitai_client)
+ monkeypatch.setattr(server_i18n, "create_template_filter", fake_create_filter)
+
+ async def scenario():
+ routes = base_module.BaseRecipeRoutes()
+
+ await routes.attach_dependencies()
+ await routes.attach_dependencies() # idempotent
+
+ assert routes.recipe_scanner is scanner
+ assert routes.lora_scanner is scanner._lora_scanner
+ assert routes.civitai_client is civitai_client
+ assert routes.template_env.filters["t"] is not None
+ assert filter_calls["create_filter"] == 1
+
+ asyncio.run(scenario())
+
+
+def test_register_startup_hooks_appends_once():
+ routes = base_routes_module.BaseRecipeRoutes()
+
+ app = web.Application()
+ routes.register_startup_hooks(app)
+ routes.register_startup_hooks(app)
+
+ startup_bound_to_routes = [
+ callback for callback in app.on_startup if getattr(callback, "__self__", None) is routes
+ ]
+
+ assert routes.attach_dependencies in startup_bound_to_routes
+ assert routes.prewarm_cache in startup_bound_to_routes
+ assert len(startup_bound_to_routes) == 2
+
+
+def test_to_route_mapping_uses_handler_set():
+ class DummyHandlerSet:
+ def __init__(self):
+ self.calls = 0
+
+ def to_route_mapping(self):
+ self.calls += 1
+
+ async def render_page(request): # pragma: no cover - simple coroutine
+ return web.Response(text="ok")
+
+ return {"render_page": render_page}
+
+ class DummyRoutes(base_routes_module.BaseRecipeRoutes):
+ def __init__(self):
+ super().__init__()
+ self.created = 0
+
+ def _create_handler_set(self): # noqa: D401 - simple override for test
+ self.created += 1
+ return DummyHandlerSet()
+
+ routes = DummyRoutes()
+ mapping = routes.to_route_mapping()
+
+ assert set(mapping.keys()) == {"render_page"}
+ assert asyncio.iscoroutinefunction(mapping["render_page"])
+ # Cached mapping reused on subsequent calls
+ assert routes.to_route_mapping() is mapping
+ # Handler set cached for get_handler_owner callers
+ assert isinstance(routes.get_handler_owner(), DummyHandlerSet)
+ assert routes.created == 1
+
+
+def test_recipe_route_registrar_binds_every_route():
+ class FakeRouter:
+ def __init__(self):
+ self.calls: list[tuple[str, str, Callable[..., Awaitable[Any]]]] = []
+
+ def add_get(self, path, handler):
+ self.calls.append(("GET", path, handler))
+
+ def add_post(self, path, handler):
+ self.calls.append(("POST", path, handler))
+
+ def add_put(self, path, handler):
+ self.calls.append(("PUT", path, handler))
+
+ def add_delete(self, path, handler):
+ self.calls.append(("DELETE", path, handler))
+
+ class FakeApp:
+ def __init__(self):
+ self.router = FakeRouter()
+
+ app = FakeApp()
+ registrar = registrar_module.RecipeRouteRegistrar(app)
+
+ handler_mapping = {
+ definition.handler_name: object()
+ for definition in registrar_module.ROUTE_DEFINITIONS
+ }
+
+ registrar.register_routes(handler_mapping)
+
+ assert {
+ (method, path)
+ for method, path, _ in app.router.calls
+ } == {(d.method, d.path) for d in registrar_module.ROUTE_DEFINITIONS}
+
+
+def test_recipe_routes_setup_routes_uses_registrar(monkeypatch: pytest.MonkeyPatch):
+ registered_mappings: list[Dict[str, Callable[..., Awaitable[Any]]]] = []
+
+ class DummyRegistrar:
+ def __init__(self, app):
+ self.app = app
+
+ def register_routes(self, mapping):
+ registered_mappings.append(mapping)
+
+ monkeypatch.setattr(recipe_routes_module, "RecipeRouteRegistrar", DummyRegistrar)
+
+ expected_mapping = {name: object() for name in ("render_page", "list_recipes")}
+
+ def fake_to_route_mapping(self):
+ return expected_mapping
+
+ monkeypatch.setattr(base_routes_module.BaseRecipeRoutes, "to_route_mapping", fake_to_route_mapping)
+ monkeypatch.setattr(
+ base_routes_module.BaseRecipeRoutes,
+ "_HANDLER_NAMES",
+ tuple(expected_mapping.keys()),
+ )
+
+ app = web.Application()
+ recipe_routes_module.RecipeRoutes.setup_routes(app)
+
+ assert registered_mappings == [expected_mapping]
+ recipe_callbacks = {
+ cb
+ for cb in app.on_startup
+ if isinstance(getattr(cb, "__self__", None), recipe_routes_module.RecipeRoutes)
+ }
+ assert {type(cb.__self__) for cb in recipe_callbacks} == {recipe_routes_module.RecipeRoutes}
+ assert {cb.__name__ for cb in recipe_callbacks} == {"attach_dependencies", "prewarm_cache"}
diff --git a/tests/routes/test_recipe_routes.py b/tests/routes/test_recipe_routes.py
new file mode 100644
index 00000000..467cb5b5
--- /dev/null
+++ b/tests/routes/test_recipe_routes.py
@@ -0,0 +1,330 @@
+"""Integration smoke tests for the recipe route stack."""
+from __future__ import annotations
+
+import json
+from contextlib import asynccontextmanager
+from dataclasses import dataclass
+from pathlib import Path
+from types import SimpleNamespace
+from typing import Any, AsyncIterator, Dict, List, Optional
+
+from aiohttp import FormData, web
+from aiohttp.test_utils import TestClient, TestServer
+
+from py.config import config
+from py.routes import base_recipe_routes
+from py.routes.recipe_routes import RecipeRoutes
+from py.services.recipes import RecipeValidationError
+from py.services.service_registry import ServiceRegistry
+
+
+@dataclass
+class RecipeRouteHarness:
+ """Container exposing the aiohttp client and stubbed collaborators."""
+
+ client: TestClient
+ scanner: "StubRecipeScanner"
+ analysis: "StubAnalysisService"
+ persistence: "StubPersistenceService"
+ sharing: "StubSharingService"
+ tmp_dir: Path
+
+
+class StubRecipeScanner:
+ """Minimal scanner double with the surface used by the handlers."""
+
+ def __init__(self, base_dir: Path) -> None:
+ self.recipes_dir = str(base_dir / "recipes")
+ self.listing_items: List[Dict[str, Any]] = []
+ self.cached_raw: List[Dict[str, Any]] = []
+ self.recipes: Dict[str, Dict[str, Any]] = {}
+ self.removed: List[str] = []
+
+ async def _noop_get_cached_data(force_refresh: bool = False) -> None: # noqa: ARG001 - signature mirrors real scanner
+ return None
+
+ self._lora_scanner = SimpleNamespace( # mimic BaseRecipeRoutes expectations
+ get_cached_data=_noop_get_cached_data,
+ _hash_index=SimpleNamespace(_hash_to_path={}),
+ )
+
+ async def get_cached_data(self, force_refresh: bool = False) -> SimpleNamespace: # noqa: ARG002 - flag unused by stub
+ return SimpleNamespace(raw_data=list(self.cached_raw))
+
+ async def get_paginated_data(self, **params: Any) -> Dict[str, Any]:
+ items = [dict(item) for item in self.listing_items]
+ page = int(params.get("page", 1))
+ page_size = int(params.get("page_size", 20))
+ return {
+ "items": items,
+ "total": len(items),
+ "page": page,
+ "page_size": page_size,
+ "total_pages": max(1, (len(items) + page_size - 1) // max(page_size, 1)),
+ }
+
+ async def get_recipe_by_id(self, recipe_id: str) -> Optional[Dict[str, Any]]:
+ return self.recipes.get(recipe_id)
+
+ async def remove_recipe(self, recipe_id: str) -> None:
+ self.removed.append(recipe_id)
+ self.recipes.pop(recipe_id, None)
+
+
+class StubAnalysisService:
+ """Captures calls made by analysis routes while returning canned responses."""
+
+ instances: List["StubAnalysisService"] = []
+
+ def __init__(self, **_: Any) -> None:
+ self.raise_for_uploaded: Optional[Exception] = None
+ self.raise_for_remote: Optional[Exception] = None
+ self.raise_for_local: Optional[Exception] = None
+ self.upload_calls: List[bytes] = []
+ self.remote_calls: List[Optional[str]] = []
+ self.local_calls: List[Optional[str]] = []
+ self.result = SimpleNamespace(payload={"loras": []}, status=200)
+ StubAnalysisService.instances.append(self)
+
+ async def analyze_uploaded_image(self, *, image_bytes: bytes | None, recipe_scanner) -> SimpleNamespace: # noqa: D401 - mirrors real signature
+ if self.raise_for_uploaded:
+ raise self.raise_for_uploaded
+ self.upload_calls.append(image_bytes or b"")
+ return self.result
+
+ async def analyze_remote_image(self, *, url: Optional[str], recipe_scanner, civitai_client) -> SimpleNamespace: # noqa: D401
+ if self.raise_for_remote:
+ raise self.raise_for_remote
+ self.remote_calls.append(url)
+ return self.result
+
+ async def analyze_local_image(self, *, file_path: Optional[str], recipe_scanner) -> SimpleNamespace: # noqa: D401
+ if self.raise_for_local:
+ raise self.raise_for_local
+ self.local_calls.append(file_path)
+ return self.result
+
+ async def analyze_widget_metadata(self, *, recipe_scanner) -> SimpleNamespace:
+ return SimpleNamespace(payload={"metadata": {}, "image_bytes": b""}, status=200)
+
+
+class StubPersistenceService:
+ """Stub for persistence operations to avoid filesystem writes."""
+
+ instances: List["StubPersistenceService"] = []
+
+ def __init__(self, **_: Any) -> None:
+ self.save_calls: List[Dict[str, Any]] = []
+ self.delete_calls: List[str] = []
+ self.save_result = SimpleNamespace(payload={"success": True, "recipe_id": "stub-id"}, status=200)
+ self.delete_result = SimpleNamespace(payload={"success": True}, status=200)
+ StubPersistenceService.instances.append(self)
+
+ async def save_recipe(self, *, recipe_scanner, image_bytes, image_base64, name, tags, metadata) -> SimpleNamespace: # noqa: D401
+ self.save_calls.append(
+ {
+ "recipe_scanner": recipe_scanner,
+ "image_bytes": image_bytes,
+ "image_base64": image_base64,
+ "name": name,
+ "tags": list(tags),
+ "metadata": metadata,
+ }
+ )
+ return self.save_result
+
+ async def delete_recipe(self, *, recipe_scanner, recipe_id: str) -> SimpleNamespace:
+ self.delete_calls.append(recipe_id)
+ await recipe_scanner.remove_recipe(recipe_id)
+ return self.delete_result
+
+ async def update_recipe(self, *, recipe_scanner, recipe_id: str, updates: Dict[str, Any]) -> SimpleNamespace: # pragma: no cover - unused by smoke tests
+ return SimpleNamespace(payload={"success": True, "recipe_id": recipe_id, "updates": updates}, status=200)
+
+ async def reconnect_lora(self, *, recipe_scanner, recipe_id: str, lora_index: int, target_name: str) -> SimpleNamespace: # pragma: no cover
+ return SimpleNamespace(payload={"success": True}, status=200)
+
+ async def bulk_delete(self, *, recipe_scanner, recipe_ids: List[str]) -> SimpleNamespace: # pragma: no cover
+ return SimpleNamespace(payload={"success": True, "deleted": recipe_ids}, status=200)
+
+ async def save_recipe_from_widget(self, *, recipe_scanner, metadata: Dict[str, Any], image_bytes: bytes) -> SimpleNamespace: # pragma: no cover
+ return SimpleNamespace(payload={"success": True}, status=200)
+
+
+class StubSharingService:
+ """Share service stub recording requests and returning canned responses."""
+
+ instances: List["StubSharingService"] = []
+
+ def __init__(self, *, ttl_seconds: int = 300, logger) -> None: # noqa: ARG002 - ttl unused in stub
+ self.share_calls: List[str] = []
+ self.download_calls: List[str] = []
+ self.share_result = SimpleNamespace(
+ payload={"success": True, "download_url": "/share/stub", "filename": "recipe.png"},
+ status=200,
+ )
+ self.download_info = SimpleNamespace(file_path="", download_filename="")
+ StubSharingService.instances.append(self)
+
+ async def share_recipe(self, *, recipe_scanner, recipe_id: str) -> SimpleNamespace:
+ self.share_calls.append(recipe_id)
+ return self.share_result
+
+ async def prepare_download(self, *, recipe_scanner, recipe_id: str) -> SimpleNamespace:
+ self.download_calls.append(recipe_id)
+ return self.download_info
+
+
+@asynccontextmanager
+async def recipe_harness(monkeypatch, tmp_path: Path) -> AsyncIterator[RecipeRouteHarness]:
+ """Context manager that yields a fully wired recipe route harness."""
+
+ StubAnalysisService.instances.clear()
+ StubPersistenceService.instances.clear()
+ StubSharingService.instances.clear()
+
+ scanner = StubRecipeScanner(tmp_path)
+
+ async def fake_get_recipe_scanner():
+ return scanner
+
+ async def fake_get_civitai_client():
+ return object()
+
+ monkeypatch.setattr(ServiceRegistry, "get_recipe_scanner", fake_get_recipe_scanner)
+ monkeypatch.setattr(ServiceRegistry, "get_civitai_client", fake_get_civitai_client)
+ monkeypatch.setattr(base_recipe_routes, "RecipeAnalysisService", StubAnalysisService)
+ monkeypatch.setattr(base_recipe_routes, "RecipePersistenceService", StubPersistenceService)
+ monkeypatch.setattr(base_recipe_routes, "RecipeSharingService", StubSharingService)
+ monkeypatch.setattr(config, "loras_roots", [str(tmp_path)], raising=False)
+
+ app = web.Application()
+ RecipeRoutes.setup_routes(app)
+
+ server = TestServer(app)
+ client = TestClient(server)
+ await client.start_server()
+
+ harness = RecipeRouteHarness(
+ client=client,
+ scanner=scanner,
+ analysis=StubAnalysisService.instances[-1],
+ persistence=StubPersistenceService.instances[-1],
+ sharing=StubSharingService.instances[-1],
+ tmp_dir=tmp_path,
+ )
+
+ try:
+ yield harness
+ finally:
+ await client.close()
+ StubAnalysisService.instances.clear()
+ StubPersistenceService.instances.clear()
+ StubSharingService.instances.clear()
+
+
+async def test_list_recipes_provides_file_urls(monkeypatch, tmp_path: Path) -> None:
+ async with recipe_harness(monkeypatch, tmp_path) as harness:
+ recipe_path = harness.tmp_dir / "recipes" / "demo.png"
+ harness.scanner.listing_items = [
+ {
+ "id": "recipe-1",
+ "file_path": str(recipe_path),
+ "title": "Demo",
+ "loras": [],
+ }
+ ]
+ harness.scanner.cached_raw = list(harness.scanner.listing_items)
+
+ response = await harness.client.get("/api/lm/recipes")
+ payload = await response.json()
+
+ assert response.status == 200
+ assert payload["items"][0]["file_url"].endswith("demo.png")
+ assert payload["items"][0]["loras"] == []
+
+
+async def test_save_and_delete_recipe_round_trip(monkeypatch, tmp_path: Path) -> None:
+ async with recipe_harness(monkeypatch, tmp_path) as harness:
+ form = FormData()
+ form.add_field("image", b"stub", filename="sample.png", content_type="image/png")
+ form.add_field("name", "Test Recipe")
+ form.add_field("tags", json.dumps(["tag-a"]))
+ form.add_field("metadata", json.dumps({"loras": []}))
+ form.add_field("image_base64", "aW1hZ2U=")
+
+ harness.persistence.save_result = SimpleNamespace(
+ payload={"success": True, "recipe_id": "saved-id"},
+ status=201,
+ )
+
+ save_response = await harness.client.post("/api/lm/recipes/save", data=form)
+ save_payload = await save_response.json()
+
+ assert save_response.status == 201
+ assert save_payload["recipe_id"] == "saved-id"
+ assert harness.persistence.save_calls[-1]["name"] == "Test Recipe"
+
+ harness.persistence.delete_result = SimpleNamespace(payload={"success": True}, status=200)
+
+ delete_response = await harness.client.delete("/api/lm/recipe/saved-id")
+ delete_payload = await delete_response.json()
+
+ assert delete_response.status == 200
+ assert delete_payload["success"] is True
+ assert harness.persistence.delete_calls == ["saved-id"]
+
+
+async def test_analyze_uploaded_image_error_path(monkeypatch, tmp_path: Path) -> None:
+ async with recipe_harness(monkeypatch, tmp_path) as harness:
+ harness.analysis.raise_for_uploaded = RecipeValidationError("No image data provided")
+
+ form = FormData()
+ form.add_field("image", b"", filename="empty.png", content_type="image/png")
+
+ response = await harness.client.post("/api/lm/recipes/analyze-image", data=form)
+ payload = await response.json()
+
+ assert response.status == 400
+ assert payload["error"] == "No image data provided"
+ assert payload["loras"] == []
+
+
+async def test_share_and_download_recipe(monkeypatch, tmp_path: Path) -> None:
+ async with recipe_harness(monkeypatch, tmp_path) as harness:
+ recipe_id = "share-me"
+ download_path = harness.tmp_dir / "recipes" / "share.png"
+ download_path.parent.mkdir(parents=True, exist_ok=True)
+ download_path.write_bytes(b"stub")
+
+ harness.scanner.recipes[recipe_id] = {
+ "id": recipe_id,
+ "title": "Shared",
+ "file_path": str(download_path),
+ }
+
+ harness.sharing.share_result = SimpleNamespace(
+ payload={"success": True, "download_url": "/api/share", "filename": "share.png"},
+ status=200,
+ )
+ harness.sharing.download_info = SimpleNamespace(
+ file_path=str(download_path),
+ download_filename="share.png",
+ )
+
+ share_response = await harness.client.get(f"/api/lm/recipe/{recipe_id}/share")
+ share_payload = await share_response.json()
+
+ assert share_response.status == 200
+ assert share_payload["filename"] == "share.png"
+ assert harness.sharing.share_calls == [recipe_id]
+
+ download_response = await harness.client.get(f"/api/lm/recipe/{recipe_id}/share/download")
+ body = await download_response.read()
+
+ assert download_response.status == 200
+ assert download_response.headers["Content-Disposition"] == 'attachment; filename="share.png"'
+ assert body == b"stub"
+
+ download_path.unlink(missing_ok=True)
+
diff --git a/tests/routes/test_route_integration.py b/tests/routes/test_route_integration.py
new file mode 100644
index 00000000..dc0a1261
--- /dev/null
+++ b/tests/routes/test_route_integration.py
@@ -0,0 +1,160 @@
+"""End-to-end integration tests for aiohttp route registrars."""
+
+from __future__ import annotations
+
+import asyncio
+from contextlib import asynccontextmanager
+from types import SimpleNamespace
+from typing import AsyncIterator, Dict, Iterable, List, Sequence
+
+from aiohttp import web
+from aiohttp.test_utils import TestClient, TestServer
+
+from py.routes.lora_routes import LoraRoutes
+from py.services.service_registry import ServiceRegistry
+from py.services.websocket_manager import ws_manager as global_ws_manager
+
+
class IntegrationCache:
    """Minimal in-memory stand-in satisfying the scanner-cache contract."""

    def __init__(self, items: Sequence[Dict[str, object]]) -> None:
        # Defensive copies so callers cannot mutate fixture state in place.
        self.raw_data: List[Dict[str, object]] = [dict(entry) for entry in items]
        self.folders: List[str] = ["/"]

    async def get_sorted_data(self, *_: object, **__: object) -> List[Dict[str, object]]:
        """Hand back copies of the cached rows; sort arguments are ignored."""
        return [dict(entry) for entry in self.raw_data]

    async def resort(self) -> None:
        """No-op: the static fixture data never needs re-sorting."""
        return None
+
+
class IntegrationScanner:
    """Scanner double that registers with ServiceRegistry expectations."""

    def __init__(self, items: Iterable[Dict[str, object]]) -> None:
        self.model_type = "lora"
        self._cache = IntegrationCache(list(items))
        # NOTE: the lambdas close over self._hash_index lazily, so referring
        # to the attribute while it is being constructed is safe at call time.
        self._hash_index = SimpleNamespace(
            removed_paths=[],
            remove_by_path=lambda path: self._hash_index.removed_paths.append(path),
            get_duplicate_hashes=lambda: {},
            get_duplicate_filenames=lambda: {},
        )
        self._tags_count: Dict[str, int] = {}
        self._excluded_models: List[str] = []

    async def get_cached_data(self, *_: object, **__: object) -> IntegrationCache:
        # Route handlers only need the cache object itself.
        return self._cache

    def get_model_roots(self) -> List[str]:  # pragma: no cover - documented surface
        return ["/"]

    async def bulk_delete_models(self, file_paths: Iterable[str]) -> Dict[str, object]:
        """Remove matching cache rows and mirror the real scanner's result shape."""
        existing_paths = {item["file_path"] for item in self._cache.raw_data}
        deleted = [path for path in file_paths if path in existing_paths]
        self._cache.raw_data = [
            item for item in self._cache.raw_data if item["file_path"] not in deleted
        ]
        await self._cache.resort()
        for path in deleted:
            self._hash_index.remove_by_path(path)
        return {"success": True, "deleted": deleted}

    async def check_model_version_exists(self, *_: object, **__: object) -> bool:
        # Downloads in these tests never collide with an existing version.
        return False
+
+
@asynccontextmanager
async def aiohttp_client(app: web.Application) -> AsyncIterator[TestClient]:
    """Spin up a TestClient with lifecycle management."""

    server = TestServer(app)
    client = TestClient(server)
    await client.start_server()
    try:
        yield client
    finally:
        # Close even when the test body raises, so sockets do not leak.
        await client.close()
+
+
def test_lora_route_stack_returns_real_data():
    """Spin up LoRA routes and ensure ServiceRegistry-powered wiring succeeds."""

    async def scenario() -> None:
        ServiceRegistry.clear_services()

        fixture_item = {
            "model_name": "Alpha",
            "file_name": "alpha.safetensors",
            "folder": "root",
            "file_path": "/tmp/alpha.safetensors",
            "size": 128,
            "modified": "2024-01-01T00:00:00Z",
            "tags": ["integration"],
            "civitai": {"trainedWords": ["alpha"]},
            "preview_url": "",
            "preview_nsfw_level": 0,
            "base_model": "SD1",
            "usage_tips": "Use gently",
            "notes": "Integration sample",
            "from_civitai": True,
        }
        scanner = IntegrationScanner([fixture_item])
        await ServiceRegistry.register_service("lora_scanner", scanner)

        app = web.Application()
        routes = LoraRoutes()
        routes.setup_routes(app)

        async with aiohttp_client(app) as client:
            response = await client.get("/api/lm/loras/list")
            payload = await response.json()

        assert response.status == 200
        assert payload["total"] == 1
        returned = payload["items"][0]
        assert returned["model_name"] == "Alpha"
        assert returned["file_name"] == "alpha.safetensors"
        assert returned["usage_tips"] == "Use gently"

    try:
        asyncio.run(scenario())
    finally:
        # Clear the registry even when an assertion fails, so the stub
        # scanner cannot leak into tests that run afterwards.
        ServiceRegistry.clear_services()
+
+
def test_websocket_routes_broadcast_through_registry():
    """Ensure websocket endpoints accept connections and relay broadcasts."""

    async def scenario() -> None:
        ServiceRegistry.clear_services()
        ws_manager = await ServiceRegistry.get_websocket_manager()

        app = web.Application()
        app.router.add_get("/ws/fetch-progress", ws_manager.handle_connection)
        app.router.add_get("/ws/download-progress", ws_manager.handle_download_connection)

        async with aiohttp_client(app) as client:
            fetch_ws = await client.ws_connect("/ws/fetch-progress")
            await ws_manager.broadcast({"kind": "ping"})
            message = await asyncio.wait_for(fetch_ws.receive_json(), timeout=1)
            assert message == {"kind": "ping"}

            download_ws = await client.ws_connect("/ws/download-progress?id=session-1")
            # The download endpoint greets each connection with its id.
            greeting = await asyncio.wait_for(download_ws.receive_json(), timeout=1)
            assert greeting["type"] == "download_id"
            assert greeting["download_id"] == "session-1"

            await ws_manager.broadcast_download_progress("session-1", {"progress": 55})
            progress = await asyncio.wait_for(download_ws.receive_json(), timeout=1)
            assert progress["progress"] == 55

            await fetch_ws.close()
            await download_ws.close()

        # Ensure the registry cached instance matches the module-level singleton.
        assert ws_manager is global_ws_manager

    try:
        asyncio.run(scenario())
    finally:
        # Always reset the registry so later tests start from a clean slate,
        # even when an assertion above fails.
        ServiceRegistry.clear_services()
diff --git a/tests/services/test_base_model_service.py b/tests/services/test_base_model_service.py
new file mode 100644
index 00000000..fc28a54e
--- /dev/null
+++ b/tests/services/test_base_model_service.py
@@ -0,0 +1,296 @@
+import pytest
+
+import importlib
+import importlib.util
+import sys
+from pathlib import Path
+
+ROOT = Path(__file__).resolve().parents[2]
+if str(ROOT) not in sys.path:
+ sys.path.insert(0, str(ROOT))
+
+
def import_from(module_name: str):
    """Import *module_name*, forcing the repo-local ``py`` package if needed.

    If ``sys.modules['py']`` is absent or shadowed by an unrelated module of
    the same name, the repository's ``py/__init__.py`` is loaded explicitly
    and marked as a package before delegating to ``importlib.import_module``.
    """
    existing = sys.modules.get("py")
    if existing is None or getattr(existing, "__file__", "") != str(ROOT / "py/__init__.py"):
        sys.modules.pop("py", None)
        spec = importlib.util.spec_from_file_location("py", ROOT / "py/__init__.py")
        module = importlib.util.module_from_spec(spec)
        assert spec and spec.loader
        spec.loader.exec_module(module)  # type: ignore[union-attr]
        # Setting __path__ makes the loaded module behave as a package so
        # submodule imports (py.services.*) resolve against the repo tree.
        module.__path__ = [str(ROOT / "py")]
        sys.modules["py"] = module
    return importlib.import_module(module_name)
+
+
+BaseModelService = import_from("py.services.base_model_service").BaseModelService
+model_query_module = import_from("py.services.model_query")
+ModelCacheRepository = model_query_module.ModelCacheRepository
+ModelFilterSet = model_query_module.ModelFilterSet
+SearchStrategy = model_query_module.SearchStrategy
+SortParams = model_query_module.SortParams
+BaseModelMetadata = import_from("py.utils.models").BaseModelMetadata
+
+
class StubSettings:
    """Dictionary-backed stand-in for the settings provider."""

    def __init__(self, values):
        # Copy so later mutation of the caller's mapping cannot leak in.
        self._values = dict(values)

    def get(self, key, default=None):
        """Mirror ``settings.get``: stored value if present, else *default*."""
        if key in self._values:
            return self._values[key]
        return default
+
+
class DummyService(BaseModelService):
    """Concrete BaseModelService that returns model data unchanged."""

    async def format_response(self, model_data):
        # Tests exercise the pipeline, not response shaping, so pass through.
        return model_data
+
+
class StubRepository:
    """Repository double that records calls and serves a fixed dataset."""

    def __init__(self, data):
        self._data = list(data)
        self.parse_sort_calls = []
        self.fetch_sorted_calls = []

    def parse_sort(self, sort_by):
        # Delegate to the real parser so SortParams semantics stay honest.
        params = ModelCacheRepository.parse_sort(sort_by)
        self.parse_sort_calls.append(sort_by)
        return params

    async def fetch_sorted(self, params):
        # Ignore the sort parameters; tests only assert they were received.
        self.fetch_sorted_calls.append(params)
        return list(self._data)
+
+
class StubFilterSet:
    """Filter double recording invocations and returning a canned result."""

    def __init__(self, result):
        self.result = list(result)
        self.calls = []

    def apply(self, data, criteria):
        """Record the call and hand back a copy of the canned rows."""
        snapshot = (list(data), criteria)
        self.calls.append(snapshot)
        return self.result[:]
+
+
class StubSearchStrategy:
    """Search double that records calls and returns predetermined hits."""

    def __init__(self, search_result):
        self.search_result = list(search_result)
        self.normalize_calls = []
        self.apply_calls = []

    def normalize_options(self, options):
        """Record *options* and merge them over a recursive-by-default base."""
        self.normalize_calls.append(options)
        merged = {"recursive": True}
        if options:
            merged.update(options)
        return merged

    def apply(self, data, search_term, options, fuzzy):
        """Record the full invocation and hand back a copy of the canned hits."""
        call = (list(data), search_term, options, fuzzy)
        self.apply_calls.append(call)
        return self.search_result[:]
+
+
@pytest.mark.asyncio
async def test_get_paginated_data_uses_injected_collaborators():
    """Each injected collaborator is consulted with the expected arguments."""
    data = [
        {"model_name": "Alpha", "folder": "root"},
        {"model_name": "Beta", "folder": "root"},
    ]
    repository = StubRepository(data)
    filter_set = StubFilterSet([{"model_name": "Filtered"}])
    search_strategy = StubSearchStrategy([{"model_name": "SearchResult"}])
    settings = StubSettings({})

    service = DummyService(
        model_type="stub",
        scanner=object(),
        metadata_class=BaseModelMetadata,
        cache_repository=repository,
        filter_set=filter_set,
        search_strategy=search_strategy,
        settings_provider=settings,
    )

    response = await service.get_paginated_data(
        page=1,
        page_size=5,
        sort_by="name:desc",
        folder="root",
        search="query",
        fuzzy_search=True,
        base_models=["base"],
        tags=["tag"],
        search_options={"recursive": False},
        favorites_only=True,
    )

    # Sorting is parsed once and the result drives the repository fetch.
    assert repository.parse_sort_calls == ["name:desc"]
    assert repository.fetch_sorted_calls and isinstance(repository.fetch_sorted_calls[0], SortParams)
    sort_params = repository.fetch_sorted_calls[0]
    assert sort_params.key == "name" and sort_params.order == "desc"

    # The filter set receives the raw data plus the assembled criteria.
    assert filter_set.calls, "FilterSet should be invoked"
    call_data, criteria = filter_set.calls[0]
    assert call_data == data
    assert criteria.folder == "root"
    assert criteria.base_models == ["base"]
    assert criteria.tags == ["tag"]
    assert criteria.favorites_only is True
    assert criteria.search_options.get("recursive") is False

    # Options appear normalised twice — presumably once while building the
    # criteria and once before searching; confirm against the service code.
    assert search_strategy.normalize_calls == [{"recursive": False}, {"recursive": False}]
    assert search_strategy.apply_calls == [([{"model_name": "Filtered"}], "query", {"recursive": False}, True)]

    assert response["items"] == search_strategy.search_result
    assert response["total"] == len(search_strategy.search_result)
    assert response["page"] == 1
    assert response["page_size"] == 5
+
+
class FakeCache:
    """In-memory cache that only understands name-based sorting."""

    def __init__(self, items):
        self.items = list(items)

    async def get_sorted_data(self, sort_key, order):
        """Sort by lower-cased model name when asked; otherwise keep order."""
        if sort_key != "name":
            return list(self.items)
        ordered = sorted(self.items, key=lambda row: row["model_name"].lower())
        # Reverse after an ascending sort (matches the original's semantics
        # for equal keys, unlike sorted(reverse=True)).
        if order == "desc":
            ordered.reverse()
        return ordered
+
+
class FakeScanner:
    """Scanner double exposing a pre-built cache object."""

    def __init__(self, cache):
        self._cache = cache

    async def get_cached_data(self, *_, **__):
        # Extra positional/keyword arguments from the service are ignored.
        return self._cache
+
+
@pytest.mark.asyncio
async def test_get_paginated_data_filters_and_searches_combination():
    """Real filter/search collaborators combine to select exactly one row."""
    items = [
        {
            "model_name": "Alpha",
            "file_name": "alpha.safetensors",
            "folder": "root/sub",
            "tags": ["tag1"],
            "base_model": "v1",
            "favorite": True,
            "preview_nsfw_level": 0,
        },
        {
            # Excluded: wrong tag/base_model, not favorite, and NSFW level
            # above the SFW-only threshold.
            "model_name": "Beta",
            "file_name": "beta.safetensors",
            "folder": "root",
            "tags": ["tag2"],
            "base_model": "v2",
            "favorite": False,
            "preview_nsfw_level": 999,
        },
        {
            # The only row matching every criterion, including the
            # creator-username search term.
            "model_name": "Gamma",
            "file_name": "gamma.safetensors",
            "folder": "root/sub2",
            "tags": ["tag1", "tag3"],
            "base_model": "v1",
            "favorite": True,
            "preview_nsfw_level": 0,
            "civitai": {"creator": {"username": "artist"}},
        },
    ]

    cache = FakeCache(items)
    scanner = FakeScanner(cache)
    settings = StubSettings({"show_only_sfw": True})

    service = DummyService(
        model_type="stub",
        scanner=scanner,
        metadata_class=BaseModelMetadata,
        cache_repository=ModelCacheRepository(scanner),
        filter_set=ModelFilterSet(settings),
        search_strategy=SearchStrategy(),
        settings_provider=settings,
    )

    response = await service.get_paginated_data(
        page=1,
        page_size=1,
        sort_by="name:asc",
        folder="root",
        search="artist",
        base_models=["v1"],
        tags=["tag1"],
        search_options={"creator": True, "tags": True},
        favorites_only=True,
    )

    assert response["items"] == [items[2]]
    assert response["total"] == 1
    assert response["page"] == 1
    assert response["page_size"] == 1
    assert response["total_pages"] == 1
+
+
class PassThroughFilterSet:
    """Filter double that records criteria and returns data unchanged."""

    def __init__(self):
        self.calls = []

    def apply(self, data, criteria):
        """Remember *criteria* and pass *data* through as a fresh list."""
        self.calls.append(criteria)
        return [*data]
+
+
class NoSearchStrategy:
    """Search double that fails the test if a search is ever executed."""

    def __init__(self):
        self.normalize_calls = []
        self.apply_called = False

    def normalize_options(self, options):
        self.normalize_calls.append(options)
        return {"recursive": True}

    def apply(self, *args, **kwargs):
        # Reaching this means the service searched despite an empty term.
        self.apply_called = True
        pytest.fail("Search should not be invoked when no search term is provided")
+
+
@pytest.mark.asyncio
async def test_get_paginated_data_paginates_without_search():
    """Without a search term, only sorting, filtering and paging run."""
    items = [
        {"model_name": name, "folder": "root"}
        for name in ["Alpha", "Beta", "Gamma", "Delta", "Epsilon"]
    ]

    repository = StubRepository(items)
    filter_set = PassThroughFilterSet()
    search_strategy = NoSearchStrategy()
    settings = StubSettings({})

    service = DummyService(
        model_type="stub",
        scanner=object(),
        metadata_class=BaseModelMetadata,
        cache_repository=repository,
        filter_set=filter_set,
        search_strategy=search_strategy,
        settings_provider=settings,
    )

    response = await service.get_paginated_data(
        page=2,
        page_size=2,
        sort_by="name:asc",
    )

    assert repository.parse_sort_calls == ["name:asc"]
    assert len(repository.fetch_sorted_calls) == 1
    assert filter_set.calls and filter_set.calls[0].favorites_only is False
    # NoSearchStrategy.apply would pytest.fail; the flag double-checks it.
    assert search_strategy.apply_called is False
    # Page 2 of size 2 over five repository-ordered items.
    assert response["items"] == items[2:4]
    assert response["total"] == len(items)
    assert response["page"] == 2
    assert response["page_size"] == 2
    assert response["total_pages"] == 3
diff --git a/tests/services/test_civitai_client.py b/tests/services/test_civitai_client.py
new file mode 100644
index 00000000..f5283443
--- /dev/null
+++ b/tests/services/test_civitai_client.py
@@ -0,0 +1,222 @@
+from unittest.mock import AsyncMock
+
+import pytest
+
+from py.services import civitai_client as civitai_client_module
+from py.services.civitai_client import CivitaiClient
+from py.services.model_metadata_provider import ModelMetadataProviderManager
+
+
class DummyDownloader:
    """Records downloader invocations and returns canned successes."""

    def __init__(self):
        self.download_calls = []
        self.memory_calls = []
        self.request_calls = []

    async def download_file(self, **kwargs):
        """Pretend the file download succeeded at the requested path."""
        self.download_calls.append(kwargs)
        return True, kwargs["save_path"]

    async def download_to_memory(self, url, use_auth=False):
        """Pretend an in-memory download produced JPEG bytes."""
        self.memory_calls.append(dict(url=url, use_auth=use_auth))
        return True, b"bytes", {"content-type": "image/jpeg"}

    async def make_request(self, method, url, use_auth=True):
        """Pretend an API request succeeded with an empty payload."""
        self.request_calls.append(dict(method=method, url=url, use_auth=use_auth))
        return True, {}
+
+
@pytest.fixture(autouse=True)
def reset_singletons():
    """Drop cached singleton instances before and after every test."""
    CivitaiClient._instance = None
    ModelMetadataProviderManager._instance = None
    yield
    # Reset again so an instance created during the test cannot leak.
    CivitaiClient._instance = None
    ModelMetadataProviderManager._instance = None
+
+
@pytest.fixture
def downloader(monkeypatch):
    """Install a DummyDownloader behind the module's get_downloader hook."""
    instance = DummyDownloader()
    monkeypatch.setattr(civitai_client_module, "get_downloader", AsyncMock(return_value=instance))
    return instance
+
+
async def test_download_file_uses_downloader(tmp_path, downloader):
    """download_file should delegate to the shared downloader with auth on."""
    client = await CivitaiClient.get_instance()
    save_dir = tmp_path / "files"
    save_dir.mkdir()

    success, path = await client.download_file(
        url="https://example.invalid/model",
        save_dir=str(save_dir),
        default_filename="model.safetensors",
    )

    assert success is True
    assert path == str(save_dir / "model.safetensors")
    assert downloader.download_calls[0]["use_auth"] is True
+
+
async def test_get_model_by_hash_enriches_metadata(monkeypatch, downloader):
    """Hash lookups should be enriched with fields from the parent model."""
    version_payload = {
        "modelId": 123,
        "model": {"description": "", "tags": []},
        "creator": {},
    }
    model_payload = {"description": "desc", "tags": ["tag"], "creator": {"username": "user"}}

    async def fake_make_request(method, url, use_auth=True):
        # First call resolves the hash; second fetches the parent model.
        if url.endswith("by-hash/hash"):
            return True, version_payload.copy()
        if url.endswith("/models/123"):
            return True, model_payload
        return False, "unexpected"

    downloader.make_request = fake_make_request

    client = await CivitaiClient.get_instance()

    result, error = await client.get_model_by_hash("hash")

    assert error is None
    assert result["model"]["description"] == "desc"
    assert result["model"]["tags"] == ["tag"]
    assert result["creator"] == {"username": "user"}
+
+
async def test_get_model_by_hash_handles_not_found(monkeypatch, downloader):
    """A failed hash lookup maps to (None, 'Model not found')."""
    async def fake_make_request(method, url, use_auth=True):
        return False, "not found"

    downloader.make_request = fake_make_request

    client = await CivitaiClient.get_instance()

    result, error = await client.get_model_by_hash("missing")

    assert result is None
    assert error == "Model not found"
+
+
async def test_download_preview_image_writes_file(tmp_path, downloader):
    """Preview downloads create parent directories and write the bytes."""
    client = await CivitaiClient.get_instance()
    # Parent directory does not exist yet; the client must create it.
    target = tmp_path / "preview" / "image.jpg"

    success = await client.download_preview_image("https://example.invalid/preview", str(target))

    assert success is True
    assert target.exists()
    assert target.read_bytes() == b"bytes"
+
+
async def test_download_preview_image_failure(monkeypatch, downloader):
    """A failed in-memory download should be reported, not written."""
    async def failing_download(url, use_auth=False):
        return False, b"", {}

    downloader.download_to_memory = failing_download

    client = await CivitaiClient.get_instance()
    target = "/tmp/ignored.jpg"

    success = await client.download_preview_image("https://example.invalid/preview", target)

    assert success is False
+
+
async def test_get_model_versions_success(monkeypatch, downloader):
    """get_model_versions returns the API payload untouched on success."""
    async def fake_make_request(method, url, use_auth=True):
        return True, {"modelVersions": [{"id": 1}], "type": "LORA", "name": "Model"}

    downloader.make_request = fake_make_request

    client = await CivitaiClient.get_instance()

    result = await client.get_model_versions("123")

    assert result == {"modelVersions": [{"id": 1}], "type": "LORA", "name": "Model"}
+
+
async def test_get_model_version_by_version_id(monkeypatch, downloader):
    """Version-id lookups are enriched with parent-model fields too."""
    async def fake_make_request(method, url, use_auth=True):
        if url.endswith("/model-versions/7"):
            return True, {
                "modelId": 321,
                "model": {"description": ""},
                "files": [],
            }
        if url.endswith("/models/321"):
            return True, {"description": "desc", "tags": ["tag"], "creator": {"username": "user"}}
        return False, "unexpected"

    downloader.make_request = fake_make_request

    client = await CivitaiClient.get_instance()

    result = await client.get_model_version(version_id=7)

    assert result["model"]["description"] == "desc"
    assert result["model"]["tags"] == ["tag"]
    assert result["creator"] == {"username": "user"}
+
+
async def test_get_model_version_requires_identifier(monkeypatch, downloader):
    """Calling get_model_version with no identifier should yield None."""
    client = await CivitaiClient.get_instance()
    result = await client.get_model_version()
    assert result is None
+
+
async def test_get_model_version_info_handles_not_found(monkeypatch, downloader):
    """A failed version-info request maps to (None, 'Model not found')."""
    async def fake_make_request(method, url, use_auth=True):
        return False, "not found"

    downloader.make_request = fake_make_request

    client = await CivitaiClient.get_instance()

    result, error = await client.get_model_version_info("55")

    assert result is None
    assert error == "Model not found"
+
+
async def test_get_model_version_info_success(monkeypatch, downloader):
    """A successful version-info request returns (payload, None)."""
    expected = {"id": 55}

    async def fake_make_request(method, url, use_auth=True):
        return True, expected

    downloader.make_request = fake_make_request

    client = await CivitaiClient.get_instance()

    result, error = await client.get_model_version_info("55")

    assert result == expected
    assert error is None
+
+
async def test_get_image_info_returns_first_item(monkeypatch, downloader):
    """When the API returns several items, only the first is surfaced."""
    async def fake_make_request(method, url, use_auth=True):
        return True, {"items": [{"id": 1}, {"id": 2}]}

    downloader.make_request = fake_make_request

    client = await CivitaiClient.get_instance()

    result = await client.get_image_info("42")

    assert result == {"id": 1}
+
+
async def test_get_image_info_handles_missing(monkeypatch, downloader):
    """An empty items list yields None rather than raising."""
    async def fake_make_request(method, url, use_auth=True):
        return True, {"items": []}

    downloader.make_request = fake_make_request

    client = await CivitaiClient.get_instance()

    result = await client.get_image_info("42")

    assert result is None
diff --git a/tests/services/test_download_manager.py b/tests/services/test_download_manager.py
new file mode 100644
index 00000000..80701e7d
--- /dev/null
+++ b/tests/services/test_download_manager.py
@@ -0,0 +1,247 @@
+from pathlib import Path
+from types import SimpleNamespace
+from unittest.mock import AsyncMock
+
+import pytest
+
+from py.services.download_manager import DownloadManager
+from py.services import download_manager
+from py.services.service_registry import ServiceRegistry
+from py.services.settings_manager import settings
+
+
@pytest.fixture(autouse=True)
def reset_download_manager():
    """Ensure each test operates on a fresh singleton."""
    DownloadManager._instance = None
    yield
    # Reset again so an instance created during the test cannot leak.
    DownloadManager._instance = None
+
+
@pytest.fixture(autouse=True)
def isolate_settings(monkeypatch, tmp_path):
    """Point settings writes at a temporary directory to avoid touching real files."""
    default_settings = settings._get_default_settings()
    default_settings.update(
        {
            "default_lora_root": str(tmp_path),
            "default_checkpoint_root": str(tmp_path / "checkpoints"),
            "default_embedding_root": str(tmp_path / "embeddings"),
            # Template plus mapping below produce "MappedModel/<first tag>".
            "download_path_templates": {
                "lora": "{base_model}/{first_tag}",
                "checkpoint": "{base_model}/{first_tag}",
                "embedding": "{base_model}/{first_tag}",
            },
            "base_model_path_mappings": {"BaseModel": "MappedModel"},
        }
    )
    monkeypatch.setattr(settings, "settings", default_settings)
    # Disable persistence so tests never write a settings file to disk.
    monkeypatch.setattr(type(settings), "_save_settings", lambda self: None)
+
+
@pytest.fixture(autouse=True)
def stub_metadata(monkeypatch):
    """Replace metadata classes with a stub deriving fields from save_path."""
    class _StubMetadata:
        def __init__(self, save_path: str):
            self.file_path = save_path
            self.sha256 = "sha256"
            self.file_name = Path(save_path).stem

    def _factory(save_path: str):
        return _StubMetadata(save_path)

    def _make_class():
        # Only the from_civitai_info class-level entry point is exercised.
        @staticmethod
        def from_civitai_info(_version_info, _file_info, save_path):
            return _factory(save_path)

        return type("StubMetadata", (), {"from_civitai_info": from_civitai_info})

    # All three model types share the same stub class.
    stub_class = _make_class()
    monkeypatch.setattr(download_manager, "LoraMetadata", stub_class)
    monkeypatch.setattr(download_manager, "CheckpointMetadata", stub_class)
    monkeypatch.setattr(download_manager, "EmbeddingMetadata", stub_class)
+
+
class DummyScanner:
    """Scanner double reporting a configurable version-exists answer."""

    def __init__(self, exists: bool = False):
        self.exists = exists
        self.calls = []

    async def check_model_version_exists(self, version_id):
        """Record the queried version id and return the canned flag."""
        self.calls.append(version_id)
        return self.exists
+
+
@pytest.fixture
def scanners(monkeypatch):
    """Install DummyScanner doubles for all three model-type registries."""
    lora_scanner = DummyScanner()
    checkpoint_scanner = DummyScanner()
    embedding_scanner = DummyScanner()

    monkeypatch.setattr(ServiceRegistry, "get_lora_scanner", AsyncMock(return_value=lora_scanner))
    monkeypatch.setattr(ServiceRegistry, "get_checkpoint_scanner", AsyncMock(return_value=checkpoint_scanner))
    monkeypatch.setattr(ServiceRegistry, "get_embedding_scanner", AsyncMock(return_value=embedding_scanner))

    # Hand the doubles back so tests can tweak their behaviour directly.
    return SimpleNamespace(
        lora=lora_scanner,
        checkpoint=checkpoint_scanner,
        embedding=embedding_scanner,
    )
+
+
@pytest.fixture
def metadata_provider(monkeypatch):
    """Provide a canned Civitai version payload and record lookups."""
    class DummyProvider:
        def __init__(self):
            self.calls = []

        async def get_model_version(self, model_id, model_version_id):
            self.calls.append((model_id, model_version_id))
            # Minimal LoRA payload: one primary file plus the fields the
            # path-template expansion reads (type, tags, baseModel).
            return {
                "id": 42,
                "model": {"type": "LoRA", "tags": ["fantasy"]},
                "baseModel": "BaseModel",
                "creator": {"username": "Author"},
                "files": [
                    {
                        "primary": True,
                        "downloadUrl": "https://example.invalid/file.safetensors",
                        "name": "file.safetensors",
                    }
                ],
            }

    provider = DummyProvider()
    monkeypatch.setattr(
        download_manager,
        "get_default_metadata_provider",
        AsyncMock(return_value=provider),
    )
    return provider
+
+
@pytest.fixture(autouse=True)
def noop_cleanup(monkeypatch):
    """Replace record cleanup with a marker so records stay inspectable."""
    async def _cleanup(self, task_id):
        # Flag instead of deleting so tests can still assert on the record.
        if task_id in self._active_downloads:
            self._active_downloads[task_id]["cleaned"] = True

    monkeypatch.setattr(DownloadManager, "_cleanup_download_record", _cleanup)
+
+
async def test_download_requires_identifier():
    """Calling without model_id or model_version_id must fail fast."""
    manager = DownloadManager()
    result = await manager.download_from_civitai()
    assert result == {
        "success": False,
        "error": "Either model_id or model_version_id must be provided",
    }
+
+
async def test_successful_download_uses_defaults(monkeypatch, scanners, metadata_provider, tmp_path):
    """Default-path downloads expand the template under the lora root."""
    manager = DownloadManager()

    captured = {}

    async def fake_execute_download(
        self,
        *,
        download_url,
        save_dir,
        metadata,
        version_info,
        relative_path,
        progress_callback,
        model_type,
        download_id,
    ):
        # Capture the fully-resolved arguments instead of downloading.
        captured.update(
            {
                "download_url": download_url,
                "save_dir": Path(save_dir),
                "relative_path": relative_path,
                "progress_callback": progress_callback,
                "model_type": model_type,
                "download_id": download_id,
                "metadata_path": metadata.file_path,
            }
        )
        return {"success": True}

    monkeypatch.setattr(DownloadManager, "_execute_download", fake_execute_download, raising=False)

    result = await manager.download_from_civitai(
        model_version_id=99,
        save_dir=str(tmp_path),
        use_default_paths=True,
        progress_callback=None,
        source=None,
    )

    assert result["success"] is True
    assert "download_id" in result
    assert manager._download_tasks == {}
    assert manager._active_downloads[result["download_id"]]["status"] == "completed"

    # "{base_model}/{first_tag}" with the BaseModel→MappedModel mapping
    # from the isolate_settings fixture applied.
    assert captured["relative_path"] == "MappedModel/fantasy"
    expected_dir = Path(settings.get("default_lora_root")) / "MappedModel" / "fantasy"
    assert captured["save_dir"] == expected_dir
    assert captured["model_type"] == "lora"
+
+
async def test_download_aborts_when_version_exists(monkeypatch, scanners, metadata_provider):
    """An already-present version must short-circuit before downloading."""
    scanners.lora.exists = True

    manager = DownloadManager()

    execute_mock = AsyncMock(return_value={"success": True})
    monkeypatch.setattr(DownloadManager, "_execute_download", execute_mock)

    result = await manager.download_from_civitai(model_version_id=101, save_dir="/tmp")

    assert result["success"] is False
    assert result["error"] == "Model version already exists in lora library"
    assert "download_id" in result
    # The download pipeline must never have been started.
    assert execute_mock.await_count == 0
+
+
async def test_download_handles_metadata_errors(monkeypatch, scanners):
    """A provider that yields no version info should fail gracefully."""
    # Provider whose get_model_version resolves to None, simulating an
    # upstream metadata-fetch failure.  (A dead local `failing_provider`
    # helper that duplicated this behaviour has been removed.)
    monkeypatch.setattr(
        download_manager,
        "get_default_metadata_provider",
        AsyncMock(return_value=SimpleNamespace(get_model_version=AsyncMock(return_value=None))),
    )

    manager = DownloadManager()

    result = await manager.download_from_civitai(model_version_id=5, save_dir="/tmp")

    assert result["success"] is False
    assert result["error"] == "Failed to fetch model metadata"
    assert "download_id" in result
+
+
async def test_download_rejects_unsupported_model_type(monkeypatch, scanners):
    """Unknown model types reported by the provider are rejected."""
    class Provider:
        async def get_model_version(self, *_args, **_kwargs):
            return {
                "model": {"type": "Unsupported", "tags": []},
                "files": [],
            }

    monkeypatch.setattr(
        download_manager,
        "get_default_metadata_provider",
        AsyncMock(return_value=Provider()),
    )

    manager = DownloadManager()

    result = await manager.download_from_civitai(model_version_id=5, save_dir="/tmp")

    assert result["success"] is False
    # Exact wording varies by type, so only pin the stable prefix.
    assert result["error"].startswith("Model type")
diff --git a/tests/services/test_example_images_cleanup_service.py b/tests/services/test_example_images_cleanup_service.py
new file mode 100644
index 00000000..8256557a
--- /dev/null
+++ b/tests/services/test_example_images_cleanup_service.py
@@ -0,0 +1,86 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+
+from py.services.example_images_cleanup_service import ExampleImagesCleanupService
+from py.services.service_registry import ServiceRegistry
+from py.services.settings_manager import settings
+
+
+class StubScanner:
+ def __init__(self, valid_hashes: set[str] | None = None) -> None:
+ self._valid_hashes = valid_hashes or set()
+
+ def has_hash(self, value: str) -> bool:
+ return value in self._valid_hashes
+
+
@pytest.mark.asyncio
async def test_cleanup_moves_empty_and_orphaned(tmp_path, monkeypatch):
    """Empty folders are removed; orphaned hash folders move to the bucket."""
    service = ExampleImagesCleanupService()

    # Temporarily point the example-images root at tmp_path; restored below.
    previous_path = settings.get('example_images_path')
    settings.settings['example_images_path'] = str(tmp_path)

    try:
        empty_folder = tmp_path / 'empty_folder'
        empty_folder.mkdir()

        # 64-hex-char folder with no matching model hash → orphaned.
        orphan_hash = 'a' * 64
        orphan_folder = tmp_path / orphan_hash
        orphan_folder.mkdir()
        (orphan_folder / 'image.png').write_text('data', encoding='utf-8')

        # Folder whose hash IS known to the lora scanner → must survive.
        valid_hash = 'b' * 64
        valid_folder = tmp_path / valid_hash
        valid_folder.mkdir()
        (valid_folder / 'image.png').write_text('data', encoding='utf-8')

        matching_scanner = StubScanner({valid_hash})
        empty_scanner = StubScanner()

        async def get_matching_scanner(*_args, **_kwargs):
            return matching_scanner

        async def get_empty_scanner(*_args, **_kwargs):
            return empty_scanner

        monkeypatch.setattr(ServiceRegistry, 'get_lora_scanner', get_matching_scanner)
        monkeypatch.setattr(ServiceRegistry, 'get_checkpoint_scanner', get_empty_scanner)
        monkeypatch.setattr(ServiceRegistry, 'get_embedding_scanner', get_empty_scanner)

        result = await service.cleanup_example_image_folders()

        deleted_bucket = Path(result['deleted_root'])
        assert result['success'] is True
        assert result['moved_total'] == 2
        # Empty folders are deleted outright, not moved into the bucket.
        assert not empty_folder.exists()
        assert not (deleted_bucket / 'empty_folder').exists()
        assert (deleted_bucket / orphan_hash).exists()
        assert not orphan_folder.exists()
        assert valid_folder.exists()

    finally:
        if previous_path is None:
            settings.settings.pop('example_images_path', None)
        else:
            settings.settings['example_images_path'] = previous_path
+
+
@pytest.mark.asyncio
async def test_cleanup_handles_missing_path(monkeypatch):
    """Cleanup must fail with a clear error code when no path is configured."""
    service = ExampleImagesCleanupService()

    previous_path = settings.get('example_images_path')
    settings.settings.pop('example_images_path', None)

    try:
        result = await service.cleanup_example_image_folders()
    finally:
        # Restore the original setting even if the service raises.
        if previous_path is not None:
            settings.settings['example_images_path'] = previous_path

    assert result['success'] is False
    assert result['error_code'] == 'path_not_configured'
diff --git a/tests/services/test_example_images_download_manager_async.py b/tests/services/test_example_images_download_manager_async.py
new file mode 100644
index 00000000..7eef56fb
--- /dev/null
+++ b/tests/services/test_example_images_download_manager_async.py
@@ -0,0 +1,228 @@
+from __future__ import annotations
+
+import asyncio
+from types import SimpleNamespace
+
+import pytest
+
+from py.services.settings_manager import settings
+from py.utils import example_images_download_manager as download_module
+
+
+class RecordingWebSocketManager:
+ """Collects broadcast payloads for assertions."""
+
+ def __init__(self) -> None:
+ self.payloads: list[dict] = []
+
+ async def broadcast(self, payload: dict) -> None:
+ self.payloads.append(payload)
+
+
+class StubScanner:
+ """Scanner double returning predetermined cache contents."""
+
+ def __init__(self, models: list[dict]) -> None:
+ self._cache = SimpleNamespace(raw_data=models)
+
+ async def get_cached_data(self):
+ return self._cache
+
+
+def _patch_scanner(monkeypatch: pytest.MonkeyPatch, scanner: StubScanner) -> None:
+ async def _get_lora_scanner(cls):
+ return scanner
+
+ monkeypatch.setattr(
+ download_module.ServiceRegistry,
+ "get_lora_scanner",
+ classmethod(_get_lora_scanner),
+ )
+
+
+@pytest.mark.asyncio
+async def test_start_download_rejects_parallel_runs(monkeypatch: pytest.MonkeyPatch, tmp_path):
+ ws_manager = RecordingWebSocketManager()
+ manager = download_module.DownloadManager(ws_manager=ws_manager)
+
+ monkeypatch.setitem(settings.settings, "example_images_path", str(tmp_path))
+
+ model = {
+ "sha256": "abc123",
+ "model_name": "Example",
+ "file_path": str(tmp_path / "example.safetensors"),
+ "file_name": "example.safetensors",
+ }
+ _patch_scanner(monkeypatch, StubScanner([model]))
+
+ started = asyncio.Event()
+ release = asyncio.Event()
+
+ async def fake_process_local_examples(*_args, **_kwargs):
+ started.set()
+ await release.wait()
+ return True
+
+ async def fake_update_metadata(*_args, **_kwargs):
+ return True
+
+ async def fake_get_downloader():
+ return object()
+
+ monkeypatch.setattr(
+ download_module.ExampleImagesProcessor,
+ "process_local_examples",
+ staticmethod(fake_process_local_examples),
+ )
+ monkeypatch.setattr(
+ download_module.MetadataUpdater,
+ "update_metadata_from_local_examples",
+ staticmethod(fake_update_metadata),
+ )
+ monkeypatch.setattr(download_module, "get_downloader", fake_get_downloader)
+
+ try:
+ result = await manager.start_download({"model_types": ["lora"], "delay": 0})
+ assert result["success"] is True
+
+ await asyncio.wait_for(started.wait(), timeout=1)
+
+ with pytest.raises(download_module.DownloadInProgressError) as exc:
+ await manager.start_download({"model_types": ["lora"], "delay": 0})
+
+ snapshot = exc.value.progress_snapshot
+ assert snapshot["status"] == "running"
+ assert snapshot["current_model"] == "Example (abc123)"
+
+ statuses = [payload["status"] for payload in ws_manager.payloads]
+ assert "running" in statuses
+
+ finally:
+ release.set()
+ if manager._download_task is not None:
+ await asyncio.wait_for(manager._download_task, timeout=1)
+
+
+@pytest.mark.asyncio
+async def test_pause_resume_blocks_processing(monkeypatch: pytest.MonkeyPatch, tmp_path):
+ ws_manager = RecordingWebSocketManager()
+ manager = download_module.DownloadManager(ws_manager=ws_manager)
+
+ monkeypatch.setitem(settings.settings, "example_images_path", str(tmp_path))
+
+ models = [
+ {
+ "sha256": "hash-one",
+ "model_name": "Model One",
+ "file_path": str(tmp_path / "model-one.safetensors"),
+ "file_name": "model-one.safetensors",
+ "civitai": {"images": [{"url": "https://example.com/one.png"}]},
+ },
+ {
+ "sha256": "hash-two",
+ "model_name": "Model Two",
+ "file_path": str(tmp_path / "model-two.safetensors"),
+ "file_name": "model-two.safetensors",
+ "civitai": {"images": [{"url": "https://example.com/two.png"}]},
+ },
+ ]
+ _patch_scanner(monkeypatch, StubScanner(models))
+
+ async def fake_process_local_examples(*_args, **_kwargs):
+ return False
+
+ async def fake_update_metadata(*_args, **_kwargs):
+ return True
+
+ first_call_started = asyncio.Event()
+ first_release = asyncio.Event()
+ second_call_started = asyncio.Event()
+ call_order: list[str] = []
+
+ async def fake_download_model_images(model_hash, *_args, **_kwargs):
+ call_order.append(model_hash)
+ if len(call_order) == 1:
+ first_call_started.set()
+ await first_release.wait()
+ else:
+ second_call_started.set()
+ return True, False
+
+ async def fake_get_downloader():
+ class _Downloader:
+ async def download_to_memory(self, *_a, **_kw):
+ return True, b"", {}
+
+ return _Downloader()
+
+ monkeypatch.setattr(
+ download_module.ExampleImagesProcessor,
+ "process_local_examples",
+ staticmethod(fake_process_local_examples),
+ )
+ monkeypatch.setattr(
+ download_module.MetadataUpdater,
+ "update_metadata_from_local_examples",
+ staticmethod(fake_update_metadata),
+ )
+ monkeypatch.setattr(
+ download_module.ExampleImagesProcessor,
+ "download_model_images",
+ staticmethod(fake_download_model_images),
+ )
+ monkeypatch.setattr(download_module, "get_downloader", fake_get_downloader)
+
+ original_sleep = download_module.asyncio.sleep
+ pause_gate = asyncio.Event()
+ resume_gate = asyncio.Event()
+
+ async def fake_sleep(delay: float):
+ if delay == 1:
+ pause_gate.set()
+ await resume_gate.wait()
+ else:
+ await original_sleep(delay)
+
+ monkeypatch.setattr(download_module.asyncio, "sleep", fake_sleep)
+
+ try:
+ await manager.start_download({"model_types": ["lora"], "delay": 0})
+
+ await asyncio.wait_for(first_call_started.wait(), timeout=1)
+
+ await manager.pause_download({})
+
+ first_release.set()
+
+ await asyncio.wait_for(pause_gate.wait(), timeout=1)
+ assert manager._progress["status"] == "paused"
+ assert not second_call_started.is_set()
+
+ statuses = [payload["status"] for payload in ws_manager.payloads]
+ paused_index = statuses.index("paused")
+
+ await asyncio.sleep(0)
+ assert not second_call_started.is_set()
+
+ await manager.resume_download({})
+ resume_gate.set()
+
+ await asyncio.wait_for(second_call_started.wait(), timeout=1)
+
+ if manager._download_task is not None:
+ await asyncio.wait_for(manager._download_task, timeout=1)
+
+ statuses_after = [payload["status"] for payload in ws_manager.payloads]
+ running_after = next(
+ i for i, status in enumerate(statuses_after[paused_index + 1 :], start=paused_index + 1) if status == "running"
+ )
+ assert running_after > paused_index
+ assert "completed" in statuses_after[running_after:]
+ assert call_order == ["hash-one", "hash-two"]
+
+ finally:
+ first_release.set()
+ resume_gate.set()
+ if manager._download_task is not None:
+ await asyncio.wait_for(manager._download_task, timeout=1)
+ monkeypatch.setattr(download_module.asyncio, "sleep", original_sleep)
diff --git a/tests/services/test_recipe_scanner.py b/tests/services/test_recipe_scanner.py
new file mode 100644
index 00000000..63c18f25
--- /dev/null
+++ b/tests/services/test_recipe_scanner.py
@@ -0,0 +1,185 @@
+import asyncio
+import json
+from pathlib import Path
+from types import SimpleNamespace
+
+import pytest
+
+from py.config import config
+from py.services.recipe_scanner import RecipeScanner
+from py.utils.utils import calculate_recipe_fingerprint
+
+
+class StubHashIndex:
+ def __init__(self) -> None:
+ self._hash_to_path: dict[str, str] = {}
+
+ def get_path(self, hash_value: str) -> str | None:
+ return self._hash_to_path.get(hash_value)
+
+
+class StubLoraScanner:
+ def __init__(self) -> None:
+ self._hash_index = StubHashIndex()
+ self._hash_meta: dict[str, dict[str, str]] = {}
+ self._models_by_name: dict[str, dict] = {}
+ self._cache = SimpleNamespace(raw_data=[])
+
+ async def get_cached_data(self):
+ return self._cache
+
+ def has_hash(self, hash_value: str) -> bool:
+ return hash_value.lower() in self._hash_meta
+
+ def get_preview_url_by_hash(self, hash_value: str) -> str:
+ meta = self._hash_meta.get(hash_value.lower())
+ return meta.get("preview_url", "") if meta else ""
+
+ def get_path_by_hash(self, hash_value: str) -> str | None:
+ meta = self._hash_meta.get(hash_value.lower())
+ return meta.get("path") if meta else None
+
+ async def get_model_info_by_name(self, name: str):
+ return self._models_by_name.get(name)
+
+ def register_model(self, name: str, info: dict) -> None:
+ self._models_by_name[name] = info
+ hash_value = (info.get("sha256") or "").lower()
+ if hash_value:
+ self._hash_meta[hash_value] = {
+ "path": info.get("file_path", ""),
+ "preview_url": info.get("preview_url", ""),
+ }
+ self._hash_index._hash_to_path[hash_value] = info.get("file_path", "")
+ self._cache.raw_data.append({
+ "sha256": info.get("sha256", ""),
+ "path": info.get("file_path", ""),
+ "civitai": info.get("civitai", {}),
+ })
+
+
+@pytest.fixture
+def recipe_scanner(tmp_path: Path, monkeypatch):
+ RecipeScanner._instance = None
+ monkeypatch.setattr(config, "loras_roots", [str(tmp_path)])
+ stub = StubLoraScanner()
+ scanner = RecipeScanner(lora_scanner=stub)
+ asyncio.run(scanner.refresh_cache(force=True))
+ yield scanner, stub
+ RecipeScanner._instance = None
+
+@pytest.mark.asyncio
+async def test_add_recipe_during_concurrent_reads(recipe_scanner):
+ scanner, _ = recipe_scanner
+
+ initial_recipe = {
+ "id": "one",
+ "file_path": "path/a.png",
+ "title": "First",
+ "modified": 1.0,
+ "created_date": 1.0,
+ "loras": [],
+ }
+ await scanner.add_recipe(initial_recipe)
+
+ new_recipe = {
+ "id": "two",
+ "file_path": "path/b.png",
+ "title": "Second",
+ "modified": 2.0,
+ "created_date": 2.0,
+ "loras": [],
+ }
+
+ async def reader_task():
+ for _ in range(5):
+ cache = await scanner.get_cached_data()
+ _ = [item["id"] for item in cache.raw_data]
+ await asyncio.sleep(0)
+
+ await asyncio.gather(reader_task(), reader_task(), scanner.add_recipe(new_recipe))
+ await asyncio.sleep(0)
+ cache = await scanner.get_cached_data()
+
+ assert {item["id"] for item in cache.raw_data} == {"one", "two"}
+ assert len(cache.sorted_by_name) == len(cache.raw_data)
+
+@pytest.mark.asyncio
+async def test_remove_recipe_during_reads(recipe_scanner):
+ scanner, _ = recipe_scanner
+
+ recipe_ids = ["alpha", "beta", "gamma"]
+ for index, recipe_id in enumerate(recipe_ids):
+ await scanner.add_recipe({
+ "id": recipe_id,
+ "file_path": f"path/{recipe_id}.png",
+ "title": recipe_id,
+ "modified": float(index),
+ "created_date": float(index),
+ "loras": [],
+ })
+
+ async def reader_task():
+ for _ in range(5):
+ cache = await scanner.get_cached_data()
+ _ = list(cache.sorted_by_date)
+ await asyncio.sleep(0)
+
+ await asyncio.gather(reader_task(), scanner.remove_recipe("beta"))
+ await asyncio.sleep(0)
+ cache = await scanner.get_cached_data()
+
+ assert {item["id"] for item in cache.raw_data} == {"alpha", "gamma"}
+
+@pytest.mark.asyncio
+async def test_update_lora_entry_updates_cache_and_file(tmp_path: Path, recipe_scanner):
+ scanner, stub = recipe_scanner
+ recipes_dir = Path(config.loras_roots[0]) / "recipes"
+ recipes_dir.mkdir(parents=True, exist_ok=True)
+
+ recipe_id = "recipe-1"
+ recipe_path = recipes_dir / f"{recipe_id}.recipe.json"
+ recipe_data = {
+ "id": recipe_id,
+ "file_path": str(tmp_path / "image.png"),
+ "title": "Original",
+ "modified": 0.0,
+ "created_date": 0.0,
+ "loras": [
+ {"file_name": "old", "strength": 1.0, "hash": "", "isDeleted": True, "exclude": True},
+ ],
+ }
+ recipe_path.write_text(json.dumps(recipe_data))
+
+ await scanner.add_recipe(dict(recipe_data))
+
+ target_hash = "abc123"
+ target_info = {
+ "sha256": target_hash,
+ "file_path": str(tmp_path / "loras" / "target.safetensors"),
+ "preview_url": "preview.png",
+ "civitai": {"id": 42, "name": "v1", "model": {"name": "Target"}},
+ }
+ stub.register_model("target", target_info)
+
+ updated_recipe, updated_lora = await scanner.update_lora_entry(
+ recipe_id,
+ 0,
+ target_name="target",
+ target_lora=target_info,
+ )
+
+ assert updated_lora["inLibrary"] is True
+ assert updated_lora["localPath"] == target_info["file_path"]
+ assert updated_lora["hash"] == target_hash
+
+ with recipe_path.open("r", encoding="utf-8") as file_obj:
+ persisted = json.load(file_obj)
+
+ expected_fingerprint = calculate_recipe_fingerprint(persisted["loras"])
+ assert persisted["fingerprint"] == expected_fingerprint
+
+ cache = await scanner.get_cached_data()
+ cached_recipe = next(item for item in cache.raw_data if item["id"] == recipe_id)
+ assert cached_recipe["loras"][0]["hash"] == target_hash
+ assert cached_recipe["fingerprint"] == expected_fingerprint
diff --git a/tests/services/test_recipe_services.py b/tests/services/test_recipe_services.py
new file mode 100644
index 00000000..81a15424
--- /dev/null
+++ b/tests/services/test_recipe_services.py
@@ -0,0 +1,150 @@
+import logging
+import os
+from types import SimpleNamespace
+
+import pytest
+
+from py.services.recipes.analysis_service import RecipeAnalysisService
+from py.services.recipes.errors import RecipeDownloadError, RecipeNotFoundError
+from py.services.recipes.persistence_service import RecipePersistenceService
+
+
+class DummyExifUtils:
+ def optimize_image(self, image_data, target_width, format, quality, preserve_metadata):
+ return image_data, ".webp"
+
+ def append_recipe_metadata(self, image_path, recipe_data):
+ self.appended = (image_path, recipe_data)
+
+ def extract_image_metadata(self, path):
+ return {}
+
+
+@pytest.mark.asyncio
+async def test_analyze_remote_image_download_failure_cleans_temp(tmp_path, monkeypatch):
+ exif_utils = DummyExifUtils()
+
+ class DummyFactory:
+ def create_parser(self, metadata):
+ return None
+
+ async def downloader_factory():
+ class Downloader:
+ async def download_file(self, url, path, use_auth=False):
+ return False, "failure"
+
+ return Downloader()
+
+ service = RecipeAnalysisService(
+ exif_utils=exif_utils,
+ recipe_parser_factory=DummyFactory(),
+ downloader_factory=downloader_factory,
+ metadata_collector=None,
+ metadata_processor_cls=None,
+ metadata_registry_cls=None,
+ standalone_mode=False,
+ logger=logging.getLogger("test"),
+ )
+
+ temp_path = tmp_path / "temp.jpg"
+
+ def create_temp_path():
+ temp_path.write_bytes(b"")
+ return str(temp_path)
+
+ monkeypatch.setattr(service, "_create_temp_path", create_temp_path)
+
+ with pytest.raises(RecipeDownloadError):
+ await service.analyze_remote_image(
+ url="https://example.com/image.jpg",
+ recipe_scanner=SimpleNamespace(),
+ civitai_client=SimpleNamespace(),
+ )
+
+ assert not temp_path.exists(), "temporary file should be cleaned after failure"
+
+
+@pytest.mark.asyncio
+async def test_analyze_local_image_missing_file(tmp_path):
+ async def downloader_factory():
+ return SimpleNamespace()
+
+ service = RecipeAnalysisService(
+ exif_utils=DummyExifUtils(),
+ recipe_parser_factory=SimpleNamespace(create_parser=lambda metadata: None),
+ downloader_factory=downloader_factory,
+ metadata_collector=None,
+ metadata_processor_cls=None,
+ metadata_registry_cls=None,
+ standalone_mode=False,
+ logger=logging.getLogger("test"),
+ )
+
+ with pytest.raises(RecipeNotFoundError):
+ await service.analyze_local_image(
+ file_path=str(tmp_path / "missing.png"),
+ recipe_scanner=SimpleNamespace(),
+ )
+
+
+@pytest.mark.asyncio
+async def test_save_recipe_reports_duplicates(tmp_path):
+ exif_utils = DummyExifUtils()
+
+ class DummyCache:
+ def __init__(self):
+ self.raw_data = []
+
+ async def resort(self):
+ pass
+
+ class DummyScanner:
+ def __init__(self, root):
+ self.recipes_dir = str(root)
+ self._cache = DummyCache()
+ self.last_fingerprint = None
+
+ async def find_recipes_by_fingerprint(self, fingerprint):
+ self.last_fingerprint = fingerprint
+ return ["existing"]
+
+ async def add_recipe(self, recipe_data):
+ self._cache.raw_data.append(recipe_data)
+ await self._cache.resort()
+
+ scanner = DummyScanner(tmp_path)
+ service = RecipePersistenceService(
+ exif_utils=exif_utils,
+ card_preview_width=512,
+ logger=logging.getLogger("test"),
+ )
+
+ metadata = {
+ "base_model": "sd",
+ "loras": [
+ {
+ "file_name": "sample",
+ "hash": "abc123",
+ "weight": 0.5,
+ "id": 1,
+ "name": "Sample",
+ "version": "v1",
+ "isDeleted": False,
+ "exclude": False,
+ }
+ ],
+ }
+
+ result = await service.save_recipe(
+ recipe_scanner=scanner,
+ image_bytes=b"image-bytes",
+ image_base64=None,
+ name="My Recipe",
+ tags=["tag"],
+ metadata=metadata,
+ )
+
+ assert result.payload["matching_recipes"] == ["existing"]
+ assert scanner.last_fingerprint is not None
+ assert os.path.exists(result.payload["json_path"])
+ assert scanner._cache.raw_data
diff --git a/tests/services/test_route_support_services.py b/tests/services/test_route_support_services.py
new file mode 100644
index 00000000..978438c3
--- /dev/null
+++ b/tests/services/test_route_support_services.py
@@ -0,0 +1,273 @@
+import asyncio
+import json
+import os
+import sys
+from pathlib import Path
+from typing import Any, Dict, List
+
+ROOT = Path(__file__).resolve().parents[2]
+if str(ROOT) not in sys.path:
+ sys.path.insert(0, str(ROOT))
+
+import importlib
+import importlib.util
+
+import pytest
+
+
+def import_from(module_name: str):
+ existing = sys.modules.get("py")
+ if existing is None or getattr(existing, "__file__", "") != str(ROOT / "py/__init__.py"):
+ sys.modules.pop("py", None)
+ spec = importlib.util.spec_from_file_location("py", ROOT / "py/__init__.py")
+ module = importlib.util.module_from_spec(spec)
+ assert spec and spec.loader
+ spec.loader.exec_module(module) # type: ignore[union-attr]
+ module.__path__ = [str(ROOT / "py")]
+ sys.modules["py"] = module
+ return importlib.import_module(module_name)
+
+
+DownloadCoordinator = import_from("py.services.download_coordinator").DownloadCoordinator
+MetadataSyncService = import_from("py.services.metadata_sync_service").MetadataSyncService
+PreviewAssetService = import_from("py.services.preview_asset_service").PreviewAssetService
+TagUpdateService = import_from("py.services.tag_update_service").TagUpdateService
+
+
+class DummySettings:
+ def __init__(self, values: Dict[str, Any] | None = None) -> None:
+ self._values = values or {}
+
+ def get(self, key: str, default: Any = None) -> Any:
+ return self._values.get(key, default)
+
+
+class RecordingMetadataManager:
+ def __init__(self) -> None:
+ self.saved: List[tuple[str, Dict[str, Any]]] = []
+
+ async def save_metadata(self, path: str, metadata: Dict[str, Any]) -> bool:
+ self.saved.append((path, json.loads(json.dumps(metadata))))
+ metadata_path = path if path.endswith(".metadata.json") else f"{os.path.splitext(path)[0]}.metadata.json"
+ Path(metadata_path).write_text(json.dumps(metadata))
+ return True
+
+
+class RecordingPreviewService:
+ def __init__(self) -> None:
+ self.calls: List[tuple[str, List[Dict[str, Any]]]] = []
+
+ async def ensure_preview_for_metadata(
+ self, metadata_path: str, local_metadata: Dict[str, Any], images
+ ) -> None:
+ self.calls.append((metadata_path, list(images or [])))
+ local_metadata["preview_url"] = "preview.webp"
+ local_metadata["preview_nsfw_level"] = 1
+
+
+class DummyProvider:
+ def __init__(self, payload: Dict[str, Any]) -> None:
+ self.payload = payload
+
+ async def get_model_by_hash(self, sha256: str):
+ return self.payload, None
+
+ async def get_model_version(self, model_id: int, model_version_id: int | None):
+ return self.payload
+
+
+class FakeExifUtils:
+ @staticmethod
+ def optimize_image(**kwargs):
+ return kwargs["image_data"], {}
+
+
+def test_metadata_sync_merges_remote_fields(tmp_path: Path) -> None:
+ manager = RecordingMetadataManager()
+ preview = RecordingPreviewService()
+ provider = DummyProvider({
+ "baseModel": "SD15",
+ "model": {"name": "Merged", "description": "desc", "tags": ["tag"], "creator": {"username": "user"}},
+ "trainedWords": ["word"],
+ "images": [{"url": "http://example", "nsfwLevel": 2, "type": "image"}],
+ })
+
+ service = MetadataSyncService(
+ metadata_manager=manager,
+ preview_service=preview,
+ settings=DummySettings(),
+ default_metadata_provider_factory=lambda: asyncio.sleep(0, result=provider),
+ metadata_provider_selector=lambda _name=None: asyncio.sleep(0, result=provider),
+ )
+
+ metadata_path = str(tmp_path / "model.metadata.json")
+ local_metadata = {"civitai": {"trainedWords": ["existing"]}}
+
+ updated = asyncio.run(service.update_model_metadata(metadata_path, local_metadata, provider.payload))
+
+ assert updated["model_name"] == "Merged"
+ assert updated["modelDescription"] == "desc"
+ assert set(updated["civitai"]["trainedWords"]) == {"existing", "word"}
+ assert manager.saved
+ assert preview.calls
+
+
+def test_metadata_sync_fetch_and_update_updates_cache(tmp_path: Path) -> None:
+ manager = RecordingMetadataManager()
+ preview = RecordingPreviewService()
+ provider = DummyProvider({
+ "baseModel": "SDXL",
+ "model": {"name": "Updated"},
+ "images": [],
+ })
+
+ update_cache_calls: List[Dict[str, Any]] = []
+
+ async def update_cache(original: str, new: str, metadata: Dict[str, Any]) -> bool:
+ update_cache_calls.append({"original": original, "metadata": metadata})
+ return True
+
+ service = MetadataSyncService(
+ metadata_manager=manager,
+ preview_service=preview,
+ settings=DummySettings(),
+ default_metadata_provider_factory=lambda: asyncio.sleep(0, result=provider),
+ metadata_provider_selector=lambda _name=None: asyncio.sleep(0, result=provider),
+ )
+
+ model_data = {"sha256": "abc", "file_path": str(tmp_path / "model.safetensors")}
+ success, error = asyncio.run(
+ service.fetch_and_update_model(
+ sha256="abc",
+ file_path=str(tmp_path / "model.safetensors"),
+ model_data=model_data,
+ update_cache_func=update_cache,
+ )
+ )
+
+ assert success is True
+ assert error is None
+ assert update_cache_calls
+ assert manager.saved
+
+
+def test_preview_asset_service_replace_preview(tmp_path: Path) -> None:
+ metadata_path = tmp_path / "sample.metadata.json"
+ metadata_path.write_text(json.dumps({}))
+
+ async def metadata_loader(path: str) -> Dict[str, Any]:
+ return json.loads(Path(path).read_text())
+
+ manager = RecordingMetadataManager()
+
+ service = PreviewAssetService(
+ metadata_manager=manager,
+ downloader_factory=lambda: asyncio.sleep(0, result=None),
+ exif_utils=FakeExifUtils(),
+ )
+
+ preview_calls: List[Dict[str, Any]] = []
+
+ async def update_preview(model_path: str, preview_path: str, nsfw: int) -> bool:
+ preview_calls.append({"model_path": model_path, "preview_path": preview_path, "nsfw": nsfw})
+ return True
+
+ model_path = str(tmp_path / "sample.safetensors")
+ Path(model_path).write_bytes(b"model")
+
+ result = asyncio.run(
+ service.replace_preview(
+ model_path=model_path,
+ preview_data=b"image-bytes",
+ content_type="image/png",
+ original_filename="preview.png",
+ nsfw_level=2,
+ update_preview_in_cache=update_preview,
+ metadata_loader=metadata_loader,
+ )
+ )
+
+ assert result["preview_nsfw_level"] == 2
+ assert preview_calls
+ saved_metadata = json.loads(metadata_path.read_text())
+ assert saved_metadata["preview_nsfw_level"] == 2
+
+
+def test_download_coordinator_emits_progress() -> None:
+ class WSStub:
+ def __init__(self) -> None:
+ self.progress_events: List[Dict[str, Any]] = []
+ self.counter = 0
+
+ def generate_download_id(self) -> str:
+ self.counter += 1
+ return f"dl-{self.counter}"
+
+ async def broadcast_download_progress(self, download_id: str, payload: Dict[str, Any]) -> None:
+ self.progress_events.append({"id": download_id, **payload})
+
+ class DownloadManagerStub:
+ def __init__(self) -> None:
+ self.calls: List[Dict[str, Any]] = []
+
+ async def download_from_civitai(self, **kwargs) -> Dict[str, Any]:
+ self.calls.append(kwargs)
+ await kwargs["progress_callback"](10)
+ return {"success": True}
+
+ async def cancel_download(self, download_id: str) -> Dict[str, Any]:
+ return {"success": True, "download_id": download_id}
+
+ async def get_active_downloads(self) -> Dict[str, Any]:
+ return {"active": []}
+
+ ws_stub = WSStub()
+ manager_stub = DownloadManagerStub()
+
+ coordinator = DownloadCoordinator(
+ ws_manager=ws_stub,
+ download_manager_factory=lambda: asyncio.sleep(0, result=manager_stub),
+ )
+
+ result = asyncio.run(coordinator.schedule_download({"model_id": 1}))
+
+ assert result["success"] is True
+ assert manager_stub.calls
+ assert ws_stub.progress_events
+
+ cancel_result = asyncio.run(coordinator.cancel_download(result["download_id"]))
+ assert cancel_result["success"] is True
+
+ active = asyncio.run(coordinator.list_active_downloads())
+ assert active == {"active": []}
+
+
+def test_tag_update_service_adds_unique_tags(tmp_path: Path) -> None:
+ metadata_path = tmp_path / "model.metadata.json"
+ metadata_path.write_text(json.dumps({"tags": ["Existing"]}))
+
+ async def loader(path: str) -> Dict[str, Any]:
+ return json.loads(Path(path).read_text())
+
+ manager = RecordingMetadataManager()
+
+ service = TagUpdateService(metadata_manager=manager)
+
+ cache_updates: List[Dict[str, Any]] = []
+
+ async def update_cache(original: str, new: str, metadata: Dict[str, Any]) -> bool:
+ cache_updates.append(metadata)
+ return True
+
+ tags = asyncio.run(
+ service.add_tags(
+ file_path=str(tmp_path / "model.safetensors"),
+ new_tags=["New", "existing"],
+ metadata_loader=loader,
+ update_cache=update_cache,
+ )
+ )
+
+ assert tags == ["Existing", "New"]
+ assert manager.saved
+ assert cache_updates
diff --git a/tests/services/test_settings_manager.py b/tests/services/test_settings_manager.py
new file mode 100644
index 00000000..7e547680
--- /dev/null
+++ b/tests/services/test_settings_manager.py
@@ -0,0 +1,61 @@
+import json
+
+import pytest
+
+from py.services.settings_manager import SettingsManager
+
+
+@pytest.fixture
+def manager(tmp_path, monkeypatch):
+ monkeypatch.setattr(SettingsManager, "_save_settings", lambda self: None)
+ mgr = SettingsManager()
+ mgr.settings_file = str(tmp_path / "settings.json")
+ return mgr
+
+
+def test_environment_variable_overrides_settings(tmp_path, monkeypatch):
+ monkeypatch.setattr(SettingsManager, "_save_settings", lambda self: None)
+ monkeypatch.setenv("CIVITAI_API_KEY", "secret")
+ mgr = SettingsManager()
+ mgr.settings_file = str(tmp_path / "settings.json")
+
+ assert mgr.get("civitai_api_key") == "secret"
+
+
+def test_download_path_template_parses_json_string(manager):
+ templates = {"lora": "{author}", "checkpoint": "{author}", "embedding": "{author}"}
+ manager.settings["download_path_templates"] = json.dumps(templates)
+
+ template = manager.get_download_path_template("lora")
+
+ assert template == "{author}"
+ assert isinstance(manager.settings["download_path_templates"], dict)
+
+
+def test_download_path_template_invalid_json(manager):
+ manager.settings["download_path_templates"] = "not json"
+
+ template = manager.get_download_path_template("checkpoint")
+
+ assert template == "{base_model}/{first_tag}"
+ assert manager.settings["download_path_templates"]["lora"] == "{base_model}/{first_tag}"
+
+
+def test_auto_set_default_roots(manager):
+ manager.settings["folder_paths"] = {
+ "loras": ["/loras"],
+ "checkpoints": ["/checkpoints"],
+ "embeddings": ["/embeddings"],
+ }
+
+ manager._auto_set_default_roots()
+
+ assert manager.get("default_lora_root") == "/loras"
+ assert manager.get("default_checkpoint_root") == "/checkpoints"
+ assert manager.get("default_embedding_root") == "/embeddings"
+
+
+def test_delete_setting(manager):
+ manager.set("example", 1)
+ manager.delete("example")
+ assert manager.get("example") is None
diff --git a/tests/services/test_use_cases.py b/tests/services/test_use_cases.py
new file mode 100644
index 00000000..cfd0f10c
--- /dev/null
+++ b/tests/services/test_use_cases.py
@@ -0,0 +1,317 @@
+import asyncio
+import logging
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional
+
+import pytest
+
+from py_local.services.model_file_service import AutoOrganizeResult
+from py_local.services.use_cases import (
+ AutoOrganizeInProgressError,
+ AutoOrganizeUseCase,
+ BulkMetadataRefreshUseCase,
+ DownloadExampleImagesConfigurationError,
+ DownloadExampleImagesInProgressError,
+ DownloadExampleImagesUseCase,
+ DownloadModelEarlyAccessError,
+ DownloadModelUseCase,
+ DownloadModelValidationError,
+ ImportExampleImagesUseCase,
+ ImportExampleImagesValidationError,
+)
+from py_local.utils.example_images_download_manager import (
+ DownloadConfigurationError,
+ DownloadInProgressError,
+ ExampleImagesDownloadError,
+)
+from py_local.utils.example_images_processor import (
+ ExampleImagesImportError,
+ ExampleImagesValidationError,
+)
+from tests.conftest import MockModelService, MockScanner
+
+
+class StubLockProvider:
+ def __init__(self) -> None:
+ self._lock = asyncio.Lock()
+ self.running = False
+
+ def is_auto_organize_running(self) -> bool:
+ return self.running
+
+ async def get_auto_organize_lock(self) -> asyncio.Lock:
+ return self._lock
+
+
+class StubFileService:
+ def __init__(self) -> None:
+ self.calls: List[Dict[str, Any]] = []
+
+ async def auto_organize_models(
+ self,
+ *,
+ file_paths: Optional[List[str]] = None,
+ progress_callback=None,
+ ) -> AutoOrganizeResult:
+ result = AutoOrganizeResult()
+ result.total = len(file_paths or [])
+ self.calls.append({"file_paths": file_paths, "progress_callback": progress_callback})
+ return result
+
+
+class StubMetadataSync:
+ def __init__(self) -> None:
+ self.calls: List[Dict[str, Any]] = []
+
+ async def fetch_and_update_model(self, **kwargs: Any):
+ self.calls.append(kwargs)
+ model_data = kwargs["model_data"]
+ model_data["model_name"] = model_data.get("model_name", "model") + "-updated"
+ return True, None
+
+
+@dataclass
+class StubSettings:
+ enable_metadata_archive_db: bool = False
+
+ def get(self, key: str, default: Any = None) -> Any:
+ if key == "enable_metadata_archive_db":
+ return self.enable_metadata_archive_db
+ return default
+
+
+class ProgressCollector:
+ def __init__(self) -> None:
+ self.events: List[Dict[str, Any]] = []
+
+ async def on_progress(self, payload: Dict[str, Any]) -> None:
+ self.events.append(payload)
+
+
+class StubDownloadCoordinator:
+ def __init__(self, *, error: Optional[str] = None) -> None:
+ self.error = error
+ self.payloads: List[Dict[str, Any]] = []
+
+ async def schedule_download(self, payload: Dict[str, Any]) -> Dict[str, Any]:
+ self.payloads.append(payload)
+ if self.error == "validation":
+ raise ValueError("Missing required parameter: Please provide either 'model_id' or 'model_version_id'")
+ if self.error == "401":
+ raise RuntimeError("401 Unauthorized")
+ return {"success": True, "download_id": "abc123"}
+
+
+class StubExampleImagesDownloadManager:
+ def __init__(self) -> None:
+ self.payloads: List[Dict[str, Any]] = []
+ self.error: Optional[str] = None
+ self.progress_snapshot = {"status": "running"}
+
+ async def start_download(self, payload: Dict[str, Any]) -> Dict[str, Any]:
+ self.payloads.append(payload)
+ if self.error == "in_progress":
+ raise DownloadInProgressError(self.progress_snapshot)
+ if self.error == "configuration":
+ raise DownloadConfigurationError("path missing")
+ if self.error == "generic":
+ raise ExampleImagesDownloadError("boom")
+ return {"success": True, "message": "ok"}
+
+
+class StubExampleImagesProcessor:
+ def __init__(self) -> None:
+ self.calls: List[Dict[str, Any]] = []
+ self.error: Optional[str] = None
+ self.response: Dict[str, Any] = {"success": True}
+
+ async def import_images(self, model_hash: str, files: List[str]) -> Dict[str, Any]:
+ self.calls.append({"model_hash": model_hash, "files": files})
+ if self.error == "validation":
+ raise ExampleImagesValidationError("missing")
+ if self.error == "generic":
+ raise ExampleImagesImportError("boom")
+ return self.response
+
+
+async def test_auto_organize_use_case_executes_with_lock() -> None:
+ file_service = StubFileService()
+ lock_provider = StubLockProvider()
+ use_case = AutoOrganizeUseCase(file_service=file_service, lock_provider=lock_provider)
+
+ result = await use_case.execute(file_paths=["model1"], progress_callback=None)
+
+ assert isinstance(result, AutoOrganizeResult)
+ assert file_service.calls[0]["file_paths"] == ["model1"]
+
+
+async def test_auto_organize_use_case_rejects_when_running() -> None:
+ file_service = StubFileService()
+ lock_provider = StubLockProvider()
+ lock_provider.running = True
+ use_case = AutoOrganizeUseCase(file_service=file_service, lock_provider=lock_provider)
+
+ with pytest.raises(AutoOrganizeInProgressError):
+ await use_case.execute(file_paths=None, progress_callback=None)
+
+
+async def test_bulk_metadata_refresh_emits_progress_and_updates_cache() -> None:
+ scanner = MockScanner()
+ scanner._cache.raw_data = [
+ {
+ "file_path": "model1.safetensors",
+ "sha256": "hash",
+ "from_civitai": True,
+ "model_name": "Demo",
+ }
+ ]
+ service = MockModelService(scanner)
+ metadata_sync = StubMetadataSync()
+ settings = StubSettings()
+ progress = ProgressCollector()
+
+ use_case = BulkMetadataRefreshUseCase(
+ service=service,
+ metadata_sync=metadata_sync,
+ settings_service=settings,
+ logger=logging.getLogger("test"),
+ )
+
+ result = await use_case.execute_with_error_handling(progress_callback=progress)
+
+ assert result["success"] is True
+ assert progress.events[0]["status"] == "started"
+ assert progress.events[-1]["status"] == "completed"
+ assert metadata_sync.calls
+ assert scanner._cache.resort_calls == 1
+
+
+async def test_bulk_metadata_refresh_reports_errors() -> None:
+ class FailingScanner(MockScanner):
+ async def get_cached_data(self, force_refresh: bool = False):
+ raise RuntimeError("boom")
+
+ service = MockModelService(FailingScanner())
+ metadata_sync = StubMetadataSync()
+ settings = StubSettings()
+ progress = ProgressCollector()
+
+ use_case = BulkMetadataRefreshUseCase(
+ service=service,
+ metadata_sync=metadata_sync,
+ settings_service=settings,
+ logger=logging.getLogger("test"),
+ )
+
+ with pytest.raises(RuntimeError):
+ await use_case.execute_with_error_handling(progress_callback=progress)
+
+ assert progress.events
+ assert progress.events[-1]["status"] == "error"
+ assert progress.events[-1]["error"] == "boom"
+
+
+async def test_download_model_use_case_raises_validation_error() -> None:
+ coordinator = StubDownloadCoordinator(error="validation")
+ use_case = DownloadModelUseCase(download_coordinator=coordinator)
+
+ with pytest.raises(DownloadModelValidationError):
+ await use_case.execute({})
+
+
+async def test_download_model_use_case_raises_early_access() -> None:
+ coordinator = StubDownloadCoordinator(error="401")
+ use_case = DownloadModelUseCase(download_coordinator=coordinator)
+
+ with pytest.raises(DownloadModelEarlyAccessError):
+ await use_case.execute({"model_id": 1})
+
+
+async def test_download_model_use_case_returns_result() -> None:
+ coordinator = StubDownloadCoordinator()
+ use_case = DownloadModelUseCase(download_coordinator=coordinator)
+
+ result = await use_case.execute({"model_id": 1})
+
+ assert result["success"] is True
+ assert result["download_id"] == "abc123"
+
+
+async def test_download_example_images_use_case_triggers_manager() -> None:
+ manager = StubExampleImagesDownloadManager()
+ use_case = DownloadExampleImagesUseCase(download_manager=manager)
+
+ payload = {"optimize": True}
+ result = await use_case.execute(payload)
+
+ assert manager.payloads == [payload]
+ assert result == {"success": True, "message": "ok"}
+
+
+async def test_download_example_images_use_case_maps_in_progress() -> None:
+ manager = StubExampleImagesDownloadManager()
+ manager.error = "in_progress"
+ use_case = DownloadExampleImagesUseCase(download_manager=manager)
+
+ with pytest.raises(DownloadExampleImagesInProgressError) as exc:
+ await use_case.execute({})
+
+ assert exc.value.progress == manager.progress_snapshot
+
+
+async def test_download_example_images_use_case_maps_configuration() -> None:
+ manager = StubExampleImagesDownloadManager()
+ manager.error = "configuration"
+ use_case = DownloadExampleImagesUseCase(download_manager=manager)
+
+ with pytest.raises(DownloadExampleImagesConfigurationError):
+ await use_case.execute({})
+
+
+async def test_download_example_images_use_case_propagates_generic_error() -> None:
+ manager = StubExampleImagesDownloadManager()
+ manager.error = "generic"
+ use_case = DownloadExampleImagesUseCase(download_manager=manager)
+
+ with pytest.raises(ExampleImagesDownloadError):
+ await use_case.execute({})
+
+
+class DummyJsonRequest:
+ def __init__(self, payload: Dict[str, Any]) -> None:
+ self._payload = payload
+ self.content_type = "application/json"
+
+ async def json(self) -> Dict[str, Any]:
+ return self._payload
+
+
+async def test_import_example_images_use_case_delegates() -> None:
+ processor = StubExampleImagesProcessor()
+ use_case = ImportExampleImagesUseCase(processor=processor)
+
+ request = DummyJsonRequest({"model_hash": "abc", "file_paths": ["/tmp/file"]})
+ result = await use_case.execute(request)
+
+ assert processor.calls == [{"model_hash": "abc", "files": ["/tmp/file"]}]
+ assert result == {"success": True}
+
+
+async def test_import_example_images_use_case_maps_validation_error() -> None:
+ processor = StubExampleImagesProcessor()
+ processor.error = "validation"
+ use_case = ImportExampleImagesUseCase(processor=processor)
+ request = DummyJsonRequest({"model_hash": None, "file_paths": []})
+
+ with pytest.raises(ImportExampleImagesValidationError):
+ await use_case.execute(request)
+
+
+async def test_import_example_images_use_case_propagates_generic_error() -> None:
+ processor = StubExampleImagesProcessor()
+ processor.error = "generic"
+ use_case = ImportExampleImagesUseCase(processor=processor)
+ request = DummyJsonRequest({"model_hash": "abc", "file_paths": ["/tmp/file"]})
+
+ with pytest.raises(ExampleImagesImportError):
+ await use_case.execute(request)
diff --git a/tests/services/test_websocket_manager.py b/tests/services/test_websocket_manager.py
new file mode 100644
index 00000000..b85c2197
--- /dev/null
+++ b/tests/services/test_websocket_manager.py
@@ -0,0 +1,84 @@
+from datetime import datetime, timedelta
+
+import pytest
+
+from py.services.websocket_manager import WebSocketManager
+
+
+class DummyWebSocket:
+ def __init__(self):
+ self.messages = []
+ self.closed = False
+
+ async def send_json(self, data):
+ if self.closed:
+ raise RuntimeError("WebSocket closed")
+ self.messages.append(data)
+
+
+@pytest.fixture
+def manager():
+ return WebSocketManager()
+
+
+async def test_broadcast_init_progress_adds_defaults(manager):
+ ws = DummyWebSocket()
+ manager._init_websockets.add(ws)
+
+ await manager.broadcast_init_progress({})
+
+ assert ws.messages == [
+ {
+ "stage": "processing",
+ "progress": 0,
+ "details": "Processing...",
+ }
+ ]
+
+
+async def test_broadcast_download_progress_tracks_state(manager):
+ ws = DummyWebSocket()
+ download_id = "abc"
+ manager._download_websockets[download_id] = ws
+
+ await manager.broadcast_download_progress(download_id, {"progress": 55})
+
+ assert ws.messages == [{"progress": 55}]
+ assert manager.get_download_progress(download_id)["progress"] == 55
+
+
+async def test_broadcast_download_progress_missing_socket(manager):
+ await manager.broadcast_download_progress("missing", {"progress": 30})
+ # Progress should be stored even without a live websocket
+ assert manager.get_download_progress("missing")["progress"] == 30
+
+
+async def test_auto_organize_progress_helpers(manager):
+ payload = {"status": "processing", "progress": 10}
+ await manager.broadcast_auto_organize_progress(payload)
+
+ assert manager.get_auto_organize_progress() == payload
+ assert manager.is_auto_organize_running() is True
+
+ manager.cleanup_auto_organize_progress()
+ assert manager.get_auto_organize_progress() is None
+ assert manager.is_auto_organize_running() is False
+
+
+def test_cleanup_old_downloads(manager):
+ now = datetime.now()
+ manager._download_progress = {
+ "recent": {"progress": 10, "timestamp": now},
+ "stale": {"progress": 100, "timestamp": now - timedelta(hours=48)},
+ }
+
+ manager.cleanup_old_downloads(max_age_hours=24)
+
+ assert "stale" not in manager._download_progress
+ assert "recent" in manager._download_progress
+
+
+def test_generate_download_id(manager):
+ download_id = manager.generate_download_id()
+ assert isinstance(download_id, str)
+ assert download_id
diff --git a/vitest.config.js b/vitest.config.js
new file mode 100644
index 00000000..3d88d268
--- /dev/null
+++ b/vitest.config.js
@@ -0,0 +1,18 @@
+import { defineConfig } from 'vitest/config';
+
+export default defineConfig({
+ test: {
+ environment: 'jsdom',
+ globals: true,
+ setupFiles: ['tests/frontend/setup.js'],
+ include: [
+ 'tests/frontend/**/*.test.js'
+ ],
+ coverage: {
+ enabled: process.env.VITEST_COVERAGE === 'true',
+ provider: 'v8',
+ reporter: ['text', 'lcov', 'json-summary'],
+ reportsDirectory: 'coverage/frontend'
+ }
+ }
+});
diff --git a/web/comfyui/autocomplete.js b/web/comfyui/autocomplete.js
index 61fa1984..790c062a 100644
--- a/web/comfyui/autocomplete.js
+++ b/web/comfyui/autocomplete.js
@@ -2,12 +2,25 @@ import { api } from "../../scripts/api.js";
import { app } from "../../scripts/app.js";
import { TextAreaCaretHelper } from "./textarea_caret_helper.js";
+function parseUsageTipNumber(value) {
+ if (typeof value === 'number' && Number.isFinite(value)) {
+ return value;
+ }
+ if (typeof value === 'string') {
+ const parsed = parseFloat(value);
+ if (Number.isFinite(parsed)) {
+ return parsed;
+ }
+ }
+ return null;
+}
+
class AutoComplete {
constructor(inputElement, modelType = 'loras', options = {}) {
this.inputElement = inputElement;
this.modelType = modelType;
this.options = {
- maxItems: 15,
+ maxItems: 20,
minChars: 1,
debounceDelay: 200,
showPreview: true,
@@ -47,8 +60,6 @@ class AutoComplete {
border-radius: 8px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3);
display: none;
- font-family: Arial, sans-serif;
- font-size: 14px;
min-width: 200px;
width: auto;
backdrop-filter: blur(8px);
@@ -147,8 +158,8 @@ class AutoComplete {
return '';
}
- // Split on multiple delimiters: comma, space, '>' and other common separators
- const segments = beforeCursor.split(/[,\s>]+/);
+ // Split on comma and '>' delimiters only (do not split on spaces)
+ const segments = beforeCursor.split(/[,\>]+/);
// Return the last non-empty segment as search term
const lastSegment = segments[segments.length - 1] || '';
@@ -158,7 +169,7 @@ class AutoComplete {
async search(term = '') {
try {
this.currentSearchTerm = term;
- const response = await api.fetchApi(`/${this.modelType}/relative-paths?search=${encodeURIComponent(term)}&limit=${this.options.maxItems}`);
+ const response = await api.fetchApi(`/lm/${this.modelType}/relative-paths?search=${encodeURIComponent(term)}&limit=${this.options.maxItems}`);
const data = await response.json();
if (data.success && data.relative_paths && data.relative_paths.length > 0) {
@@ -381,19 +392,30 @@ class AutoComplete {
async insertSelection(relativePath) {
// Extract just the filename for LoRA name
const fileName = relativePath.split(/[/\\]/).pop().replace(/\.(safetensors|ckpt|pt|bin)$/i, '');
-
- // Get usage tips and extract strength
+
+ // Get usage tips and extract strength information
let strength = 1.0; // Default strength
+ let hasStrength = false;
+ let clipStrength = null;
try {
- const response = await api.fetchApi(`/loras/usage-tips-by-path?relative_path=${encodeURIComponent(relativePath)}`);
+ const response = await api.fetchApi(`/lm/loras/usage-tips-by-path?relative_path=${encodeURIComponent(relativePath)}`);
if (response.ok) {
const data = await response.json();
if (data.success && data.usage_tips) {
- // Parse JSON string and extract strength
try {
const usageTips = JSON.parse(data.usage_tips);
- if (usageTips.strength && typeof usageTips.strength === 'number') {
- strength = usageTips.strength;
+ const parsedStrength = parseUsageTipNumber(usageTips.strength);
+ if (parsedStrength !== null) {
+ strength = parsedStrength;
+ hasStrength = true;
+ }
+ const clipSource = usageTips.clip_strength ?? usageTips.clipStrength;
+ const parsedClipStrength = parseUsageTipNumber(clipSource);
+ if (parsedClipStrength !== null) {
+ clipStrength = parsedClipStrength;
+ if (!hasStrength) {
+ strength = 1.0;
+ }
}
} catch (parseError) {
console.warn('Failed to parse usage tips JSON:', parseError);
@@ -403,44 +425,32 @@ class AutoComplete {
} catch (error) {
console.warn('Failed to fetch usage tips:', error);
}
-
- // Format the LoRA code with strength
- const loraCode = `<lora:${fileName}:${strength}>, `;
-
+
+ // Format the LoRA code with strength values
+ const loraCode = clipStrength !== null
+ ? `<lora:${fileName}:${strength}:${clipStrength}>, `
+ : `<lora:${fileName}:${strength}>, `;
+
const currentValue = this.inputElement.value;
const caretPos = this.getCaretPosition();
- const lastCommaIndex = currentValue.lastIndexOf(',', caretPos - 1);
-
- let newValue;
- let newCaretPos;
-
- if (lastCommaIndex === -1) {
- // No comma found before cursor, replace from start or current search term start
- const searchTerm = this.getSearchTerm(currentValue.substring(0, caretPos));
- const searchStartPos = caretPos - searchTerm.length;
- newValue = currentValue.substring(0, searchStartPos) + loraCode + currentValue.substring(caretPos);
- newCaretPos = searchStartPos + loraCode.length;
- } else {
- // Replace text after last comma before cursor
- const afterCommaPos = lastCommaIndex + 1;
- // Skip whitespace after comma
- let insertPos = afterCommaPos;
- while (insertPos < caretPos && /\s/.test(currentValue[insertPos])) {
- insertPos++;
- }
-
- newValue = currentValue.substring(0, insertPos) + loraCode + currentValue.substring(caretPos);
- newCaretPos = insertPos + loraCode.length;
- }
-
+
+ // Use getSearchTerm to get the current search term before cursor
+ const beforeCursor = currentValue.substring(0, caretPos);
+ const searchTerm = this.getSearchTerm(beforeCursor);
+ const searchStartPos = caretPos - searchTerm.length;
+
+ // Only replace the search term, not everything after the last comma
+ const newValue = currentValue.substring(0, searchStartPos) + loraCode + currentValue.substring(caretPos);
+ const newCaretPos = searchStartPos + loraCode.length;
+
this.inputElement.value = newValue;
-
+
// Trigger input event to notify about the change
const event = new Event('input', { bubbles: true });
this.inputElement.dispatchEvent(event);
-
+
this.hide();
-
+
// Focus back to input and position cursor
this.inputElement.focus();
this.inputElement.setSelectionRange(newCaretPos, newCaretPos);
diff --git a/web/comfyui/legacy_loras_widget.js b/web/comfyui/legacy_loras_widget.js
deleted file mode 100644
index bf2ba96f..00000000
--- a/web/comfyui/legacy_loras_widget.js
+++ /dev/null
@@ -1,978 +0,0 @@
-import { api } from "../../scripts/api.js";
-import { app } from "../../scripts/app.js";
-
-export function addLorasWidget(node, name, opts, callback) {
- // Create container for loras
- const container = document.createElement("div");
- container.className = "comfy-loras-container";
- Object.assign(container.style, {
- display: "flex",
- flexDirection: "column",
- gap: "8px",
- padding: "6px",
- backgroundColor: "rgba(40, 44, 52, 0.6)",
- borderRadius: "6px",
- width: "100%",
- });
-
- // Initialize default value
- const defaultValue = opts?.defaultVal || [];
-
- // Parse LoRA entries from value
- const parseLoraValue = (value) => {
- if (!value) return [];
- return Array.isArray(value) ? value : [];
- };
-
- // Format LoRA data
- const formatLoraValue = (loras) => {
- return loras;
- };
-
- // Function to create toggle element
- const createToggle = (active, onChange) => {
- const toggle = document.createElement("div");
- toggle.className = "comfy-lora-toggle";
-
- updateToggleStyle(toggle, active);
-
- toggle.addEventListener("click", (e) => {
- e.stopPropagation();
- onChange(!active);
- });
-
- return toggle;
- };
-
- // Helper function to update toggle style
- function updateToggleStyle(toggleEl, active) {
- Object.assign(toggleEl.style, {
- width: "18px",
- height: "18px",
- borderRadius: "4px",
- cursor: "pointer",
- transition: "all 0.2s ease",
- backgroundColor: active ? "rgba(66, 153, 225, 0.9)" : "rgba(45, 55, 72, 0.7)",
- border: `1px solid ${active ? "rgba(66, 153, 225, 0.9)" : "rgba(226, 232, 240, 0.2)"}`,
- });
-
- // Add hover effect
- toggleEl.onmouseenter = () => {
- toggleEl.style.transform = "scale(1.05)";
- toggleEl.style.boxShadow = "0 2px 4px rgba(0,0,0,0.15)";
- };
-
- toggleEl.onmouseleave = () => {
- toggleEl.style.transform = "scale(1)";
- toggleEl.style.boxShadow = "none";
- };
- }
-
- // Create arrow button for strength adjustment
- const createArrowButton = (direction, onClick) => {
- const button = document.createElement("div");
- button.className = `comfy-lora-arrow comfy-lora-arrow-${direction}`;
-
- Object.assign(button.style, {
- width: "16px",
- height: "16px",
- display: "flex",
- alignItems: "center",
- justifyContent: "center",
- cursor: "pointer",
- userSelect: "none",
- fontSize: "12px",
- color: "rgba(226, 232, 240, 0.8)",
- transition: "all 0.2s ease",
- });
-
- button.textContent = direction === "left" ? "◀" : "▶";
-
- button.addEventListener("click", (e) => {
- e.stopPropagation();
- onClick();
- });
-
- // Add hover effect
- button.onmouseenter = () => {
- button.style.color = "white";
- button.style.transform = "scale(1.2)";
- };
-
- button.onmouseleave = () => {
- button.style.color = "rgba(226, 232, 240, 0.8)";
- button.style.transform = "scale(1)";
- };
-
- return button;
- };
-
- // 添加预览弹窗组件
- class PreviewTooltip {
- constructor() {
- this.element = document.createElement('div');
- Object.assign(this.element.style, {
- position: 'fixed',
- zIndex: 9999,
- background: 'rgba(0, 0, 0, 0.85)',
- borderRadius: '6px',
- boxShadow: '0 4px 12px rgba(0, 0, 0, 0.3)',
- display: 'none',
- overflow: 'hidden',
- maxWidth: '300px',
- });
- document.body.appendChild(this.element);
- this.hideTimeout = null; // 添加超时处理变量
-
- // 添加全局点击事件来隐藏tooltip
- document.addEventListener('click', () => this.hide());
-
- // 添加滚动事件监听
- document.addEventListener('scroll', () => this.hide(), true);
- }
-
- async show(loraName, x, y) {
- try {
- // 清除之前的隐藏定时器
- if (this.hideTimeout) {
- clearTimeout(this.hideTimeout);
- this.hideTimeout = null;
- }
-
- // 如果已经显示同一个lora的预览,则不重复显示
- if (this.element.style.display === 'block' && this.currentLora === loraName) {
- return;
- }
-
- this.currentLora = loraName;
-
- // 获取预览URL
- const response = await api.fetchApi(`/loras/preview-url?name=${encodeURIComponent(loraName)}`, {
- method: 'GET'
- });
-
- if (!response.ok) {
- throw new Error('Failed to fetch preview URL');
- }
-
- const data = await response.json();
- if (!data.success || !data.preview_url) {
- throw new Error('No preview available');
- }
-
- // 清除现有内容
- while (this.element.firstChild) {
- this.element.removeChild(this.element.firstChild);
- }
-
- // Create media container with relative positioning
- const mediaContainer = document.createElement('div');
- Object.assign(mediaContainer.style, {
- position: 'relative',
- maxWidth: '300px',
- maxHeight: '300px',
- });
-
- const isVideo = data.preview_url.endsWith('.mp4');
- const mediaElement = isVideo ? document.createElement('video') : document.createElement('img');
-
- Object.assign(mediaElement.style, {
- maxWidth: '300px',
- maxHeight: '300px',
- objectFit: 'contain',
- display: 'block',
- });
-
- if (isVideo) {
- mediaElement.autoplay = true;
- mediaElement.loop = true;
- mediaElement.muted = true;
- mediaElement.controls = false;
- }
-
- mediaElement.src = data.preview_url;
-
- // Create name label with absolute positioning
- const nameLabel = document.createElement('div');
- nameLabel.textContent = loraName;
- Object.assign(nameLabel.style, {
- position: 'absolute',
- bottom: '0',
- left: '0',
- right: '0',
- padding: '8px',
- color: 'rgba(255, 255, 255, 0.95)',
- fontSize: '13px',
- fontFamily: "'Inter', 'Segoe UI', system-ui, -apple-system, sans-serif",
- background: 'linear-gradient(transparent, rgba(0, 0, 0, 0.8))',
- whiteSpace: 'nowrap',
- overflow: 'hidden',
- textOverflow: 'ellipsis',
- textAlign: 'center',
- backdropFilter: 'blur(4px)',
- WebkitBackdropFilter: 'blur(4px)',
- });
-
- mediaContainer.appendChild(mediaElement);
- mediaContainer.appendChild(nameLabel);
- this.element.appendChild(mediaContainer);
-
- // 添加淡入效果
- this.element.style.opacity = '0';
- this.element.style.display = 'block';
- this.position(x, y);
-
- requestAnimationFrame(() => {
- this.element.style.transition = 'opacity 0.15s ease';
- this.element.style.opacity = '1';
- });
- } catch (error) {
- console.warn('Failed to load preview:', error);
- }
- }
-
- position(x, y) {
- // 确保预览框不超出视窗边界
- const rect = this.element.getBoundingClientRect();
- const viewportWidth = window.innerWidth;
- const viewportHeight = window.innerHeight;
-
- let left = x + 10; // 默认在鼠标右侧偏移10px
- let top = y + 10; // 默认在鼠标下方偏移10px
-
- // 检查右边界
- if (left + rect.width > viewportWidth) {
- left = x - rect.width - 10;
- }
-
- // 检查下边界
- if (top + rect.height > viewportHeight) {
- top = y - rect.height - 10;
- }
-
- Object.assign(this.element.style, {
- left: `${left}px`,
- top: `${top}px`
- });
- }
-
- hide() {
- // 使用淡出效果
- if (this.element.style.display === 'block') {
- this.element.style.opacity = '0';
- this.hideTimeout = setTimeout(() => {
- this.element.style.display = 'none';
- this.currentLora = null;
- // 停止视频播放
- const video = this.element.querySelector('video');
- if (video) {
- video.pause();
- }
- this.hideTimeout = null;
- }, 150);
- }
- }
-
- cleanup() {
- if (this.hideTimeout) {
- clearTimeout(this.hideTimeout);
- }
- // 移除所有事件监听器
- document.removeEventListener('click', () => this.hide());
- document.removeEventListener('scroll', () => this.hide(), true);
- this.element.remove();
- }
- }
-
- // 创建预览tooltip实例
- const previewTooltip = new PreviewTooltip();
-
- // Function to handle strength adjustment via dragging
- const handleStrengthDrag = (name, initialStrength, initialX, event, widget) => {
- // Calculate drag sensitivity (how much the strength changes per pixel)
- // Using 0.01 per 10 pixels of movement
- const sensitivity = 0.001;
-
- // Get the current mouse position
- const currentX = event.clientX;
-
- // Calculate the distance moved
- const deltaX = currentX - initialX;
-
- // Calculate the new strength value based on movement
- // Moving right increases, moving left decreases
- let newStrength = Number(initialStrength) + (deltaX * sensitivity);
-
- // Limit the strength to reasonable bounds (now between -10 and 10)
- newStrength = Math.max(-10, Math.min(10, newStrength));
- newStrength = Number(newStrength.toFixed(2));
-
- // Update the lora data
- const lorasData = parseLoraValue(widget.value);
- const loraIndex = lorasData.findIndex(l => l.name === name);
-
- if (loraIndex >= 0) {
- lorasData[loraIndex].strength = newStrength;
-
- // Update the widget value
- widget.value = formatLoraValue(lorasData);
-
- // Force re-render to show updated strength value
- renderLoras(widget.value, widget);
- }
- };
-
- // Function to initialize drag operation
- const initDrag = (loraEl, nameEl, name, widget) => {
- let isDragging = false;
- let initialX = 0;
- let initialStrength = 0;
-
- // Create a style element for drag cursor override if it doesn't exist
- if (!document.getElementById('comfy-lora-drag-style')) {
- const styleEl = document.createElement('style');
- styleEl.id = 'comfy-lora-drag-style';
- styleEl.textContent = `
- body.comfy-lora-dragging,
- body.comfy-lora-dragging * {
- cursor: ew-resize !important;
- }
- `;
- document.head.appendChild(styleEl);
- }
-
- // Create a drag handler that's applied to the entire lora entry
- // except toggle and strength controls
- loraEl.addEventListener('mousedown', (e) => {
- // Skip if clicking on toggle or strength control areas
- if (e.target.closest('.comfy-lora-toggle') ||
- e.target.closest('input') ||
- e.target.closest('.comfy-lora-arrow')) {
- return;
- }
-
- // Store initial values
- const lorasData = parseLoraValue(widget.value);
- const loraData = lorasData.find(l => l.name === name);
-
- if (!loraData) return;
-
- initialX = e.clientX;
- initialStrength = loraData.strength;
- isDragging = true;
-
- // Add class to body to enforce cursor style globally
- document.body.classList.add('comfy-lora-dragging');
-
- // Prevent text selection during drag
- e.preventDefault();
- });
-
- // Use the document for move and up events to ensure drag continues
- // even if mouse leaves the element
- document.addEventListener('mousemove', (e) => {
- if (!isDragging) return;
-
- // Call the strength adjustment function
- handleStrengthDrag(name, initialStrength, initialX, e, widget);
-
- // Prevent showing the preview tooltip during drag
- previewTooltip.hide();
- });
-
- document.addEventListener('mouseup', () => {
- if (isDragging) {
- isDragging = false;
- // Remove the class to restore normal cursor behavior
- document.body.classList.remove('comfy-lora-dragging');
- }
- });
- };
-
- // Function to create menu item
- const createMenuItem = (text, icon, onClick) => {
- const menuItem = document.createElement('div');
- Object.assign(menuItem.style, {
- padding: '6px 20px',
- cursor: 'pointer',
- color: 'rgba(226, 232, 240, 0.9)',
- fontSize: '13px',
- userSelect: 'none',
- display: 'flex',
- alignItems: 'center',
- gap: '8px',
- });
-
- // Create icon element
- const iconEl = document.createElement('div');
- iconEl.innerHTML = icon;
- Object.assign(iconEl.style, {
- width: '14px',
- height: '14px',
- display: 'flex',
- alignItems: 'center',
- justifyContent: 'center',
- });
-
- // Create text element
- const textEl = document.createElement('span');
- textEl.textContent = text;
-
- menuItem.appendChild(iconEl);
- menuItem.appendChild(textEl);
-
- menuItem.addEventListener('mouseenter', () => {
- menuItem.style.backgroundColor = 'rgba(66, 153, 225, 0.2)';
- });
-
- menuItem.addEventListener('mouseleave', () => {
- menuItem.style.backgroundColor = 'transparent';
- });
-
- if (onClick) {
- menuItem.addEventListener('click', onClick);
- }
-
- return menuItem;
- };
-
- // Function to create context menu
- const createContextMenu = (x, y, loraName, widget) => {
- // Hide preview tooltip first
- previewTooltip.hide();
-
- // Remove existing context menu if any
- const existingMenu = document.querySelector('.comfy-lora-context-menu');
- if (existingMenu) {
- existingMenu.remove();
- }
-
- const menu = document.createElement('div');
- menu.className = 'comfy-lora-context-menu';
- Object.assign(menu.style, {
- position: 'fixed',
- left: `${x}px`,
- top: `${y}px`,
- backgroundColor: 'rgba(30, 30, 30, 0.95)',
- border: '1px solid rgba(255, 255, 255, 0.1)',
- borderRadius: '4px',
- padding: '4px 0',
- zIndex: 1000,
- boxShadow: '0 2px 10px rgba(0,0,0,0.2)',
- minWidth: '180px',
- });
-
- // View on Civitai option with globe icon
- const viewOnCivitaiOption = createMenuItem(
- 'View on Civitai',
- ' ',
- async () => {
- menu.remove();
- document.removeEventListener('click', closeMenu);
-
- try {
- // Get Civitai URL from API
- const response = await api.fetchApi(`/loras/civitai-url?name=${encodeURIComponent(loraName)}`, {
- method: 'GET'
- });
-
- if (!response.ok) {
- const errorText = await response.text();
- throw new Error(errorText || 'Failed to get Civitai URL');
- }
-
- const data = await response.json();
- if (data.success && data.civitai_url) {
- // Open the URL in a new tab
- window.open(data.civitai_url, '_blank');
- } else {
- // Show error message if no Civitai URL
- if (app && app.extensionManager && app.extensionManager.toast) {
- app.extensionManager.toast.add({
- severity: 'warning',
- summary: 'Not Found',
- detail: 'This LoRA has no associated Civitai URL',
- life: 3000
- });
- } else {
- alert('This LoRA has no associated Civitai URL');
- }
- }
- } catch (error) {
- console.error('Error getting Civitai URL:', error);
- if (app && app.extensionManager && app.extensionManager.toast) {
- app.extensionManager.toast.add({
- severity: 'error',
- summary: 'Error',
- detail: error.message || 'Failed to get Civitai URL',
- life: 5000
- });
- } else {
- alert('Error: ' + (error.message || 'Failed to get Civitai URL'));
- }
- }
- }
- );
-
- // Delete option with trash icon
- const deleteOption = createMenuItem(
- 'Delete',
- ' ',
- () => {
- menu.remove();
- document.removeEventListener('click', closeMenu);
-
- const lorasData = parseLoraValue(widget.value).filter(l => l.name !== loraName);
- widget.value = formatLoraValue(lorasData);
-
- if (widget.callback) {
- widget.callback(widget.value);
- }
- }
- );
-
- // Save recipe option with bookmark icon
- const saveOption = createMenuItem(
- 'Save Recipe',
- ' ',
- () => {
- menu.remove();
- document.removeEventListener('click', closeMenu);
- saveRecipeDirectly(widget);
- }
- );
-
- // Add separator
- const separator = document.createElement('div');
- Object.assign(separator.style, {
- margin: '4px 0',
- borderTop: '1px solid rgba(255, 255, 255, 0.1)',
- });
-
- menu.appendChild(viewOnCivitaiOption); // Add the new menu option
- menu.appendChild(deleteOption);
- menu.appendChild(separator);
- menu.appendChild(saveOption);
-
- document.body.appendChild(menu);
-
- // Close menu when clicking outside
- const closeMenu = (e) => {
- if (!menu.contains(e.target)) {
- menu.remove();
- document.removeEventListener('click', closeMenu);
- }
- };
- setTimeout(() => document.addEventListener('click', closeMenu), 0);
- };
-
- // Function to render loras from data
- const renderLoras = (value, widget) => {
- // Clear existing content
- while (container.firstChild) {
- container.removeChild(container.firstChild);
- }
-
- // Parse the loras data
- const lorasData = parseLoraValue(value);
-
- if (lorasData.length === 0) {
- // Show message when no loras are added
- const emptyMessage = document.createElement("div");
- emptyMessage.textContent = "No LoRAs added";
- Object.assign(emptyMessage.style, {
- textAlign: "center",
- padding: "20px 0",
- color: "rgba(226, 232, 240, 0.8)",
- fontStyle: "italic",
- userSelect: "none", // Add this line to prevent text selection
- WebkitUserSelect: "none", // For Safari support
- MozUserSelect: "none", // For Firefox support
- msUserSelect: "none", // For IE/Edge support
- });
- container.appendChild(emptyMessage);
- return;
- }
-
- // Create header
- const header = document.createElement("div");
- header.className = "comfy-loras-header";
- Object.assign(header.style, {
- display: "flex",
- justifyContent: "space-between",
- alignItems: "center",
- padding: "4px 8px",
- borderBottom: "1px solid rgba(226, 232, 240, 0.2)",
- marginBottom: "8px"
- });
-
- // Add toggle all control
- const allActive = lorasData.every(lora => lora.active);
- const toggleAll = createToggle(allActive, (active) => {
- // Update all loras active state
- const lorasData = parseLoraValue(widget.value);
- lorasData.forEach(lora => lora.active = active);
-
- const newValue = formatLoraValue(lorasData);
- widget.value = newValue;
- });
-
- // Add label to toggle all
- const toggleLabel = document.createElement("div");
- toggleLabel.textContent = "Toggle All";
- Object.assign(toggleLabel.style, {
- color: "rgba(226, 232, 240, 0.8)",
- fontSize: "13px",
- marginLeft: "8px",
- userSelect: "none", // Add this line to prevent text selection
- WebkitUserSelect: "none", // For Safari support
- MozUserSelect: "none", // For Firefox support
- msUserSelect: "none", // For IE/Edge support
- });
-
- const toggleContainer = document.createElement("div");
- Object.assign(toggleContainer.style, {
- display: "flex",
- alignItems: "center",
- });
- toggleContainer.appendChild(toggleAll);
- toggleContainer.appendChild(toggleLabel);
-
- // Strength label
- const strengthLabel = document.createElement("div");
- strengthLabel.textContent = "Strength";
- Object.assign(strengthLabel.style, {
- color: "rgba(226, 232, 240, 0.8)",
- fontSize: "13px",
- marginRight: "8px",
- userSelect: "none", // Add this line to prevent text selection
- WebkitUserSelect: "none", // For Safari support
- MozUserSelect: "none", // For Firefox support
- msUserSelect: "none", // For IE/Edge support
- });
-
- header.appendChild(toggleContainer);
- header.appendChild(strengthLabel);
- container.appendChild(header);
-
- // Render each lora entry
- lorasData.forEach((loraData) => {
- const { name, strength, active } = loraData;
-
- const loraEl = document.createElement("div");
- loraEl.className = "comfy-lora-entry";
- Object.assign(loraEl.style, {
- display: "flex",
- justifyContent: "space-between",
- alignItems: "center",
- padding: "8px",
- borderRadius: "6px",
- backgroundColor: active ? "rgba(45, 55, 72, 0.7)" : "rgba(35, 40, 50, 0.5)",
- transition: "all 0.2s ease",
- marginBottom: "6px",
- });
-
- // Create toggle for this lora
- const toggle = createToggle(active, (newActive) => {
- // Update this lora's active state
- const lorasData = parseLoraValue(widget.value);
- const loraIndex = lorasData.findIndex(l => l.name === name);
-
- if (loraIndex >= 0) {
- lorasData[loraIndex].active = newActive;
-
- const newValue = formatLoraValue(lorasData);
- widget.value = newValue;
- }
- });
-
- // Create name display
- const nameEl = document.createElement("div");
- nameEl.textContent = name;
- Object.assign(nameEl.style, {
- marginLeft: "10px",
- flex: "1",
- overflow: "hidden",
- textOverflow: "ellipsis",
- whiteSpace: "nowrap",
- color: active ? "rgba(226, 232, 240, 0.9)" : "rgba(226, 232, 240, 0.6)",
- fontSize: "13px",
- cursor: "pointer", // Add pointer cursor to indicate hoverable area
- userSelect: "none", // Add this line to prevent text selection
- WebkitUserSelect: "none", // For Safari support
- MozUserSelect: "none", // For Firefox support
- msUserSelect: "none", // For IE/Edge support
- });
-
- // Move preview tooltip events to nameEl instead of loraEl
- nameEl.addEventListener('mouseenter', async (e) => {
- e.stopPropagation();
- const rect = nameEl.getBoundingClientRect();
- await previewTooltip.show(name, rect.right, rect.top);
- });
-
- nameEl.addEventListener('mouseleave', (e) => {
- e.stopPropagation();
- previewTooltip.hide();
- });
-
- // Remove the preview tooltip events from loraEl
- loraEl.onmouseenter = () => {
- loraEl.style.backgroundColor = active ? "rgba(50, 60, 80, 0.8)" : "rgba(40, 45, 55, 0.6)";
- };
-
- loraEl.onmouseleave = () => {
- loraEl.style.backgroundColor = active ? "rgba(45, 55, 72, 0.7)" : "rgba(35, 40, 50, 0.5)";
- };
-
- // Add context menu event
- loraEl.addEventListener('contextmenu', (e) => {
- e.preventDefault();
- e.stopPropagation();
- createContextMenu(e.clientX, e.clientY, name, widget);
- });
-
- // Create strength control
- const strengthControl = document.createElement("div");
- Object.assign(strengthControl.style, {
- display: "flex",
- alignItems: "center",
- gap: "8px",
- });
-
- // Left arrow
- const leftArrow = createArrowButton("left", () => {
- // Decrease strength
- const lorasData = parseLoraValue(widget.value);
- const loraIndex = lorasData.findIndex(l => l.name === name);
-
- if (loraIndex >= 0) {
- lorasData[loraIndex].strength = (lorasData[loraIndex].strength - 0.05).toFixed(2);
-
- const newValue = formatLoraValue(lorasData);
- widget.value = newValue;
- }
- });
-
- // Strength display
- const strengthEl = document.createElement("input");
- strengthEl.type = "text";
- strengthEl.value = typeof strength === 'number' ? strength.toFixed(2) : Number(strength).toFixed(2);
- Object.assign(strengthEl.style, {
- minWidth: "50px",
- width: "50px",
- textAlign: "center",
- color: active ? "rgba(226, 232, 240, 0.9)" : "rgba(226, 232, 240, 0.6)",
- fontSize: "13px",
- background: "none",
- border: "1px solid transparent",
- padding: "2px 4px",
- borderRadius: "3px",
- outline: "none",
- });
-
- // 添加hover效果
- strengthEl.addEventListener('mouseenter', () => {
- strengthEl.style.border = "1px solid rgba(226, 232, 240, 0.2)";
- });
-
- strengthEl.addEventListener('mouseleave', () => {
- if (document.activeElement !== strengthEl) {
- strengthEl.style.border = "1px solid transparent";
- }
- });
-
- // 处理焦点
- strengthEl.addEventListener('focus', () => {
- strengthEl.style.border = "1px solid rgba(66, 153, 225, 0.6)";
- strengthEl.style.background = "rgba(0, 0, 0, 0.2)";
- // 自动选中所有内容
- strengthEl.select();
- });
-
- strengthEl.addEventListener('blur', () => {
- strengthEl.style.border = "1px solid transparent";
- strengthEl.style.background = "none";
- });
-
- // 处理输入变化
- strengthEl.addEventListener('change', () => {
- let newValue = parseFloat(strengthEl.value);
-
- // 验证输入
- if (isNaN(newValue)) {
- newValue = 1.0;
- }
-
- // 更新数值
- const lorasData = parseLoraValue(widget.value);
- const loraIndex = lorasData.findIndex(l => l.name === name);
-
- if (loraIndex >= 0) {
- lorasData[loraIndex].strength = newValue.toFixed(2);
-
- // 更新值并触发回调
- const newLorasValue = formatLoraValue(lorasData);
- widget.value = newLorasValue;
- }
- });
-
- // 处理按键事件
- strengthEl.addEventListener('keydown', (e) => {
- if (e.key === 'Enter') {
- strengthEl.blur();
- }
- });
-
- // Right arrow
- const rightArrow = createArrowButton("right", () => {
- // Increase strength
- const lorasData = parseLoraValue(widget.value);
- const loraIndex = lorasData.findIndex(l => l.name === name);
-
- if (loraIndex >= 0) {
- lorasData[loraIndex].strength = (parseFloat(lorasData[loraIndex].strength) + 0.05).toFixed(2);
-
- const newValue = formatLoraValue(lorasData);
- widget.value = newValue;
- }
- });
-
- strengthControl.appendChild(leftArrow);
- strengthControl.appendChild(strengthEl);
- strengthControl.appendChild(rightArrow);
-
- // Assemble entry
- const leftSection = document.createElement("div");
- Object.assign(leftSection.style, {
- display: "flex",
- alignItems: "center",
- flex: "1",
- minWidth: "0", // Allow shrinking
- });
-
- leftSection.appendChild(toggle);
- leftSection.appendChild(nameEl);
-
- loraEl.appendChild(leftSection);
- loraEl.appendChild(strengthControl);
-
- container.appendChild(loraEl);
-
- // Initialize drag functionality
- initDrag(loraEl, nameEl, name, widget);
- });
- };
-
- // Store the value in a variable to avoid recursion
- let widgetValue = defaultValue;
-
- // Create widget with initial properties
- const widget = node.addDOMWidget(name, "loras", container, {
- getValue: function() {
- return widgetValue;
- },
- setValue: function(v) {
- // Remove duplicates by keeping the last occurrence of each lora name
- const uniqueValue = (v || []).reduce((acc, lora) => {
- // Remove any existing lora with the same name
- const filtered = acc.filter(l => l.name !== lora.name);
- // Add the current lora
- return [...filtered, lora];
- }, []);
-
- widgetValue = uniqueValue;
- renderLoras(widgetValue, widget);
-
- // Update container height after rendering
- requestAnimationFrame(() => {
- const minHeight = this.getMinHeight();
- container.style.height = `${minHeight}px`;
-
- // Force node to update size
- node.setSize([node.size[0], node.computeSize()[1]]);
- node.setDirtyCanvas(true, true);
- });
- },
- getMinHeight: function() {
- // Calculate height based on content
- const lorasCount = parseLoraValue(widgetValue).length;
- return Math.max(
- 100,
- lorasCount > 0 ? 60 + lorasCount * 44 : 60
- );
- },
- });
-
- widget.value = defaultValue;
-
- widget.callback = callback;
-
- widget.serializeValue = () => {
- // Add dummy items to avoid the 2-element serialization issue, a bug in comfyui
- return [...widgetValue,
- { name: "__dummy_item1__", strength: 0, active: false, _isDummy: true },
- { name: "__dummy_item2__", strength: 0, active: false, _isDummy: true }
- ];
- }
-
- widget.onRemove = () => {
- container.remove();
- previewTooltip.cleanup();
- };
-
- return { minWidth: 400, minHeight: 200, widget };
-}
-
-// Function to directly save the recipe without dialog
-async function saveRecipeDirectly(widget) {
- try {
- // Show loading toast
- if (app && app.extensionManager && app.extensionManager.toast) {
- app.extensionManager.toast.add({
- severity: 'info',
- summary: 'Saving Recipe',
- detail: 'Please wait...',
- life: 2000
- });
- }
-
- // Send the request
- const response = await fetch('/api/recipes/save-from-widget', {
- method: 'POST'
- });
-
- const result = await response.json();
-
- // Show result toast
- if (app && app.extensionManager && app.extensionManager.toast) {
- if (result.success) {
- app.extensionManager.toast.add({
- severity: 'success',
- summary: 'Recipe Saved',
- detail: 'Recipe has been saved successfully',
- life: 3000
- });
- } else {
- app.extensionManager.toast.add({
- severity: 'error',
- summary: 'Error',
- detail: result.error || 'Failed to save recipe',
- life: 5000
- });
- }
- }
- } catch (error) {
- console.error('Error saving recipe:', error);
-
- // Show error toast
- if (app && app.extensionManager && app.extensionManager.toast) {
- app.extensionManager.toast.add({
- severity: 'error',
- summary: 'Error',
- detail: 'Failed to save recipe: ' + (error.message || 'Unknown error'),
- life: 5000
- });
- }
- }
-}
diff --git a/web/comfyui/legacy_tags_widget.js b/web/comfyui/legacy_tags_widget.js
deleted file mode 100644
index d43cb016..00000000
--- a/web/comfyui/legacy_tags_widget.js
+++ /dev/null
@@ -1,193 +0,0 @@
-export function addTagsWidget(node, name, opts, callback) {
- // Create container for tags
- const container = document.createElement("div");
- container.className = "comfy-tags-container";
- Object.assign(container.style, {
- display: "flex",
- flexWrap: "wrap",
- gap: "4px", // 从8px减小到4px
- padding: "6px",
- minHeight: "30px",
- backgroundColor: "rgba(40, 44, 52, 0.6)", // Darker, more modern background
- borderRadius: "6px", // Slightly larger radius
- width: "100%",
- });
-
- // Initialize default value as array
- const initialTagsData = opts?.defaultVal || [];
-
- // Function to render tags from array data
- const renderTags = (tagsData, widget) => {
- // Clear existing tags
- while (container.firstChild) {
- container.removeChild(container.firstChild);
- }
-
- const normalizedTags = tagsData;
-
- if (normalizedTags.length === 0) {
- // Show message when no tags are present
- const emptyMessage = document.createElement("div");
- emptyMessage.textContent = "No trigger words detected";
- Object.assign(emptyMessage.style, {
- textAlign: "center",
- padding: "20px 0",
- color: "rgba(226, 232, 240, 0.8)",
- fontStyle: "italic",
- userSelect: "none",
- WebkitUserSelect: "none",
- MozUserSelect: "none",
- msUserSelect: "none",
- });
- container.appendChild(emptyMessage);
- return;
- }
-
- normalizedTags.forEach((tagData, index) => {
- const { text, active } = tagData;
- const tagEl = document.createElement("div");
- tagEl.className = "comfy-tag";
-
- updateTagStyle(tagEl, active);
-
- tagEl.textContent = text;
- tagEl.title = text; // Set tooltip for full content
-
- // Add click handler to toggle state
- tagEl.addEventListener("click", (e) => {
- e.stopPropagation();
-
- // Toggle active state for this specific tag using its index
- const updatedTags = [...widget.value];
- updatedTags[index].active = !updatedTags[index].active;
- updateTagStyle(tagEl, updatedTags[index].active);
-
- widget.value = updatedTags;
- });
-
- container.appendChild(tagEl);
- });
- };
-
- // Helper function to update tag style based on active state
- function updateTagStyle(tagEl, active) {
- const baseStyles = {
- padding: "4px 12px", // 垂直内边距从6px减小到4px
- borderRadius: "6px", // Matching container radius
- maxWidth: "200px", // Increased max width
- overflow: "hidden",
- textOverflow: "ellipsis",
- whiteSpace: "nowrap",
- fontSize: "13px", // Slightly larger font
- cursor: "pointer",
- transition: "all 0.2s ease", // Smoother transition
- border: "1px solid transparent",
- display: "inline-block",
- boxShadow: "0 1px 2px rgba(0,0,0,0.1)",
- margin: "2px", // 从4px减小到2px
- userSelect: "none", // Add this line to prevent text selection
- WebkitUserSelect: "none", // For Safari support
- MozUserSelect: "none", // For Firefox support
- msUserSelect: "none", // For IE/Edge support
- };
-
- if (active) {
- Object.assign(tagEl.style, {
- ...baseStyles,
- backgroundColor: "rgba(66, 153, 225, 0.9)", // Modern blue
- color: "white",
- borderColor: "rgba(66, 153, 225, 0.9)",
- });
- } else {
- Object.assign(tagEl.style, {
- ...baseStyles,
- backgroundColor: "rgba(45, 55, 72, 0.7)", // Darker inactive state
- color: "rgba(226, 232, 240, 0.8)", // Lighter text for contrast
- borderColor: "rgba(226, 232, 240, 0.2)",
- });
- }
-
- // Add hover effect
- tagEl.onmouseenter = () => {
- tagEl.style.transform = "translateY(-1px)";
- tagEl.style.boxShadow = "0 2px 4px rgba(0,0,0,0.15)";
- };
-
- tagEl.onmouseleave = () => {
- tagEl.style.transform = "translateY(0)";
- tagEl.style.boxShadow = "0 1px 2px rgba(0,0,0,0.1)";
- };
- }
-
- // Store the value as array
- let widgetValue = initialTagsData;
-
- // Create widget with initial properties
- const widget = node.addDOMWidget(name, "tags", container, {
- getValue: function() {
- return widgetValue;
- },
- setValue: function(v) {
- widgetValue = v;
- renderTags(widgetValue, widget);
-
- // Update container height after rendering
- requestAnimationFrame(() => {
- const minHeight = this.getMinHeight();
- container.style.height = `${minHeight}px`;
-
- // Force node to update size
- node.setSize([node.size[0], node.computeSize()[1]]);
- node.setDirtyCanvas(true, true);
- });
- },
- getMinHeight: function() {
- const minHeight = 150;
- // If no tags or only showing the empty message, return a minimum height
- if (widgetValue.length === 0) {
- return minHeight; // Height for empty state with message
- }
-
- // Get all tag elements
- const tagElements = container.querySelectorAll('.comfy-tag');
-
- if (tagElements.length === 0) {
- return minHeight; // Fallback if elements aren't rendered yet
- }
-
- // Calculate the actual height based on tag positions
- let maxBottom = 0;
-
- tagElements.forEach(tag => {
- const rect = tag.getBoundingClientRect();
- const tagBottom = rect.bottom - container.getBoundingClientRect().top;
- maxBottom = Math.max(maxBottom, tagBottom);
- });
-
- // Add padding (top and bottom padding of container)
- const computedStyle = window.getComputedStyle(container);
- const paddingTop = parseInt(computedStyle.paddingTop, 10) || 0;
- const paddingBottom = parseInt(computedStyle.paddingBottom, 10) || 0;
-
- // Add extra buffer for potential wrapping issues and to ensure no clipping
- const extraBuffer = 20;
-
- // Round up to nearest 5px for clean sizing and ensure minimum height
- return Math.max(minHeight, Math.ceil((maxBottom + paddingBottom + extraBuffer) / 5) * 5);
- },
- });
-
- widget.value = initialTagsData;
-
- widget.callback = callback;
-
- widget.serializeValue = () => {
- // Add dummy items to avoid the 2-element serialization issue, a bug in comfyui
- return [...widgetValue,
- { text: "__dummy_item__", active: false, _isDummy: true },
- { text: "__dummy_item__", active: false, _isDummy: true }
- ];
- };
-
- return { minWidth: 300, minHeight: 150, widget };
-}
diff --git a/web/comfyui/lora_loader.js b/web/comfyui/lora_loader.js
index aa62252f..f13f48a6 100644
--- a/web/comfyui/lora_loader.js
+++ b/web/comfyui/lora_loader.js
@@ -176,32 +176,6 @@ app.registerExtension({
inputWidget,
originalCallback
);
-
- // Register this node with the backend
- this.registerNode = async () => {
- try {
- await fetch("/api/register-node", {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- },
- body: JSON.stringify({
- node_id: this.id,
- bgcolor: this.bgcolor,
- title: this.title,
- graph_id: this.graph.id,
- }),
- });
- } catch (error) {
- console.warn("Failed to register node:", error);
- }
- };
-
- // Ensure the node is registered after creation
- // Call registration
- // setTimeout(() => {
- // this.registerNode();
- // }, 0);
});
}
},
diff --git a/web/comfyui/lora_stacker.js b/web/comfyui/lora_stacker.js
index 9b812926..5648891d 100644
--- a/web/comfyui/lora_stacker.js
+++ b/web/comfyui/lora_stacker.js
@@ -101,31 +101,6 @@ app.registerExtension({
inputWidget,
originalCallback
);
-
- // Register this node with the backend
- this.registerNode = async () => {
- try {
- await fetch("/api/register-node", {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- },
- body: JSON.stringify({
- node_id: this.id,
- bgcolor: this.bgcolor,
- title: this.title,
- graph_id: this.graph.id,
- }),
- });
- } catch (error) {
- console.warn("Failed to register node:", error);
- }
- };
-
- // Call registration
- // setTimeout(() => {
- // this.registerNode();
- // }, 0);
});
}
},
diff --git a/web/comfyui/loras_widget_components.js b/web/comfyui/loras_widget_components.js
index fcde1972..fc40dce3 100644
--- a/web/comfyui/loras_widget_components.js
+++ b/web/comfyui/loras_widget_components.js
@@ -269,7 +269,7 @@ export class PreviewTooltip {
this.currentLora = loraName;
// Get preview URL
- const response = await api.fetchApi(`/loras/preview-url?name=${encodeURIComponent(loraName)}`, {
+ const response = await api.fetchApi(`/lm/loras/preview-url?name=${encodeURIComponent(loraName)}`, {
method: 'GET'
});
diff --git a/web/comfyui/loras_widget_events.js b/web/comfyui/loras_widget_events.js
index fa0d5633..85564891 100644
--- a/web/comfyui/loras_widget_events.js
+++ b/web/comfyui/loras_widget_events.js
@@ -491,7 +491,7 @@ export function createContextMenu(x, y, loraName, widget, previewTooltip, render
try {
// Get Civitai URL from API
- const response = await api.fetchApi(`/loras/civitai-url?name=${encodeURIComponent(loraName)}`, {
+ const response = await api.fetchApi(`/lm/loras/civitai-url?name=${encodeURIComponent(loraName)}`, {
method: 'GET'
});
@@ -547,7 +547,7 @@ export function createContextMenu(x, y, loraName, widget, previewTooltip, render
try {
// Get notes from API
- const response = await api.fetchApi(`/loras/get-notes?name=${encodeURIComponent(loraName)}`, {
+ const response = await api.fetchApi(`/lm/loras/get-notes?name=${encodeURIComponent(loraName)}`, {
method: 'GET'
});
@@ -584,7 +584,7 @@ export function createContextMenu(x, y, loraName, widget, previewTooltip, render
try {
// Get trigger words from API
- const response = await api.fetchApi(`/loras/get-trigger-words?name=${encodeURIComponent(loraName)}`, {
+ const response = await api.fetchApi(`/lm/loras/get-trigger-words?name=${encodeURIComponent(loraName)}`, {
method: 'GET'
});
diff --git a/web/comfyui/loras_widget_utils.js b/web/comfyui/loras_widget_utils.js
index 1b4193b3..85913c72 100644
--- a/web/comfyui/loras_widget_utils.js
+++ b/web/comfyui/loras_widget_utils.js
@@ -70,7 +70,7 @@ export async function saveRecipeDirectly() {
}
// Send the request to the backend API
- const response = await fetch('/api/recipes/save-from-widget', {
+ const response = await fetch('/api/lm/recipes/save-from-widget', {
method: 'POST'
});
diff --git a/web/comfyui/trigger_word_toggle.js b/web/comfyui/trigger_word_toggle.js
index 74a3fe1d..57541bd4 100644
--- a/web/comfyui/trigger_word_toggle.js
+++ b/web/comfyui/trigger_word_toggle.js
@@ -1,11 +1,7 @@
import { app } from "../../scripts/app.js";
import { api } from "../../scripts/api.js";
-import { CONVERTED_TYPE, dynamicImportByVersion } from "./utils.js";
-
-// Function to get the appropriate tags widget based on ComfyUI version
-async function getTagsWidgetModule() {
- return await dynamicImportByVersion("./tags_widget.js", "./legacy_tags_widget.js");
-}
+import { CONVERTED_TYPE } from "./utils.js";
+import { addTagsWidget } from "./tags_widget.js";
// TriggerWordToggle extension for ComfyUI
app.registerExtension({
@@ -30,10 +26,6 @@ app.registerExtension({
// Wait for node to be properly initialized
requestAnimationFrame(async () => {
- // Dynamically import the appropriate tags widget module
- const tagsModule = await getTagsWidgetModule();
- const { addTagsWidget } = tagsModule;
-
// Get the widget object directly from the returned object
const result = addTagsWidget(node, "toggle_trigger_words", {
defaultVal: []
diff --git a/web/comfyui/ui_utils.js b/web/comfyui/ui_utils.js
index 524940c8..4996c67a 100644
--- a/web/comfyui/ui_utils.js
+++ b/web/comfyui/ui_utils.js
@@ -107,7 +107,7 @@ const initializeWidgets = () => {
// Fetch version info from the API
const fetchVersionInfo = async () => {
try {
- const response = await fetch('/api/version-info');
+ const response = await fetch('/api/lm/version-info');
const data = await response.json();
if (data.success) {
diff --git a/web/comfyui/usage_stats.js b/web/comfyui/usage_stats.js
index b0844114..b89eaf46 100644
--- a/web/comfyui/usage_stats.js
+++ b/web/comfyui/usage_stats.js
@@ -38,7 +38,7 @@ app.registerExtension({
async updateUsageStats(promptId) {
try {
// Call backend endpoint with the prompt_id
- const response = await fetch(`/api/update-usage-stats`, {
+ const response = await fetch(`/api/lm/update-usage-stats`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
@@ -79,7 +79,7 @@ app.registerExtension({
}
}
- const response = await fetch('/api/register-nodes', {
+ const response = await fetch('/api/lm/register-nodes', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
@@ -158,7 +158,7 @@ app.registerExtension({
try {
// Search for current relative path
- const response = await api.fetchApi(`/${modelType}/relative-paths?search=${encodeURIComponent(fileName)}&limit=2`);
+ const response = await api.fetchApi(`/lm/${modelType}/relative-paths?search=${encodeURIComponent(fileName)}&limit=2`);
const data = await response.json();
if (!data.success || !data.relative_paths || data.relative_paths.length === 0) {
diff --git a/web/comfyui/utils.js b/web/comfyui/utils.js
index 7c64621b..8060414d 100644
--- a/web/comfyui/utils.js
+++ b/web/comfyui/utils.js
@@ -20,10 +20,6 @@ export function chainCallback(object, property, callback) {
}
}
-export function getComfyUIFrontendVersion() {
- return window['__COMFYUI_FRONTEND_VERSION__'] || "0.0.0";
-}
-
/**
* Show a toast notification
* @param {Object|string} options - Toast options object or message string for backward compatibility
@@ -78,29 +74,6 @@ export function showToast(options, type = 'info') {
}
}
-// Dynamically import the appropriate widget based on app version
-export async function dynamicImportByVersion(latestModulePath, legacyModulePath) {
- // Parse app version and compare with 1.12.6 (version when tags widget API changed)
- const currentVersion = getComfyUIFrontendVersion();
- const versionParts = currentVersion.split('.').map(part => parseInt(part, 10));
- const requiredVersion = [1, 12, 6];
-
- // Compare version numbers
- for (let i = 0; i < 3; i++) {
- if (versionParts[i] > requiredVersion[i]) {
- console.log(`Using latest widget: ${latestModulePath}`);
- return import(latestModulePath);
- } else if (versionParts[i] < requiredVersion[i]) {
- console.log(`Using legacy widget: ${legacyModulePath}`);
- return import(legacyModulePath);
- }
- }
-
- // If we get here, versions are equal, use the latest module
- console.log(`Using latest widget: ${latestModulePath}`);
- return import(latestModulePath);
-}
-
export function hideWidgetForGood(node, widget, suffix = "") {
widget.origType = widget.type;
widget.origComputeSize = widget.computeSize;
@@ -124,11 +97,6 @@ export function hideWidgetForGood(node, widget, suffix = "") {
}
}
-// Function to get the appropriate loras widget based on ComfyUI version
-export async function getLorasWidgetModule() {
- return await dynamicImportByVersion("./loras_widget.js", "./legacy_loras_widget.js");
-}
-
// Update pattern to match both formats: or
export const LORA_PATTERN = //g;
@@ -215,7 +183,7 @@ export function collectActiveLorasFromChain(node, visited = new Set()) {
export function updateConnectedTriggerWords(node, loraNames) {
const connectedNodeIds = getConnectedTriggerToggleNodes(node);
if (connectedNodeIds.length > 0) {
- fetch("/api/loras/get_trigger_words", {
+ fetch("/api/lm/loras/get_trigger_words", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
@@ -285,7 +253,7 @@ export function setupInputWidgetWithAutocomplete(node, inputWidget, originalCall
// Initialize autocomplete on first callback if not already done
if (!autocomplete && inputWidget.inputEl) {
autocomplete = new AutoComplete(inputWidget.inputEl, 'loras', {
- maxItems: 15,
+ maxItems: 20,
minChars: 1,
debounceDelay: 200
});
diff --git a/wiki-images/civarchive-models-page.png b/wiki-images/civarchive-models-page.png
new file mode 100644
index 00000000..780add13
Binary files /dev/null and b/wiki-images/civarchive-models-page.png differ