mirror of
https://github.com/willmiao/ComfyUI-Lora-Manager.git
synced 2026-03-22 13:42:12 -03:00
Compare commits
128 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e08cae97f1 | ||
|
|
a0cf78842e | ||
|
|
0b48654ae6 | ||
|
|
807f4e03ee | ||
|
|
773adb27c9 | ||
|
|
d653494ee1 | ||
|
|
9117ee60dd | ||
|
|
879588e252 | ||
|
|
1725558fbc | ||
|
|
67869f19ff | ||
|
|
e8b37365a6 | ||
|
|
b9516c6b62 | ||
|
|
16c52877ad | ||
|
|
466351b23a | ||
|
|
83fc3282d4 | ||
|
|
d8adb97af6 | ||
|
|
85e511d81c | ||
|
|
8e30008b29 | ||
|
|
e335a527d4 | ||
|
|
25e6d72c4f | ||
|
|
6b1e3f06ed | ||
|
|
94edde7744 | ||
|
|
024dfff021 | ||
|
|
a13fbbff48 | ||
|
|
765c1c42a9 | ||
|
|
2b74b2373d | ||
|
|
b4ad03c9bf | ||
|
|
199c9f742c | ||
|
|
e2f1520e7f | ||
|
|
1606a3ff46 | ||
|
|
b313f36be9 | ||
|
|
fa3625ff72 | ||
|
|
895d13dc96 | ||
|
|
b7e0821f66 | ||
|
|
36e3e62e70 | ||
|
|
7bcf4e4491 | ||
|
|
c12aefa82a | ||
|
|
990a3527e4 | ||
|
|
655d3cab71 | ||
|
|
358e658459 | ||
|
|
f28c32f2b1 | ||
|
|
f5dbd6b8e8 | ||
|
|
2c026a2646 | ||
|
|
bd83f7520e | ||
|
|
b9a4e7a09b | ||
|
|
c30e57ede8 | ||
|
|
0dba1b336d | ||
|
|
820afe9319 | ||
|
|
5a97f4bc75 | ||
|
|
94da404cc5 | ||
|
|
1da476d858 | ||
|
|
1daaff6bd4 | ||
|
|
e252e44403 | ||
|
|
778ad8abd2 | ||
|
|
68cf381b50 | ||
|
|
337f73e711 | ||
|
|
04ba966a6e | ||
|
|
71c8cf84e0 | ||
|
|
db1aec94e5 | ||
|
|
553e1868e1 | ||
|
|
938ceb49b2 | ||
|
|
c0f03b79a8 | ||
|
|
a492638133 | ||
|
|
e17d6c8ebf | ||
|
|
ffcfe5ea3e | ||
|
|
719e18adb6 | ||
|
|
92d471daf5 | ||
|
|
66babf9ee1 | ||
|
|
60df2df324 | ||
|
|
b86bd44c65 | ||
|
|
77bfbe1bc9 | ||
|
|
666db4cdd0 | ||
|
|
233427600a | ||
|
|
84c62f2954 | ||
|
|
5e91073476 | ||
|
|
08267cdb48 | ||
|
|
e50b2c802e | ||
|
|
2eea92abdf | ||
|
|
58ae6b9de6 | ||
|
|
b775333d32 | ||
|
|
bad0a8c5df | ||
|
|
ee25643f68 | ||
|
|
a78868adce | ||
|
|
2ccfbaf073 | ||
|
|
565b61d1c2 | ||
|
|
18d3ecb4da | ||
|
|
a02462fff4 | ||
|
|
ad4574e02f | ||
|
|
822ac046e0 | ||
|
|
55fa31b144 | ||
|
|
d17808d9e5 | ||
|
|
5d9f64e43b | ||
|
|
5dc5fd5971 | ||
|
|
0ff551551e | ||
|
|
9032226724 | ||
|
|
7249c9fd4b | ||
|
|
31d94d7ea2 | ||
|
|
b28f148ce8 | ||
|
|
93cd0b54dc | ||
|
|
7b0c6c8bab | ||
|
|
e14afde4b3 | ||
|
|
4b36d60e46 | ||
|
|
6ef6c116e4 | ||
|
|
42f35be9d3 | ||
|
|
d063d48417 | ||
|
|
c9e305397c | ||
|
|
6142b3dc0c | ||
|
|
d5a2bd1e24 | ||
|
|
1f6fc59aa2 | ||
|
|
41101ad5c6 | ||
|
|
b71b3f99dc | ||
|
|
d655fb8008 | ||
|
|
194f2f702c | ||
|
|
fad43ad003 | ||
|
|
b05762b066 | ||
|
|
13b18ac85f | ||
|
|
eb2af454cc | ||
|
|
7bba24c19f | ||
|
|
0bb75fdf77 | ||
|
|
7c7d2e12b5 | ||
|
|
2121054cb9 | ||
|
|
bf0291ec0e | ||
|
|
932d85617c | ||
|
|
6832469889 | ||
|
|
b0f852cc6c | ||
|
|
d1c65a6186 | ||
|
|
6fbea77137 | ||
|
|
17c5583297 |
201
.agents/skills/lora-manager-e2e/SKILL.md
Normal file
201
.agents/skills/lora-manager-e2e/SKILL.md
Normal file
@@ -0,0 +1,201 @@
|
||||
---
|
||||
name: lora-manager-e2e
|
||||
description: End-to-end testing and validation for LoRa Manager features. Use when performing automated E2E validation of LoRa Manager standalone mode, including starting/restarting the server, using Chrome DevTools MCP to interact with the web UI at http://127.0.0.1:8188/loras, and verifying frontend-to-backend functionality. Covers workflow validation, UI interaction testing, and integration testing between the standalone Python backend and the browser frontend.
|
||||
---
|
||||
|
||||
# LoRa Manager E2E Testing
|
||||
|
||||
This skill provides workflows and utilities for end-to-end testing of LoRa Manager using Chrome DevTools MCP.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- LoRa Manager project cloned and dependencies installed (`pip install -r requirements.txt`)
|
||||
- Chrome browser available for debugging
|
||||
- Chrome DevTools MCP connected
|
||||
|
||||
## Quick Start Workflow
|
||||
|
||||
### 1. Start LoRa Manager Standalone
|
||||
|
||||
```python
|
||||
# Use the provided script to start the server
|
||||
python .agents/skills/lora-manager-e2e/scripts/start_server.py --port 8188
|
||||
```
|
||||
|
||||
Or manually:
|
||||
```bash
|
||||
cd /home/miao/workspace/ComfyUI/custom_nodes/ComfyUI-Lora-Manager
|
||||
python standalone.py --port 8188
|
||||
```
|
||||
|
||||
Wait for server ready message before proceeding.
|
||||
|
||||
### 2. Open Chrome Debug Mode
|
||||
|
||||
```bash
|
||||
# Chrome with remote debugging on port 9222
|
||||
google-chrome --remote-debugging-port=9222 --user-data-dir=/tmp/chrome-lora-manager http://127.0.0.1:8188/loras
|
||||
```
|
||||
|
||||
### 3. Connect Chrome DevTools MCP
|
||||
|
||||
Ensure the MCP server is connected to Chrome at `http://localhost:9222`.
|
||||
|
||||
### 4. Navigate and Interact
|
||||
|
||||
Use Chrome DevTools MCP tools to:
|
||||
- Take snapshots: `take_snapshot`
|
||||
- Click elements: `click`
|
||||
- Fill forms: `fill` or `fill_form`
|
||||
- Evaluate scripts: `evaluate_script`
|
||||
- Wait for elements: `wait_for`
|
||||
|
||||
## Common E2E Test Patterns
|
||||
|
||||
### Pattern: Full Page Load Verification
|
||||
|
||||
```python
|
||||
# Navigate to LoRA list page
|
||||
navigate_page(type="url", url="http://127.0.0.1:8188/loras")
|
||||
|
||||
# Wait for page to load
|
||||
wait_for(text="LoRAs", timeout=10000)
|
||||
|
||||
# Take snapshot to verify UI state
|
||||
snapshot = take_snapshot()
|
||||
```
|
||||
|
||||
### Pattern: Restart Server for Configuration Changes
|
||||
|
||||
```python
|
||||
# Stop current server (if running)
|
||||
# Start with new configuration
|
||||
python .agents/skills/lora-manager-e2e/scripts/start_server.py --port 8188 --restart
|
||||
|
||||
# Wait and refresh browser
|
||||
navigate_page(type="reload", ignoreCache=True)
|
||||
wait_for(text="LoRAs", timeout=15000)
|
||||
```
|
||||
|
||||
### Pattern: Verify Backend API via Frontend
|
||||
|
||||
```python
|
||||
# Execute script in browser to call backend API
|
||||
result = evaluate_script(function="""
|
||||
async () => {
|
||||
const response = await fetch('/loras/api/list');
|
||||
const data = await response.json();
|
||||
return { count: data.length, firstItem: data[0]?.name };
|
||||
}
|
||||
""")
|
||||
```
|
||||
|
||||
### Pattern: Form Submission Flow
|
||||
|
||||
```python
|
||||
# Fill a form (e.g., search or filter)
|
||||
fill_form(elements=[
|
||||
{"uid": "search-input", "value": "character"},
|
||||
])
|
||||
|
||||
# Click submit button
|
||||
click(uid="search-button")
|
||||
|
||||
# Wait for results
|
||||
wait_for(text="Results", timeout=5000)
|
||||
|
||||
# Verify results via snapshot
|
||||
snapshot = take_snapshot()
|
||||
```
|
||||
|
||||
### Pattern: Modal Dialog Interaction
|
||||
|
||||
```python
|
||||
# Open modal (e.g., add LoRA)
|
||||
click(uid="add-lora-button")
|
||||
|
||||
# Wait for modal to appear
|
||||
wait_for(text="Add LoRA", timeout=3000)
|
||||
|
||||
# Fill modal form
|
||||
fill_form(elements=[
|
||||
{"uid": "lora-name", "value": "Test LoRA"},
|
||||
{"uid": "lora-path", "value": "/path/to/lora.safetensors"},
|
||||
])
|
||||
|
||||
# Submit
|
||||
click(uid="modal-submit-button")
|
||||
|
||||
# Wait for success message or close
|
||||
wait_for(text="Success", timeout=5000)
|
||||
```
|
||||
|
||||
## Available Scripts
|
||||
|
||||
### scripts/start_server.py
|
||||
|
||||
Starts or restarts the LoRa Manager standalone server.
|
||||
|
||||
```bash
|
||||
python scripts/start_server.py [--port PORT] [--restart] [--wait]
|
||||
```
|
||||
|
||||
Options:
|
||||
- `--port`: Server port (default: 8188)
|
||||
- `--restart`: Kill existing server before starting
|
||||
- `--wait`: Wait for server to be ready before exiting
|
||||
|
||||
### scripts/wait_for_server.py
|
||||
|
||||
Polls server until ready or timeout.
|
||||
|
||||
```bash
|
||||
python scripts/wait_for_server.py [--port PORT] [--timeout SECONDS]
|
||||
```
|
||||
|
||||
## Test Scenarios Reference
|
||||
|
||||
See [references/test-scenarios.md](references/test-scenarios.md) for detailed test scenarios including:
|
||||
- LoRA list display and filtering
|
||||
- Model metadata editing
|
||||
- Recipe creation and management
|
||||
- Settings configuration
|
||||
- Import/export functionality
|
||||
|
||||
## Network Request Verification
|
||||
|
||||
Use `list_network_requests` and `get_network_request` to verify API calls:
|
||||
|
||||
```python
|
||||
# List recent XHR/fetch requests
|
||||
requests = list_network_requests(resourceTypes=["xhr", "fetch"])
|
||||
|
||||
# Get details of specific request
|
||||
details = get_network_request(reqid=123)
|
||||
```
|
||||
|
||||
## Console Message Monitoring
|
||||
|
||||
```python
|
||||
# Check for errors or warnings
|
||||
messages = list_console_messages(types=["error", "warn"])
|
||||
```
|
||||
|
||||
## Performance Testing
|
||||
|
||||
```python
|
||||
# Start performance trace
|
||||
performance_start_trace(reload=True, autoStop=False)
|
||||
|
||||
# Perform actions...
|
||||
|
||||
# Stop and analyze
|
||||
results = performance_stop_trace()
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
|
||||
Always ensure proper cleanup after tests:
|
||||
1. Stop the standalone server
|
||||
2. Close browser pages (keep at least one open)
|
||||
3. Clear temporary data if needed
|
||||
324
.agents/skills/lora-manager-e2e/references/mcp-cheatsheet.md
Normal file
324
.agents/skills/lora-manager-e2e/references/mcp-cheatsheet.md
Normal file
@@ -0,0 +1,324 @@
|
||||
# Chrome DevTools MCP Cheatsheet for LoRa Manager
|
||||
|
||||
Quick reference for common MCP commands used in LoRa Manager E2E testing.
|
||||
|
||||
## Navigation
|
||||
|
||||
```python
|
||||
# Navigate to LoRA list page
|
||||
navigate_page(type="url", url="http://127.0.0.1:8188/loras")
|
||||
|
||||
# Reload page with cache clear
|
||||
navigate_page(type="reload", ignoreCache=True)
|
||||
|
||||
# Go back/forward
|
||||
navigate_page(type="back")
|
||||
navigate_page(type="forward")
|
||||
```
|
||||
|
||||
## Waiting
|
||||
|
||||
```python
|
||||
# Wait for text to appear
|
||||
wait_for(text="LoRAs", timeout=10000)
|
||||
|
||||
# Wait for specific element (via evaluate_script)
|
||||
evaluate_script(function="""
|
||||
() => {
|
||||
return new Promise((resolve) => {
|
||||
const check = () => {
|
||||
if (document.querySelector('.lora-card')) {
|
||||
resolve(true);
|
||||
} else {
|
||||
setTimeout(check, 100);
|
||||
}
|
||||
};
|
||||
check();
|
||||
});
|
||||
}
|
||||
""")
|
||||
```
|
||||
|
||||
## Taking Snapshots
|
||||
|
||||
```python
|
||||
# Full page snapshot
|
||||
snapshot = take_snapshot()
|
||||
|
||||
# Verbose snapshot (more details)
|
||||
snapshot = take_snapshot(verbose=True)
|
||||
|
||||
# Save to file
|
||||
take_snapshot(filePath="test-snapshots/page-load.json")
|
||||
```
|
||||
|
||||
## Element Interaction
|
||||
|
||||
```python
|
||||
# Click element
|
||||
click(uid="element-uid-from-snapshot")
|
||||
|
||||
# Double click
|
||||
click(uid="element-uid", dblClick=True)
|
||||
|
||||
# Fill input
|
||||
fill(uid="search-input", value="test query")
|
||||
|
||||
# Fill multiple inputs
|
||||
fill_form(elements=[
|
||||
{"uid": "input-1", "value": "value 1"},
|
||||
{"uid": "input-2", "value": "value 2"},
|
||||
])
|
||||
|
||||
# Hover
|
||||
hover(uid="lora-card-1")
|
||||
|
||||
# Upload file
|
||||
upload_file(uid="file-input", filePath="/path/to/file.safetensors")
|
||||
```
|
||||
|
||||
## Keyboard Input
|
||||
|
||||
```python
|
||||
# Press key
|
||||
press_key(key="Enter")
|
||||
press_key(key="Escape")
|
||||
press_key(key="Tab")
|
||||
|
||||
# Keyboard shortcuts
|
||||
press_key(key="Control+A") # Select all
|
||||
press_key(key="Control+F") # Find
|
||||
```
|
||||
|
||||
## JavaScript Evaluation
|
||||
|
||||
```python
|
||||
# Simple evaluation
|
||||
result = evaluate_script(function="() => document.title")
|
||||
|
||||
# Async evaluation
|
||||
result = evaluate_script(function="""
|
||||
async () => {
|
||||
const response = await fetch('/loras/api/list');
|
||||
return await response.json();
|
||||
}
|
||||
""")
|
||||
|
||||
# Check element existence
|
||||
exists = evaluate_script(function="""
|
||||
() => document.querySelector('.lora-card') !== null
|
||||
""")
|
||||
|
||||
# Get element count
|
||||
count = evaluate_script(function="""
|
||||
() => document.querySelectorAll('.lora-card').length
|
||||
""")
|
||||
```
|
||||
|
||||
## Network Monitoring
|
||||
|
||||
```python
|
||||
# List all network requests
|
||||
requests = list_network_requests()
|
||||
|
||||
# Filter by resource type
|
||||
xhr_requests = list_network_requests(resourceTypes=["xhr", "fetch"])
|
||||
|
||||
# Get specific request details
|
||||
details = get_network_request(reqid=123)
|
||||
|
||||
# Include preserved requests from previous navigations
|
||||
all_requests = list_network_requests(includePreservedRequests=True)
|
||||
```
|
||||
|
||||
## Console Monitoring
|
||||
|
||||
```python
|
||||
# List all console messages
|
||||
messages = list_console_messages()
|
||||
|
||||
# Filter by type
|
||||
errors = list_console_messages(types=["error", "warn"])
|
||||
|
||||
# Include preserved messages
|
||||
all_messages = list_console_messages(includePreservedMessages=True)
|
||||
|
||||
# Get specific message
|
||||
details = get_console_message(msgid=1)
|
||||
```
|
||||
|
||||
## Performance Testing
|
||||
|
||||
```python
|
||||
# Start trace with page reload
|
||||
performance_start_trace(reload=True, autoStop=False)
|
||||
|
||||
# Start trace without reload
|
||||
performance_start_trace(reload=False, autoStop=True, filePath="trace.json.gz")
|
||||
|
||||
# Stop trace
|
||||
results = performance_stop_trace()
|
||||
|
||||
# Stop and save
|
||||
performance_stop_trace(filePath="trace-results.json.gz")
|
||||
|
||||
# Analyze specific insight
|
||||
insight = performance_analyze_insight(
|
||||
insightSetId="results.insightSets[0].id",
|
||||
insightName="LCPBreakdown"
|
||||
)
|
||||
```
|
||||
|
||||
## Page Management
|
||||
|
||||
```python
|
||||
# List open pages
|
||||
pages = list_pages()
|
||||
|
||||
# Select a page
|
||||
select_page(pageId=0, bringToFront=True)
|
||||
|
||||
# Create new page
|
||||
new_page(url="http://127.0.0.1:8188/loras")
|
||||
|
||||
# Close page (keep at least one open!)
|
||||
close_page(pageId=1)
|
||||
|
||||
# Resize page
|
||||
resize_page(width=1920, height=1080)
|
||||
```
|
||||
|
||||
## Screenshots
|
||||
|
||||
```python
|
||||
# Full page screenshot
|
||||
take_screenshot(fullPage=True)
|
||||
|
||||
# Viewport screenshot
|
||||
take_screenshot()
|
||||
|
||||
# Element screenshot
|
||||
take_screenshot(uid="lora-card-1")
|
||||
|
||||
# Save to file
|
||||
take_screenshot(filePath="screenshots/page.png", format="png")
|
||||
|
||||
# JPEG with quality
|
||||
take_screenshot(filePath="screenshots/page.jpg", format="jpeg", quality=90)
|
||||
```
|
||||
|
||||
## Dialog Handling
|
||||
|
||||
```python
|
||||
# Accept dialog
|
||||
handle_dialog(action="accept")
|
||||
|
||||
# Accept with text input
|
||||
handle_dialog(action="accept", promptText="user input")
|
||||
|
||||
# Dismiss dialog
|
||||
handle_dialog(action="dismiss")
|
||||
```
|
||||
|
||||
## Device Emulation
|
||||
|
||||
```python
|
||||
# Mobile viewport
|
||||
emulate(viewport={"width": 375, "height": 667, "isMobile": True, "hasTouch": True})
|
||||
|
||||
# Tablet viewport
|
||||
emulate(viewport={"width": 768, "height": 1024, "isMobile": True, "hasTouch": True})
|
||||
|
||||
# Desktop viewport
|
||||
emulate(viewport={"width": 1920, "height": 1080})
|
||||
|
||||
# Network throttling
|
||||
emulate(networkConditions="Slow 3G")
|
||||
emulate(networkConditions="Fast 4G")
|
||||
|
||||
# CPU throttling
|
||||
emulate(cpuThrottlingRate=4) # 4x slowdown
|
||||
|
||||
# Geolocation
|
||||
emulate(geolocation={"latitude": 37.7749, "longitude": -122.4194})
|
||||
|
||||
# User agent
|
||||
emulate(userAgent="Mozilla/5.0 (Custom)")
|
||||
|
||||
# Reset emulation
|
||||
emulate(viewport=None, networkConditions="No emulation", userAgent=None)
|
||||
```
|
||||
|
||||
## Drag and Drop
|
||||
|
||||
```python
|
||||
# Drag element to another
|
||||
drag(from_uid="draggable-item", to_uid="drop-zone")
|
||||
```
|
||||
|
||||
## Common LoRa Manager Test Patterns
|
||||
|
||||
### Verify LoRA Cards Loaded
|
||||
|
||||
```python
|
||||
navigate_page(type="url", url="http://127.0.0.1:8188/loras")
|
||||
wait_for(text="LoRAs", timeout=10000)
|
||||
|
||||
# Check if cards loaded
|
||||
result = evaluate_script(function="""
|
||||
() => {
|
||||
const cards = document.querySelectorAll('.lora-card');
|
||||
return {
|
||||
count: cards.length,
|
||||
hasData: cards.length > 0
|
||||
};
|
||||
}
|
||||
""")
|
||||
```
|
||||
|
||||
### Search and Verify Results
|
||||
|
||||
```python
|
||||
fill(uid="search-input", value="character")
|
||||
press_key(key="Enter")
|
||||
wait_for(timeout=2000) # Wait for debounce
|
||||
|
||||
# Check results
|
||||
result = evaluate_script(function="""
|
||||
() => {
|
||||
const cards = document.querySelectorAll('.lora-card');
|
||||
const names = Array.from(cards).map(c => c.dataset.name || c.textContent);
|
||||
return { count: cards.length, names };
|
||||
}
|
||||
""")
|
||||
```
|
||||
|
||||
### Check API Response
|
||||
|
||||
```python
|
||||
# Trigger API call
|
||||
evaluate_script(function="""
|
||||
() => window.loraApiCallPromise = fetch('/loras/api/list').then(r => r.json())
|
||||
""")
|
||||
|
||||
# Wait and get result
|
||||
import time
|
||||
time.sleep(1)
|
||||
|
||||
result = evaluate_script(function="""
|
||||
async () => await window.loraApiCallPromise
|
||||
""")
|
||||
```
|
||||
|
||||
### Monitor Console for Errors
|
||||
|
||||
```python
|
||||
# Before test: clear console (navigate reloads)
|
||||
navigate_page(type="reload")
|
||||
|
||||
# ... perform actions ...
|
||||
|
||||
# Check for errors
|
||||
errors = list_console_messages(types=["error"])
|
||||
assert len(errors) == 0, f"Console errors: {errors}"
|
||||
```
|
||||
272
.agents/skills/lora-manager-e2e/references/test-scenarios.md
Normal file
272
.agents/skills/lora-manager-e2e/references/test-scenarios.md
Normal file
@@ -0,0 +1,272 @@
|
||||
# LoRa Manager E2E Test Scenarios
|
||||
|
||||
This document provides detailed test scenarios for end-to-end validation of LoRa Manager features.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [LoRA List Page](#lora-list-page)
|
||||
2. [Model Details](#model-details)
|
||||
3. [Recipes](#recipes)
|
||||
4. [Settings](#settings)
|
||||
5. [Import/Export](#importexport)
|
||||
|
||||
---
|
||||
|
||||
## LoRA List Page
|
||||
|
||||
### Scenario: Page Load and Display
|
||||
|
||||
**Objective**: Verify the LoRA list page loads correctly and displays models.
|
||||
|
||||
**Steps**:
|
||||
1. Navigate to `http://127.0.0.1:8188/loras`
|
||||
2. Wait for page title "LoRAs" to appear
|
||||
3. Take snapshot to verify:
|
||||
- Header with "LoRAs" title is visible
|
||||
- Search/filter controls are present
|
||||
- Grid/list view toggle exists
|
||||
- LoRA cards are displayed (if models exist)
|
||||
- Pagination controls (if applicable)
|
||||
|
||||
**Expected Result**: Page loads without errors, UI elements are present.
|
||||
|
||||
### Scenario: Search Functionality
|
||||
|
||||
**Objective**: Verify search filters LoRA models correctly.
|
||||
|
||||
**Steps**:
|
||||
1. Ensure at least one LoRA exists with known name (e.g., "test-character")
|
||||
2. Navigate to LoRA list page
|
||||
3. Enter search term in search box: "test"
|
||||
4. Press Enter or click search button
|
||||
5. Wait for results to update
|
||||
|
||||
**Expected Result**: Only LoRAs matching search term are displayed.
|
||||
|
||||
**Verification Script**:
|
||||
```python
|
||||
# After search, verify filtered results
|
||||
evaluate_script(function="""
|
||||
() => {
|
||||
const cards = document.querySelectorAll('.lora-card');
|
||||
const names = Array.from(cards).map(c => c.dataset.name);
|
||||
return { count: cards.length, names };
|
||||
}
|
||||
""")
|
||||
```
|
||||
|
||||
### Scenario: Filter by Tags
|
||||
|
||||
**Objective**: Verify tag filtering works correctly.
|
||||
|
||||
**Steps**:
|
||||
1. Navigate to LoRA list page
|
||||
2. Click on a tag (e.g., "character", "style")
|
||||
3. Wait for filtered results
|
||||
|
||||
**Expected Result**: Only LoRAs with selected tag are displayed.
|
||||
|
||||
### Scenario: View Mode Toggle
|
||||
|
||||
**Objective**: Verify grid/list view toggle works.
|
||||
|
||||
**Steps**:
|
||||
1. Navigate to LoRA list page
|
||||
2. Click list view button
|
||||
3. Verify list layout
|
||||
4. Click grid view button
|
||||
5. Verify grid layout
|
||||
|
||||
**Expected Result**: View mode changes correctly, layout updates.
|
||||
|
||||
---
|
||||
|
||||
## Model Details
|
||||
|
||||
### Scenario: Open Model Details
|
||||
|
||||
**Objective**: Verify clicking a LoRA opens its details.
|
||||
|
||||
**Steps**:
|
||||
1. Navigate to LoRA list page
|
||||
2. Click on a LoRA card
|
||||
3. Wait for details panel/modal to open
|
||||
|
||||
**Expected Result**: Details panel shows:
|
||||
- Model name
|
||||
- Preview image
|
||||
- Metadata (trigger words, tags, etc.)
|
||||
- Action buttons (edit, delete, etc.)
|
||||
|
||||
### Scenario: Edit Model Metadata
|
||||
|
||||
**Objective**: Verify metadata editing works end-to-end.
|
||||
|
||||
**Steps**:
|
||||
1. Open a LoRA's details
|
||||
2. Click "Edit" button
|
||||
3. Modify trigger words field
|
||||
4. Add/remove tags
|
||||
5. Save changes
|
||||
6. Refresh page
|
||||
7. Reopen the same LoRA
|
||||
|
||||
**Expected Result**: Changes persist after refresh.
|
||||
|
||||
### Scenario: Delete Model
|
||||
|
||||
**Objective**: Verify model deletion works.
|
||||
|
||||
**Steps**:
|
||||
1. Open a LoRA's details
|
||||
2. Click "Delete" button
|
||||
3. Confirm deletion in dialog
|
||||
4. Wait for removal
|
||||
|
||||
**Expected Result**: Model removed from list, success message shown.
|
||||
|
||||
---
|
||||
|
||||
## Recipes
|
||||
|
||||
### Scenario: Recipe List Display
|
||||
|
||||
**Objective**: Verify recipes page loads and displays recipes.
|
||||
|
||||
**Steps**:
|
||||
1. Navigate to `http://127.0.0.1:8188/recipes`
|
||||
2. Wait for "Recipes" title
|
||||
3. Take snapshot
|
||||
|
||||
**Expected Result**: Recipe list displayed with cards/items.
|
||||
|
||||
### Scenario: Create New Recipe
|
||||
|
||||
**Objective**: Verify recipe creation workflow.
|
||||
|
||||
**Steps**:
|
||||
1. Navigate to recipes page
|
||||
2. Click "New Recipe" button
|
||||
3. Fill recipe form:
|
||||
- Name: "Test Recipe"
|
||||
- Description: "E2E test recipe"
|
||||
- Add LoRA models
|
||||
4. Save recipe
|
||||
5. Verify recipe appears in list
|
||||
|
||||
**Expected Result**: New recipe created and displayed.
|
||||
|
||||
### Scenario: Apply Recipe
|
||||
|
||||
**Objective**: Verify applying a recipe to ComfyUI.
|
||||
|
||||
**Steps**:
|
||||
1. Open a recipe
|
||||
2. Click "Apply" or "Load in ComfyUI"
|
||||
3. Verify action completes
|
||||
|
||||
**Expected Result**: Recipe applied successfully.
|
||||
|
||||
---
|
||||
|
||||
## Settings
|
||||
|
||||
### Scenario: Settings Page Load
|
||||
|
||||
**Objective**: Verify settings page displays correctly.
|
||||
|
||||
**Steps**:
|
||||
1. Navigate to `http://127.0.0.1:8188/settings`
|
||||
2. Wait for "Settings" title
|
||||
3. Take snapshot
|
||||
|
||||
**Expected Result**: Settings form with various options displayed.
|
||||
|
||||
### Scenario: Change Setting and Restart
|
||||
|
||||
**Objective**: Verify settings persist after restart.
|
||||
|
||||
**Steps**:
|
||||
1. Navigate to settings page
|
||||
2. Change a setting (e.g., default view mode)
|
||||
3. Save settings
|
||||
4. Restart server: `python scripts/start_server.py --restart --wait`
|
||||
5. Refresh browser page
|
||||
6. Navigate to settings
|
||||
|
||||
**Expected Result**: Changed setting value persists.
|
||||
|
||||
---
|
||||
|
||||
## Import/Export
|
||||
|
||||
### Scenario: Export Models List
|
||||
|
||||
**Objective**: Verify export functionality.
|
||||
|
||||
**Steps**:
|
||||
1. Navigate to LoRA list
|
||||
2. Click "Export" button
|
||||
3. Select format (JSON/CSV)
|
||||
4. Download file
|
||||
|
||||
**Expected Result**: File downloaded with correct data.
|
||||
|
||||
### Scenario: Import Models
|
||||
|
||||
**Objective**: Verify import functionality.
|
||||
|
||||
**Steps**:
|
||||
1. Prepare import file
|
||||
2. Navigate to import page
|
||||
3. Upload file
|
||||
4. Verify import results
|
||||
|
||||
**Expected Result**: Models imported successfully, confirmation shown.
|
||||
|
||||
---
|
||||
|
||||
## API Integration Tests
|
||||
|
||||
### Scenario: Verify API Endpoints
|
||||
|
||||
**Objective**: Verify backend API responds correctly.
|
||||
|
||||
**Test via browser console**:
|
||||
```javascript
|
||||
// List LoRAs
|
||||
fetch('/loras/api/list').then(r => r.json()).then(console.log)
|
||||
|
||||
// Get LoRA details
|
||||
fetch('/loras/api/detail/<id>').then(r => r.json()).then(console.log)
|
||||
|
||||
// Search LoRAs
|
||||
fetch('/loras/api/search?q=test').then(r => r.json()).then(console.log)
|
||||
```
|
||||
|
||||
**Expected Result**: APIs return valid JSON with expected structure.
|
||||
|
||||
---
|
||||
|
||||
## Console Error Monitoring
|
||||
|
||||
During all tests, monitor browser console for errors:
|
||||
|
||||
```python
|
||||
# Check for JavaScript errors
|
||||
messages = list_console_messages(types=["error"])
|
||||
assert len(messages) == 0, f"Console errors found: {messages}"
|
||||
```
|
||||
|
||||
## Network Request Verification
|
||||
|
||||
Verify key API calls are made:
|
||||
|
||||
```python
|
||||
# List XHR requests
|
||||
requests = list_network_requests(resourceTypes=["xhr", "fetch"])
|
||||
|
||||
# Look for specific endpoints
|
||||
lora_list_requests = [r for r in requests if "/api/list" in r.get("url", "")]
|
||||
assert len(lora_list_requests) > 0, "LoRA list API not called"
|
||||
```
|
||||
193
.agents/skills/lora-manager-e2e/scripts/example_e2e_test.py
Executable file
193
.agents/skills/lora-manager-e2e/scripts/example_e2e_test.py
Executable file
@@ -0,0 +1,193 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Example E2E test demonstrating LoRa Manager testing workflow.
|
||||
|
||||
This script shows how to:
|
||||
1. Start the standalone server
|
||||
2. Use Chrome DevTools MCP to interact with the UI
|
||||
3. Verify functionality end-to-end
|
||||
|
||||
Note: This is a template. Actual execution requires Chrome DevTools MCP.
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
def run_test():
|
||||
"""Run example E2E test flow."""
|
||||
|
||||
print("=" * 60)
|
||||
print("LoRa Manager E2E Test Example")
|
||||
print("=" * 60)
|
||||
|
||||
# Step 1: Start server
|
||||
print("\n[1/5] Starting LoRa Manager standalone server...")
|
||||
result = subprocess.run(
|
||||
[sys.executable, "start_server.py", "--port", "8188", "--wait", "--timeout", "30"],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
if result.returncode != 0:
|
||||
print(f"Failed to start server: {result.stderr}")
|
||||
return 1
|
||||
print("Server ready!")
|
||||
|
||||
# Step 2: Open Chrome (manual step - show command)
|
||||
print("\n[2/5] Open Chrome with debug mode:")
|
||||
print("google-chrome --remote-debugging-port=9222 --user-data-dir=/tmp/chrome-lora-manager http://127.0.0.1:8188/loras")
|
||||
print("(In actual test, this would be automated via MCP)")
|
||||
|
||||
# Step 3: Navigate and verify page load
|
||||
print("\n[3/5] Page Load Verification:")
|
||||
print("""
|
||||
MCP Commands to execute:
|
||||
1. navigate_page(type="url", url="http://127.0.0.1:8188/loras")
|
||||
2. wait_for(text="LoRAs", timeout=10000)
|
||||
3. snapshot = take_snapshot()
|
||||
""")
|
||||
|
||||
# Step 4: Test search functionality
|
||||
print("\n[4/5] Search Functionality Test:")
|
||||
print("""
|
||||
MCP Commands to execute:
|
||||
1. fill(uid="search-input", value="test")
|
||||
2. press_key(key="Enter")
|
||||
3. wait_for(text="Results", timeout=5000)
|
||||
4. result = evaluate_script(function="""
|
||||
() => {
|
||||
const cards = document.querySelectorAll('.lora-card');
|
||||
return { count: cards.length };
|
||||
}
|
||||
""")
|
||||
""")
|
||||
|
||||
# Step 5: Verify API
|
||||
print("\n[5/5] API Verification:")
|
||||
print("""
|
||||
MCP Commands to execute:
|
||||
1. api_result = evaluate_script(function="""
|
||||
async () => {
|
||||
const response = await fetch('/loras/api/list');
|
||||
const data = await response.json();
|
||||
return { count: data.length, status: response.status };
|
||||
}
|
||||
""")
|
||||
2. Verify api_result['status'] == 200
|
||||
""")
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("Test flow completed!")
|
||||
print("=" * 60)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
def example_restart_flow():
|
||||
"""Example: Testing configuration change that requires restart."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("Example: Server Restart Flow")
|
||||
print("=" * 60)
|
||||
|
||||
print("""
|
||||
Scenario: Change setting and verify after restart
|
||||
|
||||
Steps:
|
||||
1. Navigate to settings page
|
||||
- navigate_page(type="url", url="http://127.0.0.1:8188/settings")
|
||||
|
||||
2. Change a setting (e.g., theme)
|
||||
- fill(uid="theme-select", value="dark")
|
||||
- click(uid="save-settings-button")
|
||||
|
||||
3. Restart server
|
||||
- subprocess.run([python, "start_server.py", "--restart", "--wait"])
|
||||
|
||||
4. Refresh browser
|
||||
- navigate_page(type="reload", ignoreCache=True)
|
||||
- wait_for(text="LoRAs", timeout=15000)
|
||||
|
||||
5. Verify setting persisted
|
||||
- navigate_page(type="url", url="http://127.0.0.1:8188/settings")
|
||||
- theme = evaluate_script(function="() => document.querySelector('#theme-select').value")
|
||||
- assert theme == "dark"
|
||||
""")
|
||||
|
||||
|
||||
def example_modal_interaction():
|
||||
"""Example: Testing modal dialog interaction."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("Example: Modal Dialog Interaction")
|
||||
print("=" * 60)
|
||||
|
||||
print("""
|
||||
Scenario: Add new LoRA via modal
|
||||
|
||||
Steps:
|
||||
1. Open modal
|
||||
- click(uid="add-lora-button")
|
||||
- wait_for(text="Add LoRA", timeout=3000)
|
||||
|
||||
2. Fill form
|
||||
- fill_form(elements=[
|
||||
{"uid": "lora-name", "value": "Test Character"},
|
||||
{"uid": "lora-path", "value": "/models/test.safetensors"},
|
||||
])
|
||||
|
||||
3. Submit
|
||||
- click(uid="modal-submit-button")
|
||||
|
||||
4. Verify success
|
||||
- wait_for(text="Successfully added", timeout=5000)
|
||||
- snapshot = take_snapshot()
|
||||
""")
|
||||
|
||||
|
||||
def example_network_monitoring():
|
||||
"""Example: Network request monitoring."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("Example: Network Request Monitoring")
|
||||
print("=" * 60)
|
||||
|
||||
print("""
|
||||
Scenario: Verify API calls during user interaction
|
||||
|
||||
Steps:
|
||||
1. Clear network log (implicit on navigation)
|
||||
- navigate_page(type="url", url="http://127.0.0.1:8188/loras")
|
||||
|
||||
2. Perform action that triggers API call
|
||||
- fill(uid="search-input", value="character")
|
||||
- press_key(key="Enter")
|
||||
|
||||
3. List network requests
|
||||
- requests = list_network_requests(resourceTypes=["xhr", "fetch"])
|
||||
|
||||
4. Find search API call
|
||||
- search_requests = [r for r in requests if "/api/search" in r.get("url", "")]
|
||||
- assert len(search_requests) > 0, "Search API was not called"
|
||||
|
||||
5. Get request details
|
||||
- if search_requests:
|
||||
details = get_network_request(reqid=search_requests[0]["reqid"])
|
||||
- Verify request method, response status, etc.
|
||||
""")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("LoRa Manager E2E Test Examples\n")
|
||||
print("This script demonstrates E2E testing patterns.\n")
|
||||
print("Note: Actual execution requires Chrome DevTools MCP connection.\n")
|
||||
|
||||
run_test()
|
||||
example_restart_flow()
|
||||
example_modal_interaction()
|
||||
example_network_monitoring()
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("All examples shown!")
|
||||
print("=" * 60)
|
||||
169
.agents/skills/lora-manager-e2e/scripts/start_server.py
Executable file
169
.agents/skills/lora-manager-e2e/scripts/start_server.py
Executable file
@@ -0,0 +1,169 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Start or restart LoRa Manager standalone server for E2E testing.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import socket
|
||||
import signal
|
||||
import os
|
||||
|
||||
|
||||
def find_server_process(port: int) -> list[int]:
|
||||
"""Find PIDs of processes listening on the given port."""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["lsof", "-ti", f":{port}"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=False
|
||||
)
|
||||
if result.returncode == 0 and result.stdout.strip():
|
||||
return [int(pid) for pid in result.stdout.strip().split("\n") if pid]
|
||||
except FileNotFoundError:
|
||||
# lsof not available, try netstat
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["netstat", "-tlnp"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=False
|
||||
)
|
||||
pids = []
|
||||
for line in result.stdout.split("\n"):
|
||||
if f":{port}" in line:
|
||||
parts = line.split()
|
||||
for part in parts:
|
||||
if "/" in part:
|
||||
try:
|
||||
pid = int(part.split("/")[0])
|
||||
pids.append(pid)
|
||||
except ValueError:
|
||||
pass
|
||||
return pids
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
return []
|
||||
|
||||
|
||||
def kill_server(port: int) -> None:
|
||||
"""Kill processes using the specified port."""
|
||||
pids = find_server_process(port)
|
||||
for pid in pids:
|
||||
try:
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
print(f"Sent SIGTERM to process {pid}")
|
||||
except ProcessLookupError:
|
||||
pass
|
||||
|
||||
# Wait for processes to terminate
|
||||
time.sleep(1)
|
||||
|
||||
# Force kill if still running
|
||||
pids = find_server_process(port)
|
||||
for pid in pids:
|
||||
try:
|
||||
os.kill(pid, signal.SIGKILL)
|
||||
print(f"Sent SIGKILL to process {pid}")
|
||||
except ProcessLookupError:
|
||||
pass
|
||||
|
||||
|
||||
def is_server_ready(port: int, timeout: float = 0.5) -> bool:
|
||||
"""Check if server is accepting connections."""
|
||||
try:
|
||||
with socket.create_connection(("127.0.0.1", port), timeout=timeout):
|
||||
return True
|
||||
except (socket.timeout, ConnectionRefusedError, OSError):
|
||||
return False
|
||||
|
||||
|
||||
def wait_for_server(port: int, timeout: int = 30) -> bool:
|
||||
"""Wait for server to become ready."""
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
if is_server_ready(port):
|
||||
return True
|
||||
time.sleep(0.5)
|
||||
return False
|
||||
|
||||
|
||||
def main() -> int:
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Start LoRa Manager standalone server for E2E testing"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--port",
|
||||
type=int,
|
||||
default=8188,
|
||||
help="Server port (default: 8188)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--restart",
|
||||
action="store_true",
|
||||
help="Kill existing server before starting"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--wait",
|
||||
action="store_true",
|
||||
help="Wait for server to be ready before exiting"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--timeout",
|
||||
type=int,
|
||||
default=30,
|
||||
help="Timeout for waiting (default: 30)"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Get project root (parent of .agents directory)
|
||||
script_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
skill_dir = os.path.dirname(script_dir)
|
||||
project_root = os.path.dirname(os.path.dirname(os.path.dirname(skill_dir)))
|
||||
|
||||
# Restart if requested
|
||||
if args.restart:
|
||||
print(f"Killing existing server on port {args.port}...")
|
||||
kill_server(args.port)
|
||||
time.sleep(1)
|
||||
|
||||
# Check if already running
|
||||
if is_server_ready(args.port):
|
||||
print(f"Server already running on port {args.port}")
|
||||
return 0
|
||||
|
||||
# Start server
|
||||
print(f"Starting LoRa Manager standalone server on port {args.port}...")
|
||||
cmd = [sys.executable, "standalone.py", "--port", str(args.port)]
|
||||
|
||||
# Start in background
|
||||
process = subprocess.Popen(
|
||||
cmd,
|
||||
cwd=project_root,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
start_new_session=True
|
||||
)
|
||||
|
||||
print(f"Server process started with PID {process.pid}")
|
||||
|
||||
# Wait for ready if requested
|
||||
if args.wait:
|
||||
print(f"Waiting for server to be ready (timeout: {args.timeout}s)...")
|
||||
if wait_for_server(args.port, args.timeout):
|
||||
print(f"Server ready at http://127.0.0.1:{args.port}/loras")
|
||||
return 0
|
||||
else:
|
||||
print(f"Timeout waiting for server")
|
||||
return 1
|
||||
|
||||
print(f"Server starting at http://127.0.0.1:{args.port}/loras")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
61
.agents/skills/lora-manager-e2e/scripts/wait_for_server.py
Executable file
61
.agents/skills/lora-manager-e2e/scripts/wait_for_server.py
Executable file
@@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Wait for LoRa Manager server to become ready.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
def is_server_ready(port: int, timeout: float = 0.5) -> bool:
|
||||
"""Check if server is accepting connections."""
|
||||
try:
|
||||
with socket.create_connection(("127.0.0.1", port), timeout=timeout):
|
||||
return True
|
||||
except (socket.timeout, ConnectionRefusedError, OSError):
|
||||
return False
|
||||
|
||||
|
||||
def wait_for_server(port: int, timeout: int = 30) -> bool:
|
||||
"""Wait for server to become ready."""
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
if is_server_ready(port):
|
||||
return True
|
||||
time.sleep(0.5)
|
||||
return False
|
||||
|
||||
|
||||
def main() -> int:
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Wait for LoRa Manager server to become ready"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--port",
|
||||
type=int,
|
||||
default=8188,
|
||||
help="Server port (default: 8188)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--timeout",
|
||||
type=int,
|
||||
default=30,
|
||||
help="Timeout in seconds (default: 30)"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
print(f"Waiting for server on port {args.port} (timeout: {args.timeout}s)...")
|
||||
|
||||
if wait_for_server(args.port, args.timeout):
|
||||
print(f"Server ready at http://127.0.0.1:{args.port}/loras")
|
||||
return 0
|
||||
else:
|
||||
print(f"Timeout: Server not ready after {args.timeout}s")
|
||||
return 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -19,3 +19,6 @@ model_cache/
|
||||
vue-widgets/node_modules/
|
||||
vue-widgets/.vite/
|
||||
vue-widgets/dist/
|
||||
|
||||
# Hypothesis test cache
|
||||
.hypothesis/
|
||||
|
||||
181
AGENTS.md
181
AGENTS.md
@@ -25,168 +25,127 @@ pytest tests/test_recipes.py::test_function_name
|
||||
|
||||
# Run backend tests with coverage
|
||||
COVERAGE_FILE=coverage/backend/.coverage pytest \
|
||||
--cov=py \
|
||||
--cov=standalone \
|
||||
--cov=py --cov=standalone \
|
||||
--cov-report=term-missing \
|
||||
--cov-report=html:coverage/backend/html \
|
||||
--cov-report=xml:coverage/backend/coverage.xml \
|
||||
--cov-report=json:coverage/backend/coverage.json
|
||||
--cov-report=xml:coverage/backend/coverage.xml
|
||||
```
|
||||
|
||||
### Frontend Development
|
||||
### Frontend Development (Standalone Web UI)
|
||||
|
||||
```bash
|
||||
# Install frontend dependencies
|
||||
npm install
|
||||
npm test # Run all tests (JS + Vue)
|
||||
npm run test:js # Run JS tests only
|
||||
npm run test:watch # Watch mode
|
||||
npm run test:coverage # Generate coverage report
|
||||
```
|
||||
|
||||
# Run frontend tests
|
||||
npm test
|
||||
### Vue Widget Development
|
||||
|
||||
# Run frontend tests in watch mode
|
||||
npm run test:watch
|
||||
|
||||
# Run frontend tests with coverage
|
||||
npm run test:coverage
|
||||
```bash
|
||||
cd vue-widgets
|
||||
npm install
|
||||
npm run dev # Build in watch mode
|
||||
npm run build # Build production bundle
|
||||
npm run typecheck # Run TypeScript type checking
|
||||
npm test # Run Vue widget tests
|
||||
npm run test:watch # Watch mode
|
||||
npm run test:coverage # Generate coverage report
|
||||
```
|
||||
|
||||
## Python Code Style
|
||||
|
||||
### Imports
|
||||
### Imports & Formatting
|
||||
|
||||
- Use `from __future__ import annotations` for forward references in type hints
|
||||
- Group imports: standard library, third-party, local (separated by blank lines)
|
||||
- Use absolute imports within `py/` package: `from ..services import X`
|
||||
- Mock ComfyUI dependencies in tests using `tests/conftest.py` patterns
|
||||
|
||||
### Formatting & Types
|
||||
|
||||
- PEP 8 with 4-space indentation
|
||||
- Type hints required for function signatures and class attributes
|
||||
- Use `TYPE_CHECKING` guard for type-checking-only imports
|
||||
- Prefer dataclasses for simple data containers
|
||||
- Use `Optional[T]` for nullable types, `Union[T, None]` only when necessary
|
||||
- Use `from __future__ import annotations` for forward references
|
||||
- Group imports: standard library, third-party, local (blank line separated)
|
||||
- Absolute imports within `py/`: `from ..services import X`
|
||||
- PEP 8 with 4-space indentation, type hints required
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
- Files: `snake_case.py` (e.g., `model_scanner.py`, `lora_service.py`)
|
||||
- Classes: `PascalCase` (e.g., `ModelScanner`, `LoraService`)
|
||||
- Functions/variables: `snake_case` (e.g., `get_instance`, `model_type`)
|
||||
- Constants: `UPPER_SNAKE_CASE` (e.g., `VALID_LORA_TYPES`)
|
||||
- Private members: `_single_underscore` (protected), `__double_underscore` (name-mangled)
|
||||
- Files: `snake_case.py`, Classes: `PascalCase`, Functions/vars: `snake_case`
|
||||
- Constants: `UPPER_SNAKE_CASE`, Private: `_protected`, `__mangled`
|
||||
|
||||
### Error Handling
|
||||
### Error Handling & Async
|
||||
|
||||
- Use `logging.getLogger(__name__)` for module-level loggers
|
||||
- Define custom exceptions in `py/services/errors.py`
|
||||
- Use `asyncio.Lock` for thread-safe singleton patterns
|
||||
- Raise specific exceptions with descriptive messages
|
||||
- Log errors at appropriate levels (DEBUG, INFO, WARNING, ERROR, CRITICAL)
|
||||
- Use `logging.getLogger(__name__)`, define custom exceptions in `py/services/errors.py`
|
||||
- `async def` for I/O, `@pytest.mark.asyncio` for async tests
|
||||
- Singleton with `asyncio.Lock`: see `ModelScanner.get_instance()`
|
||||
- Return `aiohttp.web.json_response` or `web.Response`
|
||||
|
||||
### Async Patterns
|
||||
### Testing
|
||||
|
||||
- Use `async def` for I/O-bound operations
|
||||
- Mark async tests with `@pytest.mark.asyncio`
|
||||
- Use `async with` for context managers
|
||||
- Singleton pattern with class-level locks: see `ModelScanner.get_instance()`
|
||||
- Use `aiohttp.web.Response` for HTTP responses
|
||||
- `pytest` with `--import-mode=importlib`
|
||||
- Fixtures in `tests/conftest.py`, use `tmp_path_factory` for isolation
|
||||
- Mark tests needing real paths: `@pytest.mark.no_settings_dir_isolation`
|
||||
- Mock ComfyUI dependencies via conftest patterns
|
||||
|
||||
### Testing Patterns
|
||||
|
||||
- Use `pytest` with `--import-mode=importlib`
|
||||
- Fixtures in `tests/conftest.py` handle ComfyUI mocking
|
||||
- Use `@pytest.mark.no_settings_dir_isolation` for tests needing real paths
|
||||
- Test files: `tests/test_*.py`
|
||||
- Use `tmp_path_factory` for temporary directory isolation
|
||||
|
||||
## JavaScript Code Style
|
||||
## JavaScript/TypeScript Code Style
|
||||
|
||||
### Imports & Modules
|
||||
|
||||
- ES modules with `import`/`export`
|
||||
- Use `import { app } from "../../scripts/app.js"` for ComfyUI integration
|
||||
- Export named functions/classes: `export function foo() {}`
|
||||
- Widget files use `*_widget.js` suffix
|
||||
- ES modules: `import { app } from "../../scripts/app.js"` for ComfyUI
|
||||
- Vue: `import { ref, computed } from 'vue'`, type imports: `import type { Foo }`
|
||||
- Export named functions: `export function foo() {}`
|
||||
|
||||
### Naming & Formatting
|
||||
|
||||
- camelCase for functions, variables, object properties
|
||||
- PascalCase for classes/constructors
|
||||
- Constants: `UPPER_SNAKE_CASE` (e.g., `CONVERTED_TYPE`)
|
||||
- Files: `snake_case.js` or `kebab-case.js`
|
||||
- camelCase for functions/vars/props, PascalCase for classes
|
||||
- Constants: `UPPER_SNAKE_CASE`, Files: `snake_case.js` or `kebab-case.js`
|
||||
- 2-space indentation preferred (follow existing file conventions)
|
||||
- Vue Single File Components: `<script setup lang="ts">` preferred
|
||||
|
||||
### Widget Development
|
||||
|
||||
- Use `app.registerExtension()` to register ComfyUI extensions
|
||||
- Use `node.addDOMWidget(name, type, element, options)` for custom widgets
|
||||
- Event handlers attached via `addEventListener` or widget callbacks
|
||||
- See `web/comfyui/utils.js` for shared utilities
|
||||
- ComfyUI: `app.registerExtension()`, `node.addDOMWidget(name, type, element, options)`
|
||||
- Event handlers via `addEventListener` or widget callbacks
|
||||
- Shared utilities: `web/comfyui/utils.js`
|
||||
|
||||
### Vue Composables Pattern
|
||||
|
||||
- Use composition API: `useXxxState(widget)`, return reactive refs and methods
|
||||
- Guard restoration loops with flag: `let isRestoring = false`
|
||||
- Build config from state: `const buildConfig = (): Config => { ... }`
|
||||
|
||||
## Architecture Patterns
|
||||
|
||||
### Service Layer
|
||||
|
||||
- Use `ServiceRegistry` singleton for dependency injection
|
||||
- Services follow singleton pattern via `get_instance()` class method
|
||||
- `ServiceRegistry` singleton for DI, services use `get_instance()` classmethod
|
||||
- Separate scanners (discovery) from services (business logic)
|
||||
- Handlers in `py/routes/handlers/` implement route logic
|
||||
- Handlers in `py/routes/handlers/` are pure functions with deps as params
|
||||
|
||||
### Model Types
|
||||
### Model Types & Routes
|
||||
|
||||
- BaseModelService is abstract base for LoRA, Checkpoint, Embedding services
|
||||
- ModelScanner provides file discovery and hash-based deduplication
|
||||
- Persistent cache in SQLite via `PersistentModelCache`
|
||||
- Metadata sync from CivitAI/CivArchive via `MetadataSyncService`
|
||||
|
||||
### Routes & Handlers
|
||||
|
||||
- Route registrars organize endpoints by domain: `ModelRouteRegistrar`, etc.
|
||||
- Handlers are pure functions taking dependencies as parameters
|
||||
- Use `WebSocketManager` for real-time progress updates
|
||||
- Return `aiohttp.web.json_response` or `web.Response`
|
||||
- `BaseModelService` base for LoRA, Checkpoint, Embedding
|
||||
- `ModelScanner` for file discovery, hash deduplication
|
||||
- `PersistentModelCache` (SQLite) for persistence
|
||||
- Route registrars: `ModelRouteRegistrar`, endpoints: `/loras/*`, `/checkpoints/*`, `/embeddings/*`
|
||||
- WebSocket via `WebSocketManager` for real-time updates
|
||||
|
||||
### Recipe System
|
||||
|
||||
- Base metadata in `py/recipes/base.py`
|
||||
- Enrichment adds model metadata: `RecipeEnrichmentService`
|
||||
- Parsers for different formats in `py/recipes/parsers/`
|
||||
- Base: `py/recipes/base.py`, Enrichment: `RecipeEnrichmentService`
|
||||
- Parsers: `py/recipes/parsers/`
|
||||
|
||||
## Important Notes
|
||||
|
||||
- Always use English for comments (per copilot-instructions.md)
|
||||
- Dual mode: ComfyUI plugin (uses folder_paths) vs standalone (reads settings.json)
|
||||
- ALWAYS use English for comments (per copilot-instructions.md)
|
||||
- Dual mode: ComfyUI plugin (folder_paths) vs standalone (settings.json)
|
||||
- Detection: `os.environ.get("LORA_MANAGER_STANDALONE", "0") == "1"`
|
||||
- Settings auto-saved in user directory or portable mode
|
||||
- WebSocket broadcasts for real-time updates (downloads, scans)
|
||||
- Symlink handling requires normalized paths
|
||||
- API endpoints follow `/loras/*`, `/checkpoints/*`, `/embeddings/*` patterns
|
||||
- Run `python scripts/sync_translation_keys.py` after UI string updates
|
||||
- Symlinks require normalized paths
|
||||
|
||||
## Frontend UI Architecture
|
||||
|
||||
This project has two distinct UI systems:
|
||||
|
||||
### 1. Standalone Lora Manager Web UI
|
||||
### 1. Standalone Web UI
|
||||
- Location: `./static/` and `./templates/`
|
||||
- Purpose: Full-featured web application for managing LoRA models
|
||||
- Tech stack: Vanilla JS + CSS, served by the standalone server
|
||||
- Development: Uses npm for frontend testing (`npm test`, `npm run test:watch`, etc.)
|
||||
- Tech: Vanilla JS + CSS, served by standalone server
|
||||
- Tests via npm in root directory
|
||||
|
||||
### 2. ComfyUI Custom Node Widgets
|
||||
- Location: `./web/comfyui/`
|
||||
- Purpose: Widgets and UI logic that ComfyUI loads as custom node extensions
|
||||
- Tech stack: Vanilla JS + Vue.js widgets (in `./vue-widgets/` and built to `./web/comfyui/vue-widgets/`)
|
||||
- Widget styling: Primary styles in `./web/comfyui/lm_styles.css` (NOT `./static/css/`)
|
||||
- Development: No npm build step for these widgets (Vue widgets use build system)
|
||||
|
||||
### Widget Development Guidelines
|
||||
- Use `app.registerExtension()` to register ComfyUI extensions (ComfyUI integration layer)
|
||||
- Use `node.addDOMWidget()` for custom DOM widgets
|
||||
- Widget styles should follow the patterns in `./web/comfyui/lm_styles.css`
|
||||
- Selected state: `rgba(66, 153, 225, 0.3)` background, `rgba(66, 153, 225, 0.6)` border
|
||||
- Hover state: `rgba(66, 153, 225, 0.2)` background
|
||||
- Color palette matches the Lora Manager accent color (blue #4299e1)
|
||||
- Use oklch() for color values when possible (defined in `./static/css/base.css`)
|
||||
- Vue widget components are in `./vue-widgets/src/components/` and built to `./web/comfyui/vue-widgets/`
|
||||
- When modifying widget styles, check `./web/comfyui/lm_styles.css` for consistency with other ComfyUI widgets
|
||||
|
||||
- Location: `./web/comfyui/` (Vanilla JS) + `./vue-widgets/` (Vue)
|
||||
- Primary styles: `./web/comfyui/lm_styles.css` (NOT `./static/css/`)
|
||||
- Vue builds to `./web/comfyui/vue-widgets/`, typecheck via `vue-tsc`
|
||||
|
||||
276
CLAUDE.md
276
CLAUDE.md
@@ -8,17 +8,22 @@ ComfyUI LoRA Manager is a comprehensive LoRA management system for ComfyUI that
|
||||
|
||||
## Development Commands
|
||||
|
||||
### Backend Development
|
||||
```bash
|
||||
# Install dependencies
|
||||
pip install -r requirements.txt
|
||||
### Backend
|
||||
|
||||
# Install development dependencies (for testing)
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
pip install -r requirements-dev.txt
|
||||
|
||||
# Run standalone server (port 8188 by default)
|
||||
python standalone.py --port 8188
|
||||
|
||||
# Run all backend tests
|
||||
pytest
|
||||
|
||||
# Run specific test file or function
|
||||
pytest tests/test_recipes.py
|
||||
pytest tests/test_recipes.py::test_function_name
|
||||
|
||||
# Run backend tests with coverage
|
||||
COVERAGE_FILE=coverage/backend/.coverage pytest \
|
||||
--cov=py \
|
||||
@@ -27,185 +32,158 @@ COVERAGE_FILE=coverage/backend/.coverage pytest \
|
||||
--cov-report=html:coverage/backend/html \
|
||||
--cov-report=xml:coverage/backend/coverage.xml \
|
||||
--cov-report=json:coverage/backend/coverage.json
|
||||
|
||||
# Run specific test file
|
||||
pytest tests/test_recipes.py
|
||||
```
|
||||
|
||||
### Frontend Development
|
||||
```bash
|
||||
# Install frontend dependencies
|
||||
npm install
|
||||
### Frontend
|
||||
|
||||
# Run frontend tests
|
||||
There are three test suites run by `npm test`: vanilla JS tests (vitest at root) and Vue widget tests (`vue-widgets/` vitest).
|
||||
|
||||
```bash
|
||||
npm install
|
||||
cd vue-widgets && npm install && cd ..
|
||||
|
||||
# Run all frontend tests (JS + Vue)
|
||||
npm test
|
||||
|
||||
# Run frontend tests in watch mode
|
||||
# Run only vanilla JS tests
|
||||
npm run test:js
|
||||
|
||||
# Run only Vue widget tests
|
||||
npm run test:vue
|
||||
|
||||
# Watch mode (JS tests only)
|
||||
npm run test:watch
|
||||
|
||||
# Run frontend tests with coverage
|
||||
# Frontend coverage
|
||||
npm run test:coverage
|
||||
|
||||
# Build Vue widgets (output to web/comfyui/vue-widgets/)
|
||||
cd vue-widgets && npm run build
|
||||
|
||||
# Vue widget dev mode (watch + rebuild)
|
||||
cd vue-widgets && npm run dev
|
||||
|
||||
# Typecheck Vue widgets
|
||||
cd vue-widgets && npm run typecheck
|
||||
```
|
||||
|
||||
### Localization
|
||||
|
||||
```bash
|
||||
# Sync translation keys after UI string updates
|
||||
python scripts/sync_translation_keys.py
|
||||
```
|
||||
|
||||
Locale files are in `locales/` (en, zh-CN, zh-TW, ja, ko, fr, de, es, ru, he).
|
||||
|
||||
## Architecture
|
||||
|
||||
### Backend Structure (Python)
|
||||
### Dual Mode Operation
|
||||
|
||||
**Core Entry Points:**
|
||||
- `__init__.py` - ComfyUI plugin entry point, registers nodes and routes
|
||||
- `standalone.py` - Standalone server that mocks ComfyUI dependencies
|
||||
- `py/lora_manager.py` - Main LoraManager class that registers HTTP routes
|
||||
|
||||
**Service Layer** (`py/services/`):
|
||||
- `ServiceRegistry` - Singleton service registry for dependency management
|
||||
- `ModelServiceFactory` - Factory for creating model services (LoRA, Checkpoint, Embedding)
|
||||
- Scanner services (`lora_scanner.py`, `checkpoint_scanner.py`, `embedding_scanner.py`) - Model file discovery and indexing
|
||||
- `model_scanner.py` - Base scanner with hash-based deduplication and metadata extraction
|
||||
- `persistent_model_cache.py` - SQLite-based cache for model metadata
|
||||
- `metadata_sync_service.py` - Syncs metadata from CivitAI/CivArchive APIs
|
||||
- `civitai_client.py` / `civarchive_client.py` - API clients for external services
|
||||
- `downloader.py` / `download_manager.py` - Model download orchestration
|
||||
- `recipe_scanner.py` - Recipe file management and image association
|
||||
- `settings_manager.py` - Application settings with migration support
|
||||
- `websocket_manager.py` - WebSocket broadcasting for real-time updates
|
||||
- `use_cases/` - Business logic orchestration (auto-organize, bulk refresh, downloads)
|
||||
|
||||
**Routes Layer** (`py/routes/`):
|
||||
- Route registrars organize endpoints by domain (models, recipes, previews, example images, updates)
|
||||
- `handlers/` - Request handlers implementing business logic
|
||||
- Routes use aiohttp and integrate with ComfyUI's PromptServer
|
||||
|
||||
**Recipe System** (`py/recipes/`):
|
||||
- `base.py` - Base recipe metadata structure
|
||||
- `enrichment.py` - Enriches recipes with model metadata
|
||||
- `merger.py` - Merges recipe data from multiple sources
|
||||
- `parsers/` - Parsers for different recipe formats (PNG, JSON, workflow)
|
||||
|
||||
**Custom Nodes** (`py/nodes/`):
|
||||
- `lora_loader.py` - LoRA loader nodes with preset support
|
||||
- `save_image.py` - Enhanced save image with pattern-based filenames
|
||||
- `trigger_word_toggle.py` - Toggle trigger words in prompts
|
||||
- `lora_stacker.py` - Stack multiple LoRAs
|
||||
- `prompt.py` - Prompt node with autocomplete
|
||||
- `wanvideo_lora_select.py` - WanVideo-specific LoRA selection
|
||||
|
||||
**Configuration** (`py/config.py`):
|
||||
- Manages folder paths for models, checkpoints, embeddings
|
||||
- Handles symlink mappings for complex directory structures
|
||||
- Auto-saves paths to settings.json in ComfyUI mode
|
||||
|
||||
### Frontend Structure (JavaScript)
|
||||
|
||||
**ComfyUI Widgets** (`web/comfyui/`):
|
||||
- Vanilla JavaScript ES modules extending ComfyUI's LiteGraph-based UI
|
||||
- `loras_widget.js` - Main LoRA selection widget with preview
|
||||
- `loras_widget_events.js` - Event handling for widget interactions
|
||||
- `autocomplete.js` - Autocomplete for trigger words and embeddings
|
||||
- `preview_tooltip.js` - Preview tooltip for model cards
|
||||
- `top_menu_extension.js` - Adds "Launch LoRA Manager" menu item
|
||||
- `trigger_word_highlight.js` - Syntax highlighting for trigger words
|
||||
- `utils.js` - Shared utilities and API helpers
|
||||
|
||||
**Widget Development:**
|
||||
- Widgets use `app.registerExtension` and `getCustomWidgets` hooks
|
||||
- `node.addDOMWidget(name, type, element, options)` embeds HTML in nodes
|
||||
- See `docs/dom_widget_dev_guide.md` for complete DOMWidget development guide
|
||||
|
||||
**Web Source** (`web-src/`):
|
||||
- Modern frontend components (if migrating from static)
|
||||
- `components/` - Reusable UI components
|
||||
- `styles/` - CSS styling
|
||||
|
||||
### Key Patterns
|
||||
|
||||
**Dual Mode Operation:**
|
||||
- ComfyUI plugin mode: Integrates with ComfyUI's PromptServer, uses folder_paths
|
||||
- Standalone mode: Mocks ComfyUI dependencies via `standalone.py`, reads paths from settings.json
|
||||
The system runs in two modes:
|
||||
- **ComfyUI plugin mode**: Integrates with ComfyUI's PromptServer, uses `folder_paths` for model discovery
|
||||
- **Standalone mode**: `standalone.py` mocks ComfyUI dependencies, reads paths from `settings.json`
|
||||
- Detection: `os.environ.get("LORA_MANAGER_STANDALONE", "0") == "1"`
|
||||
|
||||
**Settings Management:**
|
||||
- Settings stored in user directory (via `platformdirs`) or portable mode (in repo)
|
||||
- Migration system tracks settings schema version
|
||||
- Template in `settings.json.example` defines defaults
|
||||
### Backend (Python)
|
||||
|
||||
**Model Scanning Flow:**
|
||||
1. Scanner walks folder paths, computes file hashes
|
||||
2. Hash-based deduplication prevents duplicate processing
|
||||
3. Metadata extracted from safetensors headers
|
||||
4. Persistent cache stores results in SQLite
|
||||
5. Background sync fetches CivitAI/CivArchive metadata
|
||||
6. WebSocket broadcasts updates to connected clients
|
||||
**Entry points:**
|
||||
- `__init__.py` — ComfyUI plugin entry: registers nodes via `NODE_CLASS_MAPPINGS`, sets `WEB_DIRECTORY`, calls `LoraManager.add_routes()`
|
||||
- `standalone.py` — Standalone server: mocks `folder_paths` and node modules, starts aiohttp server
|
||||
- `py/lora_manager.py` — Main `LoraManager` class that registers all HTTP routes
|
||||
|
||||
**Recipe System:**
|
||||
- Recipes store LoRA combinations with parameters
|
||||
- Supports import from workflow JSON, PNG metadata
|
||||
- Images associated with recipes via sibling file detection
|
||||
- Enrichment adds model metadata for display
|
||||
**Service layer** (`py/services/`):
|
||||
- `ServiceRegistry` singleton for dependency injection; services follow `get_instance()` singleton pattern
|
||||
- `BaseModelService` abstract base → `LoraService`, `CheckpointService`, `EmbeddingService`
|
||||
- `ModelScanner` base → `LoraScanner`, `CheckpointScanner`, `EmbeddingScanner` for file discovery with hash-based deduplication
|
||||
- `PersistentModelCache` — SQLite-based metadata cache
|
||||
- `MetadataSyncService` — Background sync from CivitAI/CivArchive APIs
|
||||
- `SettingsManager` — Settings with schema migration support
|
||||
- `WebSocketManager` — Real-time progress broadcasting
|
||||
- `ModelServiceFactory` — Creates the right service for each model type
|
||||
- Use cases in `py/services/use_cases/` orchestrate complex business logic (auto-organize, bulk refresh, downloads)
|
||||
|
||||
**Frontend-Backend Communication:**
|
||||
- REST API for CRUD operations
|
||||
- WebSocket for real-time progress updates (downloads, scans)
|
||||
- API endpoints follow `/loras/*` pattern
|
||||
**Routes** (`py/routes/`):
|
||||
- Route registrars organize endpoints by domain: `ModelRouteRegistrar`, `RecipeRouteRegistrar`, etc.
|
||||
- Request handlers in `py/routes/handlers/` implement route logic
|
||||
- API endpoints follow `/loras/*`, `/checkpoints/*`, `/embeddings/*` patterns
|
||||
- All routes use aiohttp, return `web.json_response` or `web.Response`
|
||||
|
||||
**Recipe system** (`py/recipes/`):
|
||||
- `base.py` — Recipe metadata structure
|
||||
- `enrichment.py` — Enriches recipes with model metadata
|
||||
- `parsers/` — Parsers for PNG metadata, JSON, and workflow formats
|
||||
|
||||
**Custom nodes** (`py/nodes/`):
|
||||
- Each node class has a `NAME` class attribute used as key in `NODE_CLASS_MAPPINGS`
|
||||
- Standard ComfyUI node pattern: `INPUT_TYPES()` classmethod, `RETURN_TYPES`, `FUNCTION`
|
||||
- All nodes registered in `__init__.py`
|
||||
|
||||
**Configuration** (`py/config.py`):
|
||||
- Manages folder paths for models, handles symlink mappings
|
||||
- Auto-saves paths to settings.json in ComfyUI mode
|
||||
|
||||
### Frontend — Two Distinct UI Systems
|
||||
|
||||
#### 1. Standalone Manager Web UI
|
||||
- **Location:** `static/` (JS/CSS) and `templates/` (HTML)
|
||||
- **Tech:** Vanilla JS + CSS, served by standalone server
|
||||
- **Structure:** `static/js/core.js` (shared), `loras.js`, `checkpoints.js`, `embeddings.js`, `recipes.js`, `statistics.js`
|
||||
- **Tests:** `tests/frontend/**/*.test.js` (vitest + jsdom)
|
||||
|
||||
#### 2. ComfyUI Custom Node Widgets
|
||||
- **Vanilla JS widgets:** `web/comfyui/*.js` — ES modules extending ComfyUI's LiteGraph UI
|
||||
- `loras_widget.js` / `loras_widget_events.js` — Main LoRA selection widget
|
||||
- `autocomplete.js` — Trigger word and embedding autocomplete
|
||||
- `preview_tooltip.js` — Model card preview tooltips
|
||||
- `top_menu_extension.js` — "Launch LoRA Manager" menu item
|
||||
- `utils.js` — Shared utilities and API helpers
|
||||
- Widget styling in `web/comfyui/lm_styles.css` (NOT `static/css/`)
|
||||
- **Vue widgets:** `vue-widgets/src/` → built to `web/comfyui/vue-widgets/`
|
||||
- Vue 3 + TypeScript + PrimeVue + vue-i18n
|
||||
- Vite build with CSS-injected-by-JS plugin
|
||||
- Components: `LoraPoolWidget`, `LoraRandomizerWidget`, `LoraCyclerWidget`, `AutocompleteTextWidget`
|
||||
- Auto-built on ComfyUI startup via `py/vue_widget_builder.py`
|
||||
- Tests: `vue-widgets/tests/**/*.test.ts` (vitest)
|
||||
|
||||
**Widget registration pattern:**
|
||||
- Widgets use `app.registerExtension()` and `getCustomWidgets` hooks
|
||||
- `node.addDOMWidget(name, type, element, options)` embeds HTML in LiteGraph nodes
|
||||
- See `docs/dom_widget_dev_guide.md` for DOMWidget development guide
|
||||
|
||||
## Code Style
|
||||
|
||||
**Python:**
|
||||
- PEP 8 with 4-space indentation
|
||||
- snake_case for files, functions, variables
|
||||
- PascalCase for classes
|
||||
- Type hints preferred
|
||||
- English comments only (per copilot-instructions.md)
|
||||
- PEP 8, 4-space indentation, English comments only
|
||||
- Use `from __future__ import annotations` for forward references
|
||||
- Use `TYPE_CHECKING` guard for type-checking-only imports
|
||||
- Loggers via `logging.getLogger(__name__)`
|
||||
- Custom exceptions in `py/services/errors.py`
|
||||
- Async patterns: `async def` for I/O, `@pytest.mark.asyncio` for async tests
|
||||
- Singleton pattern with class-level `asyncio.Lock` (see `ModelScanner.get_instance()`)
|
||||
|
||||
**JavaScript:**
|
||||
- ES modules with camelCase
|
||||
- Files use `*_widget.js` suffix for ComfyUI widgets
|
||||
- Prefer vanilla JS, avoid framework dependencies
|
||||
- ES modules, camelCase functions/variables, PascalCase classes
|
||||
- Widget files use `*_widget.js` suffix
|
||||
- Prefer vanilla JS for `web/comfyui/` widgets, avoid framework dependencies (except Vue widgets)
|
||||
|
||||
## Testing
|
||||
|
||||
**Backend Tests:**
|
||||
- pytest with `--import-mode=importlib`
|
||||
- Test files: `tests/test_*.py`
|
||||
- Fixtures in `tests/conftest.py`
|
||||
- Mock ComfyUI dependencies using standalone.py patterns
|
||||
- Markers: `@pytest.mark.asyncio` for async tests, `@pytest.mark.no_settings_dir_isolation` for real paths
|
||||
**Backend (pytest):**
|
||||
- Config in `pytest.ini`: `--import-mode=importlib`, testpaths=`tests`
|
||||
- Fixtures in `tests/conftest.py` handle ComfyUI dependency mocking
|
||||
- Markers: `@pytest.mark.asyncio`, `@pytest.mark.no_settings_dir_isolation`
|
||||
- Uses `tmp_path_factory` for directory isolation
|
||||
|
||||
**Frontend Tests:**
|
||||
- Vitest with jsdom environment
|
||||
- Test files: `tests/frontend/**/*.test.js`
|
||||
**Frontend (vitest):**
|
||||
- Vanilla JS tests: `tests/frontend/**/*.test.js` with jsdom
|
||||
- Vue widget tests: `vue-widgets/tests/**/*.test.ts` with jsdom + @vue/test-utils
|
||||
- Setup in `tests/frontend/setup.js`
|
||||
- Coverage via `npm run test:coverage`
|
||||
|
||||
## Important Notes
|
||||
## Key Integration Points
|
||||
|
||||
**Settings Location:**
|
||||
- ComfyUI mode: Auto-saves folder paths to user settings directory
|
||||
- Standalone mode: Use `settings.json` (copy from `settings.json.example`)
|
||||
- Portable mode: Set `"use_portable_settings": true` in settings.json
|
||||
|
||||
**API Integration:**
|
||||
- CivitAI API key required for downloads (add to settings)
|
||||
- CivArchive API used as fallback for deleted models
|
||||
- Metadata archive database available for offline metadata
|
||||
|
||||
**Symlink Handling:**
|
||||
- Config scans symlinks to map virtual paths to physical locations
|
||||
- Preview validation uses normalized preview root paths
|
||||
- Fingerprinting prevents redundant symlink rescans
|
||||
|
||||
**ComfyUI Node Development:**
|
||||
- Nodes defined in `py/nodes/`, registered in `__init__.py`
|
||||
- Frontend widgets in `web/comfyui/`, matched by node type
|
||||
- Use `WEB_DIRECTORY = "./web/comfyui"` convention
|
||||
|
||||
**Recipe Image Association:**
|
||||
- Recipes scan for sibling images in same directory
|
||||
- Supports repair/migration of recipe image paths
|
||||
- See `py/services/recipe_scanner.py` for implementation details
|
||||
- **Settings:** Stored in user directory (via `platformdirs`) or portable mode (`"use_portable_settings": true`)
|
||||
- **CivitAI/CivArchive:** API clients for metadata sync and model downloads; CivitAI API key in settings
|
||||
- **Symlink handling:** Config scans symlinks to map virtual→physical paths; fingerprinting prevents redundant rescans
|
||||
- **WebSocket:** Broadcasts real-time progress for downloads, scans, and metadata sync
|
||||
- **Model scanning flow:** Walk folders → compute hashes → deduplicate → extract safetensors metadata → cache in SQLite → background CivitAI sync → WebSocket broadcast
|
||||
|
||||
17
README.md
17
README.md
@@ -34,6 +34,23 @@ Enhance your Civitai browsing experience with our companion browser extension! S
|
||||
|
||||
## Release Notes
|
||||
|
||||
### v0.9.16
|
||||
* **Duplicate Detection Enhancement** - The model duplicates mode now respects filter configurations, making it easier to find duplicate groups within specific filtered results.
|
||||
* **Tag Logic Toggle** - Added OR/AND toggle for include tags filtering in the filters panel, providing more flexible tag-based model searches.
|
||||
* **Metadata Refresh Skip Paths** - New setting to exclude specific paths from metadata refresh operations. Models under these paths will be skipped when fetching metadata from remote sources.
|
||||
* **Dynamic Trigger Words in Prompt Node** - Prompt node now supports dynamic numbers of trigger word inputs for greater flexibility.
|
||||
* **Early Access Updates** - Model updates now display Early Access information, with a new setting to ignore Early Access updates if desired.
|
||||
* **LM Civitai Extension Integration** - Added integration with the LM Civitai Extension. Clicking the download button in model updates now sends downloads to the extension's download queue for seamless one-click downloads.
|
||||
|
||||
### v0.9.15
|
||||
* **Filter Presets** - Save filter combinations as presets for quick switching and reapplication.
|
||||
* **Bug Fixes** - Fixed various bugs for improved stability.
|
||||
|
||||
### v0.9.14
|
||||
* **LoRA Cycler Node** - Introduced a new LoRA Cycler node that enables iteration through specified LoRAs with support for repeat count and pause iteration functionality. Refer to the new "Lora Cycler" template workflow for concrete example.
|
||||
* **Enhanced Prompt Node with Tag Autocomplete** - Enhanced the Prompt node with comprehensive tag autocomplete based on merged Danbooru + e621 tags. Supports tag search and autocomplete functionality. Implemented a command system with shortcuts like `/char` or `/artist` for category-specific tag searching. Added `/ac` or `/noac` commands to quickly enable or disable autocomplete. Refer to the "Lora Manager Basic" template workflow in ComfyUI -> Templates -> ComfyUI-Lora-Manager for detailed tips.
|
||||
* **Bug Fixes & Stability** - Addressed multiple bugs and improved overall stability.
|
||||
|
||||
### v0.9.12
|
||||
* **LoRA Randomizer System** - Introduced a comprehensive LoRA randomization system featuring LoRA Pool and LoRA Randomizer nodes for flexible and dynamic generation workflows.
|
||||
* **LoRA Randomizer Template** - Refer to the new "LoRA Randomizer" template workflow for detailed examples of flexible randomization modes, lock & reuse options, and other features.
|
||||
|
||||
70
__init__.py
70
__init__.py
@@ -1,15 +1,17 @@
|
||||
try: # pragma: no cover - import fallback for pytest collection
|
||||
from .py.lora_manager import LoraManager
|
||||
from .py.nodes.lora_loader import LoraManagerLoader, LoraManagerTextLoader
|
||||
from .py.nodes.trigger_word_toggle import TriggerWordToggle
|
||||
from .py.nodes.prompt import PromptLoraManager
|
||||
from .py.nodes.lora_stacker import LoraStacker
|
||||
from .py.nodes.lora_loader import LoraLoaderLM, LoraTextLoaderLM
|
||||
from .py.nodes.trigger_word_toggle import TriggerWordToggleLM
|
||||
from .py.nodes.prompt import PromptLM
|
||||
from .py.nodes.text import TextLM
|
||||
from .py.nodes.lora_stacker import LoraStackerLM
|
||||
from .py.nodes.save_image import SaveImageLM
|
||||
from .py.nodes.debug_metadata import DebugMetadata
|
||||
from .py.nodes.debug_metadata import DebugMetadataLM
|
||||
from .py.nodes.wanvideo_lora_select import WanVideoLoraSelectLM
|
||||
from .py.nodes.wanvideo_lora_select_from_text import WanVideoLoraSelectFromText
|
||||
from .py.nodes.lora_pool import LoraPoolNode
|
||||
from .py.nodes.lora_randomizer import LoraRandomizerNode
|
||||
from .py.nodes.wanvideo_lora_select_from_text import WanVideoLoraTextSelectLM
|
||||
from .py.nodes.lora_pool import LoraPoolLM
|
||||
from .py.nodes.lora_randomizer import LoraRandomizerLM
|
||||
from .py.nodes.lora_cycler import LoraCyclerLM
|
||||
from .py.metadata_collector import init as init_metadata_collector
|
||||
except (
|
||||
ImportError
|
||||
@@ -22,44 +24,50 @@ except (
|
||||
if str(package_root) not in sys.path:
|
||||
sys.path.append(str(package_root))
|
||||
|
||||
PromptLoraManager = importlib.import_module("py.nodes.prompt").PromptLoraManager
|
||||
PromptLM = importlib.import_module("py.nodes.prompt").PromptLM
|
||||
TextLM = importlib.import_module("py.nodes.text").TextLM
|
||||
LoraManager = importlib.import_module("py.lora_manager").LoraManager
|
||||
LoraManagerLoader = importlib.import_module(
|
||||
LoraLoaderLM = importlib.import_module(
|
||||
"py.nodes.lora_loader"
|
||||
).LoraManagerLoader
|
||||
LoraManagerTextLoader = importlib.import_module(
|
||||
).LoraLoaderLM
|
||||
LoraTextLoaderLM = importlib.import_module(
|
||||
"py.nodes.lora_loader"
|
||||
).LoraManagerTextLoader
|
||||
TriggerWordToggle = importlib.import_module(
|
||||
).LoraTextLoaderLM
|
||||
TriggerWordToggleLM = importlib.import_module(
|
||||
"py.nodes.trigger_word_toggle"
|
||||
).TriggerWordToggle
|
||||
LoraStacker = importlib.import_module("py.nodes.lora_stacker").LoraStacker
|
||||
).TriggerWordToggleLM
|
||||
LoraStackerLM = importlib.import_module("py.nodes.lora_stacker").LoraStackerLM
|
||||
SaveImageLM = importlib.import_module("py.nodes.save_image").SaveImageLM
|
||||
DebugMetadata = importlib.import_module("py.nodes.debug_metadata").DebugMetadata
|
||||
DebugMetadataLM = importlib.import_module("py.nodes.debug_metadata").DebugMetadataLM
|
||||
WanVideoLoraSelectLM = importlib.import_module(
|
||||
"py.nodes.wanvideo_lora_select"
|
||||
).WanVideoLoraSelectLM
|
||||
WanVideoLoraSelectFromText = importlib.import_module(
|
||||
WanVideoLoraTextSelectLM = importlib.import_module(
|
||||
"py.nodes.wanvideo_lora_select_from_text"
|
||||
).WanVideoLoraSelectFromText
|
||||
LoraPoolNode = importlib.import_module("py.nodes.lora_pool").LoraPoolNode
|
||||
LoraRandomizerNode = importlib.import_module(
|
||||
).WanVideoLoraTextSelectLM
|
||||
LoraPoolLM = importlib.import_module("py.nodes.lora_pool").LoraPoolLM
|
||||
LoraRandomizerLM = importlib.import_module(
|
||||
"py.nodes.lora_randomizer"
|
||||
).LoraRandomizerNode
|
||||
).LoraRandomizerLM
|
||||
LoraCyclerLM = importlib.import_module(
|
||||
"py.nodes.lora_cycler"
|
||||
).LoraCyclerLM
|
||||
init_metadata_collector = importlib.import_module("py.metadata_collector").init
|
||||
|
||||
NODE_CLASS_MAPPINGS = {
|
||||
PromptLoraManager.NAME: PromptLoraManager,
|
||||
LoraManagerLoader.NAME: LoraManagerLoader,
|
||||
LoraManagerTextLoader.NAME: LoraManagerTextLoader,
|
||||
TriggerWordToggle.NAME: TriggerWordToggle,
|
||||
LoraStacker.NAME: LoraStacker,
|
||||
PromptLM.NAME: PromptLM,
|
||||
TextLM.NAME: TextLM,
|
||||
LoraLoaderLM.NAME: LoraLoaderLM,
|
||||
LoraTextLoaderLM.NAME: LoraTextLoaderLM,
|
||||
TriggerWordToggleLM.NAME: TriggerWordToggleLM,
|
||||
LoraStackerLM.NAME: LoraStackerLM,
|
||||
SaveImageLM.NAME: SaveImageLM,
|
||||
DebugMetadata.NAME: DebugMetadata,
|
||||
DebugMetadataLM.NAME: DebugMetadataLM,
|
||||
WanVideoLoraSelectLM.NAME: WanVideoLoraSelectLM,
|
||||
WanVideoLoraSelectFromText.NAME: WanVideoLoraSelectFromText,
|
||||
LoraPoolNode.NAME: LoraPoolNode,
|
||||
LoraRandomizerNode.NAME: LoraRandomizerNode,
|
||||
WanVideoLoraTextSelectLM.NAME: WanVideoLoraTextSelectLM,
|
||||
LoraPoolLM.NAME: LoraPoolLM,
|
||||
LoraRandomizerLM.NAME: LoraRandomizerLM,
|
||||
LoraCyclerLM.NAME: LoraCyclerLM,
|
||||
}
|
||||
|
||||
WEB_DIRECTORY = "./web/comfyui"
|
||||
|
||||
@@ -1,31 +1,27 @@
|
||||
## Overview
|
||||
|
||||
The **LoRA Manager Civitai Extension** is a Browser extension designed to work seamlessly with [LoRA Manager](https://github.com/willmiao/ComfyUI-Lora-Manager) to significantly enhance your browsing experience on [Civitai](https://civitai.com).
|
||||
It also supports browsing on [CivArchive](https://civarchive.com/) (formerly CivitaiArchive).
|
||||
|
||||
With this extension, you can:
|
||||
The **LoRA Manager Civitai Extension** is a Browser extension designed to work seamlessly with [LoRA Manager](https://github.com/willmiao/ComfyUI-Lora-Manager) to significantly enhance your browsing experience on [Civitai](https://civitai.com). With this extension, you can:
|
||||
|
||||
✅ Instantly see which models are already present in your local library
|
||||
✅ Download new models with a single click
|
||||
✅ Manage downloads efficiently with queue and parallel download support
|
||||
✅ Keep your downloaded models automatically organized according to your custom settings
|
||||
|
||||

|
||||

|
||||
|
||||
**Update:** It now also supports browsing on [CivArchive](https://civarchive.com/) (formerly CivitaiArchive).
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Why Are All Features for Supporters Only?
|
||||
## Why Supporter Access?
|
||||
|
||||
I love building tools for the Stable Diffusion and ComfyUI communities, and LoRA Manager is a passion project that I've poured countless hours into. When I created this companion extension, my hope was to offer its core features for free, as a thank-you to all of you.
|
||||
LoRA Manager is built with love for the Stable Diffusion and ComfyUI communities. Your support makes it possible for me to keep improving and maintaining the tool full-time.
|
||||
|
||||
Unfortunately, I've reached a point where I need to be realistic. The level of support from the free model has been far lower than what's needed to justify the continuous development and maintenance for both projects. It was a difficult decision, but I've chosen to make the extension's features exclusive to supporters.
|
||||
Supporter-exclusive features help ensure the long-term sustainability of LoRA Manager, allowing continuous updates, new features, and better performance for everyone.
|
||||
|
||||
This change is crucial for me to be able to continue dedicating my time to improving the free and open-source LoRA Manager, which I'm committed to keeping available for everyone.
|
||||
|
||||
Your support does more than just unlock a few features—it allows me to keep innovating and ensures the core LoRA Manager project thrives. I'm incredibly grateful for your understanding and any support you can offer. ❤️
|
||||
|
||||
(_For those who previously supported me on Ko-fi with a one-time donation, I'll be sending out license keys individually as a thank-you._)
|
||||
Every contribution directly fuels development and keeps the core LoRA Manager free and open-source. In addition to monthly supporters, one-time donation supporters will also receive a license key, with the duration scaling according to the contribution amount. Thank you for helping keep this project alive and growing. ❤️
|
||||
|
||||
|
||||
---
|
||||
@@ -90,20 +86,27 @@ Clicking the download button adds the corresponding model version to the downloa
|
||||
|
||||
On a specific model page, visual indicators also appear on version buttons, showing which versions are already in your local library.
|
||||
|
||||
When switching to a specific version by clicking a version button:
|
||||
**Starting from v0.4.8**, model pages use a dedicated download button for better compatibility. When switching to a specific version by clicking a version button:
|
||||
|
||||
- Clicking the download button will open a dropdown:
|
||||
- Download via **LoRA Manager**
|
||||
- Download via **Original Download** (browser download)
|
||||
|
||||
You can check **Remember my choice** to set your preferred default. You can change this setting anytime in the extension's settings.
|
||||
- The new **dedicated download button** directly triggers download via **LoRA Manager**
|
||||
- The **original download button** remains unchanged for standard browser downloads
|
||||
|
||||

|
||||
|
||||
### Resources on Image Pages (2025-08-05) — now shows in-library indicators for image resources. ‘Import image as recipe’ coming soon!
|
||||
### Hide Models Already in Library (Beta)
|
||||
|
||||
**New in v0.4.8**: A new **Hide models already in library (Beta)** option makes it easier to focus on models you haven't added yet. It can be enabled from Settings, or toggled quickly using **Ctrl + Shift + H** (macOS: **Command + Shift + H**).
|
||||
|
||||
### Resources on Image Pages — now shows in-library indicators for image resources plus one-click recipe import
|
||||
|
||||
- **One-Click Import Civitai Image as Recipe** — Import any Civitai image as a recipe with a single click in the Resources Used panel.
|
||||
- **Auto-Queue Missing Assets** — In Settings you can decide if LoRAs or checkpoints referenced by that image should automatically be added to your download queue.
|
||||
- **More Accurate Metadata** — Importing directly from the page is faster than copying inside LM and keeps on-site tags and other metadata perfectly aligned.
|
||||
|
||||

|
||||
|
||||
[](https://github.com/user-attachments/assets/41fd4240-c949-4f83-bde7-8f3124c09494)
|
||||
|
||||
---
|
||||
|
||||
## Model Download Location & LoRA Manager Settings
|
||||
@@ -170,11 +173,11 @@ _Thanks to user **Temikus** for sharing this solution!_
|
||||
The extension will evolve alongside **LoRA Manager** improvements. Planned features include:
|
||||
|
||||
- [x] Support for **additional model types** (e.g., embeddings)
|
||||
- [ ] One-click **Recipe Import**
|
||||
- [x] Display of in-library status for all resources in the **Resources Used** section of the image page
|
||||
- [x] One-click **Recipe Import**
|
||||
- [x] Display of in-library status for all resources in the **Resources Used** section of the image page
|
||||
- [x] One-click **Auto-organize Models**
|
||||
- [x] **Hide models already in library (Beta)** - Focus on models you haven't added yet
|
||||
|
||||
**Stay tuned — and thank you for your support!**
|
||||
|
||||
---
|
||||
|
||||
|
||||
28
docs/dom-widgets/README.md
Normal file
28
docs/dom-widgets/README.md
Normal file
@@ -0,0 +1,28 @@
|
||||
# DOM Widgets Documentation
|
||||
|
||||
Documentation for custom DOM widget development in ComfyUI LoRA Manager.
|
||||
|
||||
## Files
|
||||
|
||||
- **[Value Persistence Best Practices](value-persistence-best-practices.md)** - Essential guide for implementing text input DOM widgets that persist values correctly
|
||||
|
||||
## Key Lessons
|
||||
|
||||
### Common Anti-Patterns
|
||||
|
||||
❌ **Don't**: Create internal state variables
|
||||
❌ **Don't**: Use v-model for text inputs
|
||||
❌ **Don't**: Add serializeValue, onSetValue callbacks
|
||||
❌ **Don't**: Watch props.widget.value
|
||||
|
||||
### Best Practices
|
||||
|
||||
✅ **Do**: Use DOM element as single source of truth
|
||||
✅ **Do**: Store DOM reference on widget.inputEl
|
||||
✅ **Do**: Direct getValue/setValue to DOM
|
||||
✅ **Do**: Clean up reference on unmount
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [DOM Widget Development Guide](../dom_widget_dev_guide.md) - Comprehensive guide for building DOM widgets
|
||||
- [ComfyUI Built-in Example](../../../../code/ComfyUI_frontend/src/renderer/extensions/vueNodes/widgets/composables/useStringWidget.ts) - Reference implementation
|
||||
225
docs/dom-widgets/value-persistence-best-practices.md
Normal file
225
docs/dom-widgets/value-persistence-best-practices.md
Normal file
@@ -0,0 +1,225 @@
|
||||
# DOM Widget Value Persistence - Best Practices
|
||||
|
||||
## Overview
|
||||
|
||||
DOM widgets require different persistence patterns depending on their complexity. This document covers two patterns:
|
||||
|
||||
1. **Simple Text Widgets**: DOM element as source of truth (e.g., textarea, input)
|
||||
2. **Complex Widgets**: Internal value with `widget.callback` (e.g., LoraPoolWidget, RandomizerWidget)
|
||||
|
||||
## Understanding ComfyUI's Built-in Callback Mechanism
|
||||
|
||||
When `widget.value` is set (e.g., during workflow load), ComfyUI's `domWidget.ts` triggers this flow:
|
||||
|
||||
```typescript
|
||||
// From ComfyUI_frontend/src/scripts/domWidget.ts:146-149
|
||||
set value(v: V) {
|
||||
this.options.setValue?.(v) // 1. Update internal state
|
||||
this.callback?.(this.value) // 2. Notify listeners for UI updates
|
||||
}
|
||||
```
|
||||
|
||||
This means:
|
||||
- `setValue()` handles storing the value
|
||||
- `widget.callback()` is automatically called to notify the UI
|
||||
- You don't need custom callback mechanisms like `onSetValue`
|
||||
|
||||
---
|
||||
|
||||
## Pattern 1: Simple Text Input Widgets
|
||||
|
||||
For widgets where the value IS the DOM element's text content (textarea, input fields).
|
||||
|
||||
### When to Use
|
||||
|
||||
- Single text input/textarea widgets
|
||||
- Value is a simple string
|
||||
- No complex state management needed
|
||||
|
||||
### Implementation
|
||||
|
||||
**main.ts:**
|
||||
```typescript
|
||||
const widget = node.addDOMWidget(name, type, container, {
|
||||
getValue() {
|
||||
return widget.inputEl?.value ?? ''
|
||||
},
|
||||
setValue(v: string) {
|
||||
if (widget.inputEl) {
|
||||
widget.inputEl.value = v ?? ''
|
||||
}
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
**Vue Component:**
|
||||
```typescript
|
||||
onMounted(() => {
|
||||
if (textareaRef.value) {
|
||||
props.widget.inputEl = textareaRef.value
|
||||
}
|
||||
})
|
||||
|
||||
onUnmounted(() => {
|
||||
if (props.widget.inputEl === textareaRef.value) {
|
||||
props.widget.inputEl = undefined
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
### Why This Works
|
||||
|
||||
- Single source of truth: the DOM element
|
||||
- `getValue()` reads directly from DOM
|
||||
- `setValue()` writes directly to DOM
|
||||
- No sync issues between multiple state variables
|
||||
|
||||
---
|
||||
|
||||
## Pattern 2: Complex Widgets
|
||||
|
||||
For widgets with structured data (JSON configs, arrays, objects) where the value cannot be stored in a DOM element.
|
||||
|
||||
### When to Use
|
||||
|
||||
- Value is a complex object/array (e.g., `{ loras: [...], settings: {...} }`)
|
||||
- Multiple UI elements contribute to the value
|
||||
- Vue reactive state manages the UI
|
||||
|
||||
### Implementation
|
||||
|
||||
**main.ts:**
|
||||
```typescript
|
||||
let internalValue: MyConfig | undefined
|
||||
|
||||
const widget = node.addDOMWidget(name, type, container, {
|
||||
getValue() {
|
||||
return internalValue
|
||||
},
|
||||
setValue(v: MyConfig) {
|
||||
internalValue = v
|
||||
// NO custom onSetValue needed - widget.callback is called automatically
|
||||
},
|
||||
serialize: true // Ensure value is saved with workflow
|
||||
})
|
||||
```
|
||||
|
||||
**Vue Component:**
|
||||
```typescript
|
||||
const config = ref<MyConfig>(getDefaultConfig())
|
||||
|
||||
onMounted(() => {
|
||||
// Set up callback for UI updates when widget.value changes externally
|
||||
// (e.g., workflow load, undo/redo)
|
||||
props.widget.callback = (newValue: MyConfig) => {
|
||||
if (newValue) {
|
||||
config.value = newValue
|
||||
}
|
||||
}
|
||||
|
||||
// Restore initial value if workflow was already loaded
|
||||
if (props.widget.value) {
|
||||
config.value = props.widget.value
|
||||
}
|
||||
})
|
||||
|
||||
// When UI changes, update widget value
|
||||
function onConfigChange(newConfig: MyConfig) {
|
||||
config.value = newConfig
|
||||
props.widget.value = newConfig // This also triggers callback
|
||||
}
|
||||
```
|
||||
|
||||
### Why This Works
|
||||
|
||||
1. **Clear separation**: `internalValue` stores the data, Vue ref manages the UI
|
||||
2. **Built-in callback**: ComfyUI calls `widget.callback()` automatically after `setValue()`
|
||||
3. **Bidirectional sync**:
|
||||
- External → UI: `setValue()` updates `internalValue`, `callback()` updates Vue ref
|
||||
- UI → External: User interaction updates Vue ref, which updates `widget.value`
|
||||
|
||||
---
|
||||
|
||||
## Common Mistakes
|
||||
|
||||
### ❌ Creating custom callback mechanisms
|
||||
|
||||
```typescript
|
||||
// Wrong - unnecessary complexity
|
||||
setValue(v: MyConfig) {
|
||||
internalValue = v
|
||||
widget.onSetValue?.(v) // Don't add this - use widget.callback instead
|
||||
}
|
||||
```
|
||||
|
||||
Use the built-in `widget.callback` instead.
|
||||
|
||||
### ❌ Using v-model for simple text inputs in DOM widgets
|
||||
|
||||
```html
|
||||
<!-- Wrong - creates sync issues -->
|
||||
<textarea v-model="textValue" />
|
||||
|
||||
<!-- Right for simple text widgets -->
|
||||
<textarea ref="textareaRef" @input="onInput" />
|
||||
```
|
||||
|
||||
### ❌ Watching props.widget.value
|
||||
|
||||
```typescript
|
||||
// Wrong - creates race conditions
|
||||
watch(() => props.widget.value, (newValue) => {
|
||||
config.value = newValue
|
||||
})
|
||||
```
|
||||
|
||||
Use `widget.callback` instead - it's called at the right time in the lifecycle.
|
||||
|
||||
### ❌ Multiple sources of truth
|
||||
|
||||
```typescript
|
||||
// Wrong - who is the source of truth?
|
||||
let internalValue = '' // State 1
|
||||
const textValue = ref('') // State 2
|
||||
const domElement = textarea // State 3
|
||||
props.widget.value // State 4
|
||||
```
|
||||
|
||||
Choose ONE source of truth:
|
||||
- **Simple widgets**: DOM element
|
||||
- **Complex widgets**: `internalValue` (with Vue ref as derived UI state)
|
||||
|
||||
### ❌ Adding serializeValue for simple widgets
|
||||
|
||||
```typescript
|
||||
// Wrong - getValue/setValue handle serialization
|
||||
props.widget.serializeValue = async () => textValue.value
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Decision Guide
|
||||
|
||||
| Widget Type | Source of Truth | Use `widget.callback` | Example |
|
||||
|-------------|-----------------|----------------------|---------|
|
||||
| Simple text input | DOM element (`inputEl`) | Optional | AutocompleteTextWidget |
|
||||
| Complex config | `internalValue` | Yes, for UI sync | LoraPoolWidget |
|
||||
| Vue component widget | Vue ref + `internalValue` | Yes | RandomizerWidget |
|
||||
|
||||
---
|
||||
|
||||
## Testing Checklist
|
||||
|
||||
- [ ] Load workflow - value restores correctly
|
||||
- [ ] Switch workflow - value persists
|
||||
- [ ] Reload page - value persists
|
||||
- [ ] UI interaction - value updates
|
||||
- [ ] Undo/redo - value syncs with UI
|
||||
- [ ] No console errors
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- ComfyUI DOMWidget implementation: `ComfyUI_frontend/src/scripts/domWidget.ts`
|
||||
- Simple text widget example: `ComfyUI_frontend/src/renderer/extensions/vueNodes/widgets/composables/useStringWidget.ts`
|
||||
@@ -240,6 +240,8 @@ inputEl.addEventListener("change", () => {
|
||||
});
|
||||
```
|
||||
|
||||
> **⚠️ Important**: For Vue-based DOM widgets with text inputs, follow the [Value Persistence Best Practices](dom-widgets/value-persistence-best-practices.md) to avoid sync issues. Key takeaway: use DOM element as single source of truth, avoid internal state variables and v-model.
|
||||
|
||||
### 5.3 The Restoration Mechanism (`configure`)
|
||||
|
||||
* **`configure(data)`**: When a Workflow is loaded, `LGraphNode` calls its `configure(data)` method.
|
||||
|
||||
69
docs/reference/danbooru_e621_categories.md
Normal file
69
docs/reference/danbooru_e621_categories.md
Normal file
@@ -0,0 +1,69 @@
|
||||
# Danbooru/E621 Tag Categories Reference
|
||||
|
||||
Reference for category values used in `danbooru_e621_merged.csv` tag files.
|
||||
|
||||
## Category Value Mapping
|
||||
|
||||
### Danbooru Categories
|
||||
|
||||
| Value | Description |
|
||||
|-------|-------------|
|
||||
| 0 | General |
|
||||
| 1 | Artist |
|
||||
| 2 | *(unused)* |
|
||||
| 3 | Copyright |
|
||||
| 4 | Character |
|
||||
| 5 | Meta |
|
||||
|
||||
### e621 Categories
|
||||
|
||||
| Value | Description |
|
||||
|-------|-------------|
|
||||
| 6 | *(unused)* |
|
||||
| 7 | General |
|
||||
| 8 | Artist |
|
||||
| 9 | Contributor |
|
||||
| 10 | Copyright |
|
||||
| 11 | Character |
|
||||
| 12 | Species |
|
||||
| 13 | *(unused)* |
|
||||
| 14 | Meta |
|
||||
| 15 | Lore |
|
||||
|
||||
## Danbooru Category Colors
|
||||
|
||||
| Description | Normal Color | Hover Color |
|
||||
|-------------|--------------|-------------|
|
||||
| General | #009be6 | #4bb4ff |
|
||||
| Artist | #ff8a8b | #ffc3c3 |
|
||||
| Copyright | #c797ff | #ddc9fb |
|
||||
| Character | #35c64a | #93e49a |
|
||||
| Meta | #ead084 | #f7e7c3 |
|
||||
|
||||
## CSV Column Structure
|
||||
|
||||
Each row in the merged CSV file contains 4 columns:
|
||||
|
||||
| Column | Description | Example |
|
||||
|--------|-------------|---------|
|
||||
| 1 | Tag name | `1girl`, `highres`, `solo` |
|
||||
| 2 | Category value (0-15) | `0`, `5`, `7` |
|
||||
| 3 | Post count | `6008644`, `5256195` |
|
||||
| 4 | Aliases (comma-separated, quoted) | `"1girls,sole_female"`, empty string |
|
||||
|
||||
### Sample Data
|
||||
|
||||
```
|
||||
1girl,0,6008644,"1girls,sole_female"
|
||||
highres,5,5256195,"high_res,high_resolution,hires"
|
||||
solo,0,5000954,"alone,female_solo,single,solo_female"
|
||||
long_hair,0,4350743,"/lh,longhair"
|
||||
mammal,12,3437444,"cetancodont,cetancodontamorph,feralmammal"
|
||||
anthro,7,3381927,"adult_anthro,anhtro,antho,anthro_horse"
|
||||
skirt,0,1557883,
|
||||
```
|
||||
|
||||
## Source
|
||||
|
||||
- [PR #312: Add danbooru_e621_merged.csv](https://github.com/DominikDoom/a1111-sd-webui-tagcomplete/pull/312)
|
||||
- [DraconicDragon/dbr-e621-lists-archive](https://github.com/DraconicDragon/dbr-e621-lists-archive)
|
||||
191
docs/technical/model_type_refactoring_todo.md
Normal file
191
docs/technical/model_type_refactoring_todo.md
Normal file
@@ -0,0 +1,191 @@
|
||||
# Model Type 字段重构 - 遗留工作清单
|
||||
|
||||
> **状态**: Phase 1-4 已完成 | **创建日期**: 2026-01-30
|
||||
> **相关文件**: `py/utils/models.py`, `py/services/model_query.py`, `py/services/checkpoint_scanner.py`, etc.
|
||||
|
||||
---
|
||||
|
||||
## 概述
|
||||
|
||||
本次重构旨在解决 `model_type` 字段语义不统一的问题。系统中有两个层面的"类型"概念:
|
||||
|
||||
1. **Scanner Type** (`scanner_type`): 架构层面的大类 - `lora`, `checkpoint`, `embedding`
|
||||
2. **Sub Type** (`sub_type`): 业务层面的细分类型 - `lora`/`locon`/`dora`, `checkpoint`/`diffusion_model`, `embedding`
|
||||
|
||||
重构目标是统一使用 `sub_type` 表示细分类型,保留 `model_type` 作为向后兼容的别名。
|
||||
|
||||
---
|
||||
|
||||
## 已完成工作 ✅
|
||||
|
||||
### Phase 1: 后端字段重命名
|
||||
- [x] `CheckpointMetadata.model_type` → `sub_type`
|
||||
- [x] `EmbeddingMetadata.model_type` → `sub_type`
|
||||
- [x] `model_scanner.py` `_build_cache_entry()` 同时处理 `sub_type` 和 `model_type`
|
||||
|
||||
### Phase 2: 查询逻辑更新
|
||||
- [x] `model_query.py` 新增 `resolve_sub_type()` 和 `normalize_sub_type()`
|
||||
- [x] ~~保持向后兼容的别名 `resolve_civitai_model_type`, `normalize_civitai_model_type`~~ (已在 Phase 5 移除)
|
||||
- [x] `ModelFilterSet.apply()` 更新为使用新的解析函数
|
||||
|
||||
### Phase 3: API 响应更新
|
||||
- [x] `LoraService.format_response()` 返回 `sub_type` ~~+ `model_type`~~ (已移除 `model_type`)
|
||||
- [x] `CheckpointService.format_response()` 返回 `sub_type` ~~+ `model_type`~~ (已移除 `model_type`)
|
||||
- [x] `EmbeddingService.format_response()` 返回 `sub_type` ~~+ `model_type`~~ (已移除 `model_type`)
|
||||
|
||||
### Phase 4: 前端更新
|
||||
- [x] `constants.js` 新增 `MODEL_SUBTYPE_DISPLAY_NAMES`
|
||||
- [x] `MODEL_TYPE_DISPLAY_NAMES` 作为别名保留
|
||||
|
||||
### Phase 5: 清理废弃代码 ✅
|
||||
- [x] 从 `ModelScanner._build_cache_entry()` 中移除 `model_type` 向后兼容代码
|
||||
- [x] 从 `CheckpointScanner` 中移除 `model_type` 兼容处理
|
||||
- [x] 从 `model_query.py` 中移除 `resolve_civitai_model_type` 和 `normalize_civitai_model_type` 别名
|
||||
- [x] 更新前端 `FilterManager.js` 使用 `sub_type` (已在使用 `MODEL_SUBTYPE_DISPLAY_NAMES`)
|
||||
- [x] 更新所有相关测试
|
||||
|
||||
---
|
||||
|
||||
## 遗留工作 ⏳
|
||||
|
||||
### Phase 5: 清理废弃代码 ✅ **已完成**
|
||||
|
||||
所有 Phase 5 的清理工作已完成:
|
||||
|
||||
#### 5.1 移除 `model_type` 字段的向后兼容代码 ✅
|
||||
- 从 `ModelScanner._build_cache_entry()` 中移除了 `model_type` 的设置
|
||||
- 现在只设置 `sub_type` 字段
|
||||
|
||||
#### 5.2 移除 CheckpointScanner 的 model_type 兼容处理 ✅
|
||||
- `adjust_metadata()` 现在只处理 `sub_type`
|
||||
- `adjust_cached_entry()` 现在只设置 `sub_type`
|
||||
|
||||
#### 5.3 移除 model_query 中的向后兼容别名 ✅
|
||||
- 移除了 `resolve_civitai_model_type = resolve_sub_type`
|
||||
- 移除了 `normalize_civitai_model_type = normalize_sub_type`
|
||||
|
||||
#### 5.4 前端清理 ✅
|
||||
- `FilterManager.js` 已经在使用 `MODEL_SUBTYPE_DISPLAY_NAMES` (通过别名 `MODEL_TYPE_DISPLAY_NAMES`)
|
||||
- API list endpoint 现在只返回 `sub_type`,不再返回 `model_type`
|
||||
- `ModelCard.js` 现在设置 `card.dataset.sub_type` (所有模型类型通用)
|
||||
- `CheckpointContextMenu.js` 现在读取 `card.dataset.sub_type`
|
||||
- `MoveManager.js` 现在处理 `cache_entry.sub_type`
|
||||
- `RecipeModal.js` 现在读取 `checkpoint.sub_type`
|
||||
|
||||
---
|
||||
|
||||
## 数据库迁移评估
|
||||
|
||||
### 当前状态
|
||||
- `persistent_model_cache.py` 使用 `civitai_model_type` 列存储 CivitAI 原始类型
|
||||
- 缓存 entry 中的 `sub_type` 在运行期动态计算
|
||||
- 数据库 schema **无需立即修改**
|
||||
|
||||
### 未来可选优化
|
||||
```sql
|
||||
-- 可选:在 models 表中添加 sub_type 列(与 civitai_model_type 保持一致但语义更清晰)
|
||||
ALTER TABLE models ADD COLUMN sub_type TEXT;
|
||||
|
||||
-- 数据迁移
|
||||
UPDATE models SET sub_type = civitai_model_type WHERE sub_type IS NULL;
|
||||
```
|
||||
|
||||
**建议**: 如果决定添加 `sub_type` 列,应与 Phase 5 一起进行。
|
||||
|
||||
---
|
||||
|
||||
## 测试覆盖率
|
||||
|
||||
### 新增/更新测试文件(已全部通过 ✅)
|
||||
|
||||
| 测试文件 | 数量 | 覆盖内容 |
|
||||
|---------|------|---------|
|
||||
| `tests/utils/test_models_sub_type.py` | 7 | Metadata sub_type 字段 |
|
||||
| `tests/services/test_model_query_sub_type.py` | 19 | sub_type 解析和过滤 |
|
||||
| `tests/services/test_checkpoint_scanner_sub_type.py` | 6 | CheckpointScanner sub_type |
|
||||
| `tests/services/test_service_format_response_sub_type.py` | 6 | API 响应 sub_type 包含 |
|
||||
| `tests/services/test_checkpoint_scanner.py` | 1 | Checkpoint 缓存 sub_type |
|
||||
| `tests/services/test_model_scanner.py` | 1 | adjust_cached_entry hook |
|
||||
| `tests/services/test_download_manager.py` | 1 | Checkpoint 下载 sub_type |
|
||||
|
||||
### 需要补充的测试(可选)
|
||||
|
||||
- [ ] 集成测试:验证前端过滤使用 sub_type 字段
|
||||
- [ ] 数据库迁移测试(如果执行可选优化)
|
||||
- [ ] 性能测试:确认 resolve_sub_type 的优先级查找没有显著性能影响
|
||||
|
||||
---
|
||||
|
||||
## 兼容性检查清单
|
||||
|
||||
### 已完成 ✅
|
||||
|
||||
- [x] 前端代码已全部改用 `sub_type` 字段
|
||||
- [x] API list endpoint 已移除 `model_type`,只返回 `sub_type`
|
||||
- [x] 后端 cache entry 已移除 `model_type`,只保留 `sub_type`
|
||||
- [x] 所有测试已更新通过
|
||||
- [x] 文档已更新
|
||||
|
||||
---
|
||||
|
||||
## 相关文件清单
|
||||
|
||||
### 核心文件
|
||||
```
|
||||
py/utils/models.py
|
||||
py/utils/constants.py
|
||||
py/services/model_scanner.py
|
||||
py/services/model_query.py
|
||||
py/services/checkpoint_scanner.py
|
||||
py/services/base_model_service.py
|
||||
py/services/lora_service.py
|
||||
py/services/checkpoint_service.py
|
||||
py/services/embedding_service.py
|
||||
```
|
||||
|
||||
### 前端文件
|
||||
```
|
||||
static/js/utils/constants.js
|
||||
static/js/managers/FilterManager.js
|
||||
static/js/managers/MoveManager.js
|
||||
static/js/components/shared/ModelCard.js
|
||||
static/js/components/ContextMenu/CheckpointContextMenu.js
|
||||
static/js/components/RecipeModal.js
|
||||
```
|
||||
|
||||
### 测试文件
|
||||
```
|
||||
tests/utils/test_models_sub_type.py
|
||||
tests/services/test_model_query_sub_type.py
|
||||
tests/services/test_checkpoint_scanner_sub_type.py
|
||||
tests/services/test_service_format_response_sub_type.py
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 风险评估
|
||||
|
||||
| 风险项 | 影响 | 缓解措施 |
|
||||
|-------|------|---------|
|
||||
| ~~第三方代码依赖 `model_type`~~ | ~~高~~ | ~~保持别名至少 1 个 major 版本~~ ✅ 已完成移除 |
|
||||
| ~~数据库 schema 变更~~ | ~~中~~ | ~~暂缓 schema 变更,仅运行时计算~~ ✅ 无需变更 |
|
||||
| ~~前端过滤失效~~ | ~~中~~ | ~~全面的集成测试覆盖~~ ✅ 测试通过 |
|
||||
| CivitAI API 变化 | 低 | 保持多源解析策略 |
|
||||
|
||||
---
|
||||
|
||||
## 时间线
|
||||
|
||||
- **v1.x**: Phase 1-4 已完成,保持向后兼容
|
||||
- **v2.0 (当前)**: ✅ Phase 5 已完成 - `model_type` 兼容代码已移除
|
||||
- API list endpoint 只返回 `sub_type`
|
||||
- Cache entry 只保留 `sub_type`
|
||||
- 移除了 `resolve_civitai_model_type` 和 `normalize_civitai_model_type` 别名
|
||||
|
||||
---
|
||||
|
||||
## 备注
|
||||
|
||||
- 重构期间发现 `civitai_model_type` 数据库列命名尚可,但语义上应理解为存储 CivitAI API 返回的原始类型值
|
||||
- Checkpoint 的 `diffusion_model` sub_type 不能通过 CivitAI API 获取,必须通过文件路径(model root)判断
|
||||
- LoRA 的 sub_type(lora/locon/dora)直接来自 CivitAI API 的 `version_info.model.type`
|
||||
678
docs/testing/backend-testing-improvement-plan.md
Normal file
678
docs/testing/backend-testing-improvement-plan.md
Normal file
@@ -0,0 +1,678 @@
|
||||
# Backend Testing Improvement Plan
|
||||
|
||||
**Status:** Phase 4 Complete ✅
|
||||
**Created:** 2026-02-11
|
||||
**Updated:** 2026-02-11
|
||||
**Priority:** P0 - Critical
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
This document outlines a comprehensive plan to improve the quality, coverage, and maintainability of the LoRa Manager backend test suite. Recent critical bugs (_handle_download_task_done and get_status methods missing) were not caught by existing tests, highlighting significant gaps in the testing strategy.
|
||||
|
||||
## Current State Assessment
|
||||
|
||||
### Test Statistics
|
||||
- **Total Python Test Files:** 80+
|
||||
- **Total JavaScript Test Files:** 29
|
||||
- **Test Lines of Code:** ~15,000
|
||||
- **Current Pass Rate:** 100% (but missing critical edge cases)
|
||||
|
||||
### Key Findings
|
||||
1. **Coverage Gaps:** Critical modules have no direct tests
|
||||
2. **Mocking Issues:** Over-mocking hides real bugs
|
||||
3. **Integration Deficit:** Missing end-to-end tests
|
||||
4. **Async Inconsistency:** Multiple patterns for async tests
|
||||
5. **Maintenance Burden:** Large, complex test files with duplication
|
||||
|
||||
---
|
||||
|
||||
## Phase 2 Completion Summary (2026-02-11)
|
||||
|
||||
### Completed Items
|
||||
|
||||
1. **Integration Test Framework** ✅
|
||||
- Created `tests/integration/` directory structure
|
||||
- Added `tests/integration/conftest.py` with shared fixtures
|
||||
- Added `tests/integration/__init__.py` for package organization
|
||||
|
||||
2. **Download Flow Integration Tests** ✅
|
||||
- Created `tests/integration/test_download_flow.py` with 7 tests
|
||||
- Tests cover:
|
||||
- Download with mocked network (2 tests)
|
||||
- Progress broadcast verification (1 test)
|
||||
- Error handling (1 test)
|
||||
- Cancellation flow (1 test)
|
||||
- Concurrent download management (1 test)
|
||||
- Route endpoint validation (1 test)
|
||||
|
||||
3. **Recipe Flow Integration Tests** ✅
|
||||
- Created `tests/integration/test_recipe_flow.py` with 9 tests
|
||||
- Tests cover:
|
||||
- Recipe save and retrieve flow (1 test)
|
||||
- Recipe update flow (1 test)
|
||||
- Recipe delete flow (1 test)
|
||||
- Recipe model extraction (1 test)
|
||||
- Generation parameters handling (1 test)
|
||||
- Concurrent recipe reads (1 test)
|
||||
- Concurrent read/write operations (1 test)
|
||||
- Recipe list endpoint (1 test)
|
||||
- Recipe metadata parsing (1 test)
|
||||
|
||||
4. **ModelLifecycleService Coverage** ✅
|
||||
- Added 12 new tests to `tests/services/test_model_lifecycle_service.py`
|
||||
- Tests cover:
|
||||
- `exclude_model` functionality (3 tests)
|
||||
- `bulk_delete_models` functionality (2 tests)
|
||||
- Error path tests (5 tests)
|
||||
- `_extract_model_id_from_payload` utility (3 tests)
|
||||
- Total: 18 tests (up from 6)
|
||||
|
||||
5. **PersistentRecipeCache Concurrent Access** ✅
|
||||
- Added 5 new concurrent access tests to `tests/test_persistent_recipe_cache.py`
|
||||
- Tests cover:
|
||||
- Concurrent reads without corruption (1 test)
|
||||
- Concurrent write and read operations (1 test)
|
||||
- Concurrent updates to same recipe (1 test)
|
||||
- Schema initialization thread safety (1 test)
|
||||
- Concurrent save and remove operations (1 test)
|
||||
- Total: 17 tests (up from 12)
|
||||
|
||||
### Test Results
|
||||
- **Integration Tests:** 16/16 passing
|
||||
- **ModelLifecycleService Tests:** 18/18 passing
|
||||
- **PersistentRecipeCache Tests:** 17/17 passing
|
||||
- **Total New Tests Added:** 28 tests
|
||||
|
||||
---
|
||||
|
||||
## Phase 1 Completion Summary (2026-02-11)
|
||||
|
||||
### Completed Items
|
||||
|
||||
1. **pytest-asyncio Integration** ✅
|
||||
- Added `pytest-asyncio>=0.21.0` to `requirements-dev.txt`
|
||||
- Updated `pytest.ini` with `asyncio_mode = auto` and `asyncio_default_fixture_loop_scope = function`
|
||||
- Removed custom `pytest_pyfunc_call` handler from `tests/conftest.py`
|
||||
- Added `@pytest.mark.asyncio` decorator to 21 async test functions in `tests/services/test_download_manager.py`
|
||||
|
||||
2. **Error Path Tests** ✅
|
||||
- Created `tests/services/test_downloader_error_paths.py` with 19 new tests
|
||||
- Tests cover:
|
||||
- DownloadStreamControl state management (6 tests)
|
||||
- Downloader configuration and initialization (4 tests)
|
||||
- DownloadProgress dataclass (1 test)
|
||||
- Custom exceptions (2 tests)
|
||||
- Authentication headers (3 tests)
|
||||
- Session management (3 tests)
|
||||
|
||||
3. **Test Results**
|
||||
- All 45 tests pass (26 in test_download_manager.py + 19 in test_downloader_error_paths.py)
|
||||
- No regressions introduced
|
||||
|
||||
### Notes
|
||||
- Over-mocking fix in `test_download_manager.py` deferred to Phase 2 as it requires significant refactoring
|
||||
- Error path tests focus on unit-level testing of downloader components rather than complex integration scenarios
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Critical Fixes (P0) - Week 1-2
|
||||
|
||||
### 1.1 Fix Over-Mocking Issues
|
||||
|
||||
**Problem:** Tests mock the methods they purport to test, hiding real bugs.
|
||||
|
||||
**Affected Files:**
|
||||
- `tests/services/test_download_manager.py` - Mocks `_execute_download`
|
||||
- `tests/utils/test_example_images_download_manager_unit.py` - Mocks callbacks
|
||||
- `tests/routes/test_base_model_routes_smoke.py` - Uses fake service stubs
|
||||
|
||||
**Actions:**
|
||||
1. Refactor `test_download_manager.py` to test actual download logic
|
||||
2. Replace method-level mocks with dependency injection
|
||||
3. Add integration tests that verify real behavior
|
||||
|
||||
**Example Fix:**
|
||||
```python
|
||||
# BEFORE (Bad - mocks method under test)
|
||||
async def fake_execute_download(self, **kwargs):
|
||||
return {"success": True}
|
||||
monkeypatch.setattr(DownloadManager, "_execute_download", fake_execute_download)
|
||||
|
||||
# AFTER (Good - tests actual logic with injected dependencies)
|
||||
async def test_download_executes_with_real_logic(
|
||||
tmp_path, mock_downloader, mock_websocket
|
||||
):
|
||||
manager = DownloadManager(
|
||||
downloader=mock_downloader,
|
||||
ws_manager=mock_websocket
|
||||
)
|
||||
result = await manager._execute_download(urls=["http://test.com/file.safetensors"])
|
||||
assert result.success is True
|
||||
assert mock_downloader.download_calls == 1
|
||||
```
|
||||
|
||||
### 1.2 Add Missing Error Path Tests
|
||||
|
||||
**Problem:** Error handling code is not tested, leading to production failures.
|
||||
|
||||
**Required Tests:**
|
||||
|
||||
| Error Type | Module | Priority |
|
||||
|------------|--------|----------|
|
||||
| Network timeout | `downloader.py` | P0 |
|
||||
| Disk full | `download_manager.py` | P0 |
|
||||
| Permission denied | `example_images_download_manager.py` | P0 |
|
||||
| Session refresh failure | `downloader.py` | P1 |
|
||||
| Partial file cleanup | `download_manager.py` | P1 |
|
||||
|
||||
**Implementation:**
|
||||
```python
|
||||
@pytest.mark.asyncio
|
||||
async def test_download_handles_network_timeout():
|
||||
"""Verify download retries on timeout and eventually fails gracefully."""
|
||||
# Arrange
|
||||
downloader = Downloader()
|
||||
mock_session = AsyncMock()
|
||||
mock_session.get.side_effect = asyncio.TimeoutError()
|
||||
|
||||
# Act
|
||||
success, message = await downloader.download_file(
|
||||
url="http://test.com/file.safetensors",
|
||||
target_path=tmp_path / "test.safetensors",
|
||||
session=mock_session
|
||||
)
|
||||
|
||||
# Assert
|
||||
assert success is False
|
||||
assert "timeout" in message.lower()
|
||||
assert mock_session.get.call_count == MAX_RETRIES
|
||||
```
|
||||
|
||||
### 1.3 Standardize Async Test Patterns
|
||||
|
||||
**Problem:** Inconsistent async test patterns across codebase.
|
||||
|
||||
**Current State:**
|
||||
- Some use `@pytest.mark.asyncio`
|
||||
- Some rely on custom `pytest_pyfunc_call` in conftest.py
|
||||
- Some use bare async functions
|
||||
|
||||
**Solution:**
|
||||
1. Add `pytest-asyncio` to requirements-dev.txt
|
||||
2. Update `pytest.ini`:
|
||||
```ini
|
||||
[pytest]
|
||||
asyncio_mode = auto
|
||||
asyncio_default_fixture_loop_scope = function
|
||||
```
|
||||
3. Remove custom `pytest_pyfunc_call` handler from conftest.py
|
||||
4. Bulk update all async tests to use `@pytest.mark.asyncio`
|
||||
|
||||
**Migration Script:**
|
||||
```bash
|
||||
# Find all async test functions missing decorator
|
||||
rg "^async def test_" tests/ --type py -A1 | grep -B1 "@pytest.mark" | grep "async def"
|
||||
|
||||
# Add decorator (manual review required)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Integration & Coverage (P1) - Week 3-4
|
||||
|
||||
### 2.1 Add Critical Module Tests
|
||||
|
||||
**Priority 1: `py/services/model_lifecycle_service.py`**
|
||||
```python
|
||||
# tests/services/test_model_lifecycle_service.py
|
||||
class TestModelLifecycleService:
|
||||
async def test_create_model_registers_in_cache(self):
|
||||
"""Verify new model is registered in both cache and database."""
|
||||
|
||||
async def test_delete_model_cleans_up_files_and_cache(self):
|
||||
"""Verify deletion removes files and updates all indexes."""
|
||||
|
||||
async def test_update_model_metadata_propagates_changes(self):
|
||||
"""Verify metadata updates reach all subscribers."""
|
||||
```
|
||||
|
||||
**Priority 2: `py/services/persistent_recipe_cache.py`**
|
||||
```python
|
||||
# tests/services/test_persistent_recipe_cache.py
|
||||
class TestPersistentRecipeCache:
|
||||
def test_initialization_creates_schema(self):
|
||||
"""Verify SQLite schema is created on first use."""
|
||||
|
||||
async def test_save_recipe_persists_to_sqlite(self):
|
||||
"""Verify recipe data is saved correctly."""
|
||||
|
||||
async def test_concurrent_access_does_not_corrupt_database(self):
|
||||
"""Verify thread safety under concurrent writes."""
|
||||
```
|
||||
|
||||
**Priority 3: Route Handler Tests**
|
||||
- `py/routes/handlers/preview_handlers.py`
|
||||
- `py/routes/handlers/misc_handlers.py`
|
||||
- `py/routes/handlers/model_handlers.py`
|
||||
|
||||
### 2.2 Add End-to-End Integration Tests
|
||||
|
||||
**Download Flow Integration Test:**
|
||||
```python
|
||||
# tests/integration/test_download_flow.py
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.asyncio
|
||||
async def test_complete_download_flow(tmp_path, test_server):
|
||||
"""
|
||||
Integration test covering:
|
||||
1. Route receives download request
|
||||
2. DownloadCoordinator schedules it
|
||||
3. DownloadManager executes actual download
|
||||
4. Downloader makes HTTP request (to test server)
|
||||
5. Progress is broadcast via WebSocket
|
||||
6. File is saved and cache updated
|
||||
"""
|
||||
# Setup test server with known file
|
||||
test_file = tmp_path / "test_model.safetensors"
|
||||
test_file.write_bytes(b"fake model data")
|
||||
|
||||
# Start download
|
||||
async with aiohttp.ClientSession() as session:
|
||||
response = await session.post(
|
||||
"http://localhost:8188/api/lm/download",
|
||||
json={"urls": [f"http://localhost:{test_server.port}/test_model.safetensors"]}
|
||||
)
|
||||
assert response.status == 200
|
||||
|
||||
# Verify file downloaded
|
||||
downloaded = tmp_path / "downloads" / "test_model.safetensors"
|
||||
assert downloaded.exists()
|
||||
assert downloaded.read_bytes() == b"fake model data"
|
||||
|
||||
# Verify WebSocket progress updates
|
||||
assert len(ws_manager.broadcasts) > 0
|
||||
assert any(b["status"] == "completed" for b in ws_manager.broadcasts)
|
||||
```
|
||||
|
||||
**Recipe Flow Integration Test:**
|
||||
```python
|
||||
# tests/integration/test_recipe_flow.py
|
||||
@pytest.mark.integration
|
||||
@pytest.mark.asyncio
|
||||
async def test_recipe_analysis_and_save_flow(tmp_path):
|
||||
"""
|
||||
Integration test covering:
|
||||
1. Import recipe from image
|
||||
2. Parse metadata and extract models
|
||||
3. Save to cache and database
|
||||
4. Retrieve and display
|
||||
"""
|
||||
```
|
||||
|
||||
### 2.3 Strengthen Assertions
|
||||
|
||||
**Replace loose assertions:**
|
||||
```python
|
||||
# BEFORE
|
||||
assert "mismatch" in message.lower()
|
||||
|
||||
# AFTER
|
||||
assert message == "File size mismatch. Expected: 1000 bytes, Got: 500 bytes"
|
||||
assert not target_path.exists()
|
||||
assert not Path(str(target_path) + ".part").exists()
|
||||
assert len(downloader.retry_history) == 3
|
||||
```
|
||||
|
||||
**Add state verification:**
|
||||
```python
|
||||
# BEFORE
|
||||
assert result is True
|
||||
|
||||
# AFTER
|
||||
assert result is True
|
||||
assert model["status"] == "downloaded"
|
||||
assert model["file_path"].exists()
|
||||
assert cache.get_by_hash(model["sha256"]) is not None
|
||||
assert len(ws_manager.payloads) >= 2 # Started + completed
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 4 Completion Summary (2026-02-11)
|
||||
|
||||
### Completed Items
|
||||
|
||||
1. **Property-Based Tests (Hypothesis)** ✅
|
||||
- Created `tests/utils/test_utils_hypothesis.py` with 19 property-based tests
|
||||
- Tests cover:
|
||||
- `sanitize_folder_name` idempotency and invalid character handling (4 tests)
|
||||
- `_sanitize_library_name` idempotency and safe character filtering (2 tests)
|
||||
- `normalize_path` idempotency and forward slash usage (2 tests)
|
||||
- `fuzzy_match` edge cases and threshold behavior (3 tests)
|
||||
- `determine_base_model` return type guarantees (2 tests)
|
||||
- `get_preview_extension` return type validation (2 tests)
|
||||
- `calculate_recipe_fingerprint` determinism and ordering (4 tests)
|
||||
- Fixed Hypothesis plugin compatibility issue by creating a `MockModule` class in `conftest.py` that is hashable (unlike `types.SimpleNamespace`)
|
||||
|
||||
2. **Snapshot Tests (Syrupy)** ✅
|
||||
- Created `tests/routes/test_api_snapshots.py` with 7 snapshot tests
|
||||
- Tests cover:
|
||||
- SettingsHandler response formats (2 tests)
|
||||
- NodeRegistryHandler response formats (2 tests)
|
||||
- Utility function output verification (2 tests)
|
||||
- ModelLibraryHandler empty response format (1 test)
|
||||
- All snapshots generated and tests passing (7/7)
|
||||
|
||||
3. **Performance Benchmarks** ✅
|
||||
- Created `tests/performance/test_cache_performance.py` with 11 benchmark tests
|
||||
- Tests cover:
|
||||
- Hash index lookup performance (100, 1K, 10K models) - 3 tests
|
||||
- Hash index add entry performance (100, 10K existing) - 2 tests
|
||||
- Fuzzy matching performance (short text, long text, many words) - 3 tests
|
||||
- Recipe fingerprint calculation (5, 50, 200 LoRAs) - 3 tests
|
||||
- All benchmarks passing with performance metrics (11/11)
|
||||
|
||||
4. **Package Dependencies** ✅
|
||||
- Added `hypothesis>=6.0` to `requirements-dev.txt`
|
||||
- Added `syrupy>=5.0` to `requirements-dev.txt`
|
||||
- Added `pytest-benchmark>=5.0` to `requirements-dev.txt`
|
||||
|
||||
### Test Results
|
||||
- **Property-Based Tests:** 19/19 passing
|
||||
- **Snapshot Tests:** 7/7 passing
|
||||
- **Performance Benchmarks:** 11/11 passing
|
||||
- **Total New Tests Added:** 37 tests
|
||||
- **Full Test Suite:** 947/947 passing
|
||||
|
||||
---
|
||||
|
||||
## Phase 3 Completion Summary (2026-02-11)
|
||||
|
||||
### Completed Items
|
||||
|
||||
1. **Centralized Test Fixtures** ✅
|
||||
- Added `mock_downloader` fixture to `tests/conftest.py`
|
||||
- Configurable mock with `should_fail` and `return_value` attributes
|
||||
- Records all download calls for verification
|
||||
- Added `mock_websocket_manager` fixture to `tests/conftest.py`
|
||||
- Recording WebSocket manager that captures all broadcast payloads
|
||||
- Includes helper method `get_payloads_by_type()` for filtering
|
||||
- Added `reset_singletons` autouse fixture to `tests/conftest.py`
|
||||
- Resets DownloadManager, ServiceRegistry, ModelScanner, and SettingsManager
|
||||
- Ensures test isolation and prevents singleton pollution
|
||||
|
||||
2. **Split Large Test Files** ✅
|
||||
- Split `tests/services/test_download_manager.py` (1422 lines) into:
|
||||
- `test_download_manager_basic.py` - Core functionality (12 tests)
|
||||
- `test_download_manager_error.py` - Error handling and execution (15 tests)
|
||||
- `test_download_manager_concurrent.py` - Advanced scenarios (6 tests)
|
||||
- Split `tests/utils/test_cache_paths.py` (530 lines) into:
|
||||
- `test_cache_paths_resolution.py` - Path resolution and CacheType tests (11 tests)
|
||||
- `test_cache_paths_validation.py` - Legacy path validation and cleanup (9 tests)
|
||||
- `test_cache_paths_migration.py` - Migration scenarios and auto-cleanup (9 tests)
|
||||
|
||||
3. **Complex Test Refactoring** ✅
|
||||
- Reviewed `test_example_images_download_manager_unit.py`
|
||||
- Existing async event-based patterns are appropriate for testing concurrent behavior
|
||||
- No refactoring needed - tests follow consistent patterns and are maintainable
|
||||
|
||||
### Test Results
|
||||
- **Download Manager Tests:** 33/33 passing across 3 files
|
||||
- **Cache Paths Tests:** 29/29 passing across 3 files
|
||||
- **Total Tests Maintained:** All existing tests preserved and organized
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Architecture & Maintainability (P2) - Week 5-6
|
||||
|
||||
### 3.1 Centralize Test Fixtures
|
||||
|
||||
**Create `tests/conftest.py` improvements:**
|
||||
|
||||
```python
|
||||
# tests/conftest.py additions
|
||||
|
||||
@pytest.fixture
|
||||
def mock_downloader():
|
||||
"""Provide a configurable mock downloader."""
|
||||
class MockDownloader:
|
||||
def __init__(self):
|
||||
self.download_calls = []
|
||||
self.should_fail = False
|
||||
|
||||
async def download_file(self, url, target_path, **kwargs):
|
||||
self.download_calls.append({"url": url, "target_path": target_path})
|
||||
if self.should_fail:
|
||||
return False, "Download failed"
|
||||
return True, str(target_path)
|
||||
|
||||
return MockDownloader()
|
||||
|
||||
@pytest.fixture
|
||||
def mock_websocket_manager():
|
||||
"""Provide a recording WebSocket manager."""
|
||||
class RecordingWebSocketManager:
|
||||
def __init__(self):
|
||||
self.payloads = []
|
||||
|
||||
async def broadcast(self, payload):
|
||||
self.payloads.append(payload)
|
||||
|
||||
return RecordingWebSocketManager()
|
||||
|
||||
@pytest.fixture
|
||||
def mock_scanner():
|
||||
"""Provide a mock model scanner with configurable cache."""
|
||||
# ... existing MockScanner but improved ...
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def reset_singletons():
|
||||
"""Reset all singletons before each test."""
|
||||
# Centralized singleton reset
|
||||
DownloadManager._instance = None
|
||||
ServiceRegistry.clear_services()
|
||||
ModelScanner._instances.clear()
|
||||
yield
|
||||
# Cleanup
|
||||
DownloadManager._instance = None
|
||||
ServiceRegistry.clear_services()
|
||||
ModelScanner._instances.clear()
|
||||
```
|
||||
|
||||
### 3.2 Split Large Test Files
|
||||
|
||||
**Target Files:**
|
||||
- `tests/services/test_download_manager.py` (1000+ lines) → Split into:
|
||||
- `test_download_manager_basic.py` - Core functionality
|
||||
- `test_download_manager_error.py` - Error handling
|
||||
- `test_download_manager_concurrent.py` - Concurrent operations
|
||||
|
||||
- `tests/utils/test_cache_paths.py` (529 lines) → Split into:
|
||||
- `test_cache_paths_resolution.py`
|
||||
- `test_cache_paths_validation.py`
|
||||
- `test_cache_paths_migration.py`
|
||||
|
||||
### 3.3 Refactor Complex Tests
|
||||
|
||||
**Example: Simplify test setup in `test_example_images_download_manager_unit.py`**
|
||||
|
||||
**Current (Complex):**
|
||||
```python
|
||||
async def test_start_download_bootstraps_progress_and_task(
|
||||
monkeypatch: pytest.MonkeyPatch, tmp_path
|
||||
):
|
||||
# 40+ lines of setup
|
||||
started = asyncio.Event()
|
||||
release = asyncio.Event()
|
||||
|
||||
async def fake_download(self, ...):
|
||||
started.set()
|
||||
await release.wait()
|
||||
# ... more logic ...
|
||||
```
|
||||
|
||||
**Improved (Using fixtures):**
|
||||
```python
|
||||
async def test_start_download_bootstraps_progress_and_task(
|
||||
download_manager_with_fake_backend, release_event
|
||||
):
|
||||
# Setup in fixtures, test is clean
|
||||
manager = download_manager_with_fake_backend
|
||||
result = await manager.start_download({"model_types": ["lora"]})
|
||||
assert result["success"] is True
|
||||
assert manager._is_downloading is True
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Advanced Testing (P3) - Week 7-8
|
||||
|
||||
### 4.1 Add Property-Based Tests (Hypothesis)
|
||||
|
||||
**Install:** `pip install hypothesis`
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
# tests/utils/test_hash_utils_hypothesis.py
|
||||
from hypothesis import given, strategies as st
|
||||
|
||||
@given(st.text(min_size=1, max_size=100))
|
||||
def test_hash_normalization_idempotent(name):
|
||||
"""Hash normalization should be idempotent."""
|
||||
normalized = normalize_hash(name)
|
||||
assert normalize_hash(normalized) == normalized
|
||||
|
||||
@given(st.lists(st.dictionaries(st.text(), st.text()), min_size=0, max_size=1000))
|
||||
def test_model_cache_handles_any_model_list(models):
|
||||
"""Cache should handle any list of models without crashing."""
|
||||
cache = ModelCache()
|
||||
cache.raw_data = models
|
||||
# Should not raise
|
||||
list(cache.iter_models())
|
||||
```
|
||||
|
||||
### 4.2 Add Snapshot Tests (Syrupy)
|
||||
|
||||
**Install:** `pip install syrupy`
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
# tests/routes/test_api_snapshots.py
|
||||
import pytest
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_lora_list_response_format(snapshot, client):
|
||||
"""Verify API response format matches snapshot."""
|
||||
response = await client.get("/api/lm/loras")
|
||||
data = await response.json()
|
||||
assert data == snapshot # Syrupy handles this
|
||||
```
|
||||
|
||||
### 4.3 Add Performance Benchmarks
|
||||
|
||||
**Install:** `pip install pytest-benchmark`
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
# tests/performance/test_cache_performance.py
|
||||
import pytest
|
||||
|
||||
def test_cache_lookup_performance(benchmark):
|
||||
"""Benchmark cache lookup with 10,000 models."""
|
||||
cache = create_cache_with_n_models(10000)
|
||||
|
||||
result = benchmark(lambda: cache.get_by_hash("abc123"))
|
||||
# Benchmark automatically collects timing stats
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Checklist
|
||||
|
||||
### Week 1-2: Critical Fixes
|
||||
- [x] Fix over-mocking in `test_download_manager.py` (Skipped - requires major refactoring, see Phase 2)
|
||||
- [x] Add network timeout tests (Added `test_downloader_error_paths.py` with 19 error path tests)
|
||||
- [x] Add disk full error tests (Covered in error path tests)
|
||||
- [x] Add permission denied tests (Covered in error path tests)
|
||||
- [x] Install and configure pytest-asyncio (Added to requirements-dev.txt and pytest.ini)
|
||||
- [x] Remove custom pytest_pyfunc_call handler (Removed from conftest.py)
|
||||
- [x] Add `@pytest.mark.asyncio` to all async tests (Added to 21 async test functions in test_download_manager.py)
|
||||
|
||||
### Week 3-4: Integration & Coverage
|
||||
- [x] Create `test_model_lifecycle_service.py` tests (12 new tests added)
|
||||
- [x] Create `test_persistent_recipe_cache.py` tests (5 new concurrent access tests added)
|
||||
- [x] Create `tests/integration/` directory (created with conftest.py)
|
||||
- [x] Add download flow integration test (7 tests added)
|
||||
- [x] Add recipe flow integration test (9 tests added)
|
||||
- [x] Add route handler tests for preview_handlers.py (already exists in test_preview_routes.py)
|
||||
- [x] Strengthen assertions across integration tests (comprehensive assertions added)
|
||||
|
||||
### Week 5-6: Architecture
|
||||
- [x] Add centralized fixtures to conftest.py
|
||||
- [x] Split `test_download_manager.py` into 3 files
|
||||
- [x] Split `test_cache_paths.py` into 3 files
|
||||
- [x] Refactor complex test setups (reviewed - no changes needed)
|
||||
- [x] Remove duplicate singleton reset fixtures (consolidated in conftest.py)
|
||||
|
||||
### Week 7-8: Advanced Testing
|
||||
- [x] Install hypothesis (Added to requirements-dev.txt)
|
||||
- [x] Add 10 property-based tests (Created 19 tests in test_utils_hypothesis.py)
|
||||
- [x] Install syrupy (Added to requirements-dev.txt)
|
||||
- [x] Add 5 snapshot tests (Created 7 tests in test_api_snapshots.py)
|
||||
- [x] Install pytest-benchmark (Added to requirements-dev.txt)
|
||||
- [x] Add 3 performance benchmarks (Created 11 tests in test_cache_performance.py)
|
||||
|
||||
---
|
||||
|
||||
## Success Metrics
|
||||
|
||||
### Quantitative
|
||||
- **Code Coverage:** Increase from ~70% to >90%
|
||||
- **Test Count:** Increase from 400+ to 600+
|
||||
- **Assertion Strength:** Replace 50+ weak assertions
|
||||
- **Integration Test Ratio:** Increase from 5% to 20%
|
||||
|
||||
### Qualitative
|
||||
- **Bug Escape Rate:** Reduce by 80%
|
||||
- **Test Maintenance Time:** Reduce by 50%
|
||||
- **Time to Write New Tests:** Reduce by 30%
|
||||
- **CI Pipeline Speed:** Maintain <5 minutes
|
||||
|
||||
---
|
||||
|
||||
## Risk Mitigation
|
||||
|
||||
| Risk | Mitigation |
|
||||
|------|------------|
|
||||
| Breaking existing tests | Run full test suite after each change |
|
||||
| Increased CI time | Optimize tests, parallelize execution |
|
||||
| Developer resistance | Provide training, pair programming |
|
||||
| Maintenance burden | Document patterns, provide templates |
|
||||
| Coverage gaps | Use coverage.py in CI, fail on <90% |
|
||||
|
||||
---
|
||||
|
||||
## Related Documents
|
||||
|
||||
- `docs/testing/frontend-testing-roadmap.md` - Frontend testing plan
|
||||
- `docs/AGENTS.md` - Development guidelines
|
||||
- `pytest.ini` - Test configuration
|
||||
- `tests/conftest.py` - Shared fixtures
|
||||
|
||||
---
|
||||
|
||||
## Approval
|
||||
|
||||
| Role | Name | Date | Signature |
|
||||
|------|------|------|-----------|
|
||||
| Tech Lead | | | |
|
||||
| QA Lead | | | |
|
||||
| Product Owner | | | |
|
||||
|
||||
---
|
||||
|
||||
**Next Review Date:** 2026-02-25
|
||||
|
||||
**Document Owner:** Backend Team
|
||||
BIN
example_workflows/Lora_Cycler.jpg
Normal file
BIN
example_workflows/Lora_Cycler.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 657 KiB |
1
example_workflows/Lora_Cycler.json
Normal file
1
example_workflows/Lora_Cycler.json
Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -10,7 +10,8 @@
|
||||
"next": "Weiter",
|
||||
"backToTop": "Nach oben",
|
||||
"settings": "Einstellungen",
|
||||
"help": "Hilfe"
|
||||
"help": "Hilfe",
|
||||
"add": "Hinzufügen"
|
||||
},
|
||||
"status": {
|
||||
"loading": "Wird geladen...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "Update",
|
||||
"updateAvailable": "Update verfügbar"
|
||||
"updateAvailable": "Update verfügbar",
|
||||
"skipRefresh": "Metadaten-Aktualisierung übersprungen"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "Verwendungsanzahl"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "Modelle filtern",
|
||||
"presets": "Voreinstellungen",
|
||||
"savePreset": "Aktive Filter als neue Voreinstellung speichern.",
|
||||
"savePresetDisabledActive": "Speichern nicht möglich: Eine Voreinstellung ist bereits aktiv. Ändern Sie die Filter, um eine neue Voreinstellung zu speichern",
|
||||
"savePresetDisabledNoFilters": "Wählen Sie zuerst Filter aus, um als Voreinstellung zu speichern",
|
||||
"savePresetPrompt": "Voreinstellungsname eingeben:",
|
||||
"presetClickTooltip": "Voreinstellung \"{name}\" anwenden",
|
||||
"presetDeleteTooltip": "Voreinstellung löschen",
|
||||
"presetDeleteConfirm": "Voreinstellung \"{name}\" löschen?",
|
||||
"presetDeleteConfirmClick": "Zum Bestätigen erneut klicken",
|
||||
"presetOverwriteConfirm": "Voreinstellung \"{name}\" existiert bereits. Überschreiben?",
|
||||
"presetNamePlaceholder": "Voreinstellungsname...",
|
||||
"baseModel": "Basis-Modell",
|
||||
"modelTags": "Tags (Top 20)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "Kein Credit erforderlich",
|
||||
"allowSellingGeneratedContent": "Verkauf erlaubt",
|
||||
"noTags": "Keine Tags",
|
||||
"clearAll": "Alle Filter löschen"
|
||||
"clearAll": "Alle Filter löschen",
|
||||
"any": "Beliebig",
|
||||
"all": "Alle",
|
||||
"tagLogicAny": "Jedes Tag abgleichen (ODER)",
|
||||
"tagLogicAll": "Alle Tags abgleichen (UND)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "Theme wechseln",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "Fehler beim Speichern der Ausschlüsse: {message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "Metadaten-Aktualisierung: Übersprungene Pfade",
|
||||
"placeholder": "Beispiel: temp, archived/old, test_models",
|
||||
"help": "Modelle in diesen Verzeichnispfaden bei der Massenaktualisierung der Metadaten (\"Alle Metadaten abrufen\") überspringen. Geben Sie Ordnerpfade relativ zum Modell-Stammverzeichnis ein, getrennt durch Kommas.",
|
||||
"validation": {
|
||||
"noPaths": "Geben Sie mindestens einen durch Kommas getrennten Pfad ein.",
|
||||
"saveFailed": "Übersprungene Pfade konnten nicht gespeichert werden: {message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Anzeige-Dichte",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "Jede verfügbare Aktualisierung markieren"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "Früher Zugriff Updates ausblenden",
|
||||
"help": "Nur Early-Access-Updates"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "Trigger Words in LoRA-Syntax einschließen",
|
||||
"includeTriggerWordsHelp": "Trainierte Trigger Words beim Kopieren der LoRA-Syntax in die Zwischenablage einschließen"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "Auswahl auf Updates prüfen",
|
||||
"moveAll": "Alle in Ordner verschieben",
|
||||
"autoOrganize": "Automatisch organisieren",
|
||||
"skipMetadataRefresh": "Metadaten-Aktualisierung für ausgewählte Modelle überspringen",
|
||||
"resumeMetadataRefresh": "Metadaten-Aktualisierung für ausgewählte Modelle fortsetzen",
|
||||
"deleteAll": "Alle Modelle löschen",
|
||||
"clear": "Auswahl löschen",
|
||||
"skipMetadataRefreshCount": "Überspringen({count} Modelle)",
|
||||
"resumeMetadataRefreshCount": "Fortsetzen({count} Modelle)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Automatische Organisation wird initialisiert...",
|
||||
"starting": "Automatische Organisation für {type} wird gestartet...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "Unbenannte Version",
|
||||
"noDetails": "Keine zusätzlichen Details"
|
||||
"noDetails": "Keine zusätzlichen Details",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "bald endend",
|
||||
"hours": "in {count}h",
|
||||
"days": "in {count}d"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Aktuelle Version",
|
||||
"inLibrary": "In der Bibliothek",
|
||||
"newer": "Neuere Version",
|
||||
"earlyAccess": "Früher Zugriff",
|
||||
"ignored": "Ignoriert"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "Löschen",
|
||||
"ignore": "Ignorieren",
|
||||
"unignore": "Ignorierung aufheben",
|
||||
"earlyAccessTooltip": "Erfordert Early-Access-Kauf",
|
||||
"resumeModelUpdates": "Aktualisierungen für dieses Modell fortsetzen",
|
||||
"ignoreModelUpdates": "Aktualisierungen für dieses Modell ignorieren",
|
||||
"viewLocalVersions": "Alle lokalen Versionen anzeigen",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "Beispielbilder-Ordner geöffnet",
|
||||
"openingFolder": "Beispielbilder-Ordner wird geöffnet",
|
||||
"failedToOpen": "Fehler beim Öffnen des Beispielbilder-Ordners"
|
||||
"failedToOpen": "Fehler beim Öffnen des Beispielbilder-Ordners",
|
||||
"setupRequired": "Beispielbilder-Speicher",
|
||||
"setupDescription": "Um benutzerdefinierte Beispielbilder hinzuzufügen, müssen Sie zuerst einen Download-Speicherort festlegen.",
|
||||
"setupUsage": "Dieser Pfad wird sowohl für heruntergeladene als auch für benutzerdefinierte Beispielbilder verwendet.",
|
||||
"openSettings": "Einstellungen öffnen"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "Nach Updates wird gesucht...",
|
||||
"checkingMessage": "Bitte warten Sie, während wir nach der neuesten Version suchen.",
|
||||
"showNotifications": "Update-Benachrichtigungen anzeigen",
|
||||
"latestBadge": "Neueste",
|
||||
"updateProgress": {
|
||||
"preparing": "Update wird vorbereitet...",
|
||||
"installing": "Update wird installiert...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "Basis-Modell erfolgreich für {count} Modell(e) aktualisiert",
|
||||
"bulkBaseModelUpdatePartial": "{success} Modelle aktualisiert, {failed} fehlgeschlagen",
|
||||
"bulkBaseModelUpdateFailed": "Aktualisierung des Basis-Modells für ausgewählte Modelle fehlgeschlagen",
|
||||
"skipMetadataRefreshUpdating": "Aktualisiere Metadaten-Aktualisierungs-Flag für {count} Modell(e)...",
|
||||
"skipMetadataRefreshSet": "Metadaten-Aktualisierung für {count} Modell(e) übersprungen",
|
||||
"skipMetadataRefreshCleared": "Metadaten-Aktualisierung für {count} Modell(e) fortgesetzt",
|
||||
"skipMetadataRefreshPartial": "{success} Modell(e) aktualisiert, {failed} fehlgeschlagen",
|
||||
"skipMetadataRefreshFailed": "Fehler beim Aktualisieren des Metadaten-Aktualisierungs-Flags für ausgewählte Modelle",
|
||||
"bulkContentRatingUpdating": "Inhaltsbewertung wird für {count} Modell(e) aktualisiert...",
|
||||
"bulkContentRatingSet": "Inhaltsbewertung auf {level} für {count} Modell(e) gesetzt",
|
||||
"bulkContentRatingPartial": "Inhaltsbewertung auf {level} für {success} Modell(e) gesetzt, {failed} fehlgeschlagen",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "Filter gelöscht",
|
||||
"noCustomFilterToClear": "Kein benutzerdefinierter Filter zum Löschen"
|
||||
"noCustomFilterToClear": "Kein benutzerdefinierter Filter zum Löschen",
|
||||
"noActiveFilters": "Keine aktiven Filter zum Speichern"
|
||||
},
|
||||
"presets": {
|
||||
"created": "Voreinstellung \"{name}\" erstellt",
|
||||
"deleted": "Voreinstellung \"{name}\" gelöscht",
|
||||
"applied": "Voreinstellung \"{name}\" angewendet",
|
||||
"overwritten": "Voreinstellung \"{name}\" überschrieben",
|
||||
"restored": "Standard-Voreinstellungen wiederhergestellt"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "Voreinstellungsname darf nicht leer sein",
|
||||
"presetNameTooLong": "Voreinstellungsname darf maximal {max} Zeichen haben",
|
||||
"presetNameInvalidChars": "Voreinstellungsname enthält ungültige Zeichen",
|
||||
"presetNameExists": "Eine Voreinstellung mit diesem Namen existiert bereits",
|
||||
"maxPresetsReached": "Maximal {max} Voreinstellungen erlaubt. Löschen Sie eine, um weitere hinzuzufügen.",
|
||||
"presetNotFound": "Voreinstellung nicht gefunden",
|
||||
"invalidPreset": "Ungültige Voreinstellungsdaten",
|
||||
"deletePresetFailed": "Fehler beim Löschen der Voreinstellung",
|
||||
"applyPresetFailed": "Fehler beim Anwenden der Voreinstellung"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "Beispielbilder {action} abgeschlossen",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "Fehler beim Laden des Ordnerbaums",
|
||||
"folderTreeError": "Fehler beim Laden des Ordnerbaums",
|
||||
"imagesImported": "Beispielbilder erfolgreich importiert",
|
||||
"imagesPartial": "{success} Bild(er) importiert, {failed} fehlgeschlagen",
|
||||
"importFailed": "Fehler beim Importieren der Beispielbilder: {message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "LoRA Manager is a passion project maintained full-time by a solo developer. Your support on Ko-fi helps cover development costs, keeps new updates coming, and unlocks a license key for the LM Civitai Extension as a thank-you gift. Every contribution truly makes a difference.",
|
||||
"supportCta": "Support on Ko-fi",
|
||||
"learnMore": "LM Civitai Extension Tutorial"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "Cache-Korruption erkannt"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "Cache-Probleme erkannt"
|
||||
},
|
||||
"content": "{invalid} von {total} Cache-Einträgen sind ungültig ({rate}). Dies kann zu fehlenden Modellen oder Fehlern führen. Ein Neuaufbau des Caches wird empfohlen.",
|
||||
"rebuildCache": "Cache neu aufbauen",
|
||||
"dismiss": "Verwerfen",
|
||||
"rebuilding": "Cache wird neu aufgebaut...",
|
||||
"rebuildFailed": "Fehler beim Neuaufbau des Caches: {error}",
|
||||
"retry": "Wiederholen"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@
|
||||
"next": "Next",
|
||||
"backToTop": "Back to top",
|
||||
"settings": "Settings",
|
||||
"help": "Help"
|
||||
"help": "Help",
|
||||
"add": "Add"
|
||||
},
|
||||
"status": {
|
||||
"loading": "Loading...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "Update",
|
||||
"updateAvailable": "Update available"
|
||||
"updateAvailable": "Update available",
|
||||
"skipRefresh": "Metadata refresh skipped"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "Times used"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "Filter Models",
|
||||
"presets": "Presets",
|
||||
"savePreset": "Save current active filters as a new preset.",
|
||||
"savePresetDisabledActive": "Cannot save: A preset is already active. Modify filters to save new preset.",
|
||||
"savePresetDisabledNoFilters": "Select filters first to save as preset",
|
||||
"savePresetPrompt": "Enter preset name:",
|
||||
"presetClickTooltip": "Click to apply preset \"{name}\"",
|
||||
"presetDeleteTooltip": "Delete preset",
|
||||
"presetDeleteConfirm": "Delete preset \"{name}\"?",
|
||||
"presetDeleteConfirmClick": "Click again to confirm",
|
||||
"presetOverwriteConfirm": "Preset \"{name}\" already exists. Overwrite?",
|
||||
"presetNamePlaceholder": "Preset name...",
|
||||
"baseModel": "Base Model",
|
||||
"modelTags": "Tags (Top 20)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "No Credit Required",
|
||||
"allowSellingGeneratedContent": "Allow Selling",
|
||||
"noTags": "No tags",
|
||||
"clearAll": "Clear All Filters"
|
||||
"clearAll": "Clear All Filters",
|
||||
"any": "Any",
|
||||
"all": "All",
|
||||
"tagLogicAny": "Match any tag (OR)",
|
||||
"tagLogicAll": "Match all tags (AND)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "Toggle theme",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "Unable to save exclusions: {message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "Metadata refresh skip paths",
|
||||
"placeholder": "Example: temp, archived/old, test_models",
|
||||
"help": "Skip models in these directory paths during bulk metadata refresh (\"Fetch All Metadata\"). Enter folder paths relative to your model root directory, separated by commas.",
|
||||
"validation": {
|
||||
"noPaths": "Enter at least one path separated by commas.",
|
||||
"saveFailed": "Unable to save skip paths: {message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Display Density",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "Flag any available update"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "Hide Early Access Updates",
|
||||
"help": "When enabled, models with only early access updates will not show 'Update available' badge"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "Include Trigger Words in LoRA Syntax",
|
||||
"includeTriggerWordsHelp": "Include trained trigger words when copying LoRA syntax to clipboard"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "Check Updates for Selected",
|
||||
"moveAll": "Move Selected to Folder",
|
||||
"autoOrganize": "Auto-Organize Selected",
|
||||
"skipMetadataRefresh": "Skip Metadata Refresh for Selected",
|
||||
"resumeMetadataRefresh": "Resume Metadata Refresh for Selected",
|
||||
"deleteAll": "Delete Selected Models",
|
||||
"clear": "Clear Selection",
|
||||
"skipMetadataRefreshCount": "Skip ({count} models)",
|
||||
"resumeMetadataRefreshCount": "Resume ({count} models)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Initializing auto-organize...",
|
||||
"starting": "Starting auto-organize for {type}...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "Untitled Version",
|
||||
"noDetails": "No additional details"
|
||||
"noDetails": "No additional details",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "ending soon",
|
||||
"hours": "in {count}h",
|
||||
"days": "in {count}d"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Current Version",
|
||||
"inLibrary": "In Library",
|
||||
"newer": "Newer Version",
|
||||
"earlyAccess": "Early Access",
|
||||
"ignored": "Ignored"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "Delete",
|
||||
"ignore": "Ignore",
|
||||
"unignore": "Unignore",
|
||||
"earlyAccessTooltip": "Requires early access purchase",
|
||||
"resumeModelUpdates": "Resume updates for this model",
|
||||
"ignoreModelUpdates": "Ignore updates for this model",
|
||||
"viewLocalVersions": "View all local versions",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "Example images folder opened",
|
||||
"openingFolder": "Opening example images folder",
|
||||
"failedToOpen": "Failed to open example images folder"
|
||||
"failedToOpen": "Failed to open example images folder",
|
||||
"setupRequired": "Example Images Storage",
|
||||
"setupDescription": "To add custom example images, you need to set a download location first.",
|
||||
"setupUsage": "This path is used for both downloaded and custom example images.",
|
||||
"openSettings": "Open Settings"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "Checking for updates...",
|
||||
"checkingMessage": "Please wait while we check for the latest version.",
|
||||
"showNotifications": "Show update notifications",
|
||||
"latestBadge": "Latest",
|
||||
"updateProgress": {
|
||||
"preparing": "Preparing update...",
|
||||
"installing": "Installing update...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "Successfully updated base model for {count} model(s)",
|
||||
"bulkBaseModelUpdatePartial": "Updated {success} model(s), failed {failed} model(s)",
|
||||
"bulkBaseModelUpdateFailed": "Failed to update base model for selected models",
|
||||
"skipMetadataRefreshUpdating": "Updating metadata refresh flag for {count} model(s)...",
|
||||
"skipMetadataRefreshSet": "Metadata refresh skipped for {count} model(s)",
|
||||
"skipMetadataRefreshCleared": "Metadata refresh resumed for {count} model(s)",
|
||||
"skipMetadataRefreshPartial": "Updated {success} model(s), {failed} failed",
|
||||
"skipMetadataRefreshFailed": "Failed to update metadata refresh flag for selected models",
|
||||
"bulkContentRatingUpdating": "Updating content rating for {count} model(s)...",
|
||||
"bulkContentRatingSet": "Set content rating to {level} for {count} model(s)",
|
||||
"bulkContentRatingPartial": "Set content rating to {level} for {success} model(s), {failed} failed",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "Filters cleared",
|
||||
"noCustomFilterToClear": "No custom filter to clear"
|
||||
"noCustomFilterToClear": "No custom filter to clear",
|
||||
"noActiveFilters": "No active filters to save"
|
||||
},
|
||||
"presets": {
|
||||
"created": "Preset \"{name}\" created",
|
||||
"deleted": "Preset \"{name}\" deleted",
|
||||
"applied": "Preset \"{name}\" applied",
|
||||
"overwritten": "Preset \"{name}\" overwritten",
|
||||
"restored": "Default presets restored"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "Preset name cannot be empty",
|
||||
"presetNameTooLong": "Preset name must be {max} characters or less",
|
||||
"presetNameInvalidChars": "Preset name contains invalid characters",
|
||||
"presetNameExists": "A preset with this name already exists",
|
||||
"maxPresetsReached": "Maximum {max} presets allowed. Delete one to add more.",
|
||||
"presetNotFound": "Preset not found",
|
||||
"invalidPreset": "Invalid preset data",
|
||||
"deletePresetFailed": "Failed to delete preset",
|
||||
"applyPresetFailed": "Failed to apply preset"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "Example images {action} completed",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "Failed to load folder tree",
|
||||
"folderTreeError": "Error loading folder tree",
|
||||
"imagesImported": "Example images imported successfully",
|
||||
"imagesPartial": "{success} image(s) imported, {failed} failed",
|
||||
"importFailed": "Failed to import example images: {message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "LoRA Manager is a passion project maintained full-time by a solo developer. Your support on Ko-fi helps cover development costs, keeps new updates coming, and unlocks a license key for the LM Civitai Extension as a thank-you gift. Every contribution truly makes a difference.",
|
||||
"supportCta": "Support on Ko-fi",
|
||||
"learnMore": "LM Civitai Extension Tutorial"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "Cache Corruption Detected"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "Cache Issues Detected"
|
||||
},
|
||||
"content": "{invalid} of {total} cache entries are invalid ({rate}). This may cause missing models or errors. Rebuilding the cache is recommended.",
|
||||
"rebuildCache": "Rebuild Cache",
|
||||
"dismiss": "Dismiss",
|
||||
"rebuilding": "Rebuilding cache...",
|
||||
"rebuildFailed": "Failed to rebuild cache: {error}",
|
||||
"retry": "Retry"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -10,7 +10,8 @@
|
||||
"next": "Siguiente",
|
||||
"backToTop": "Volver arriba",
|
||||
"settings": "Configuración",
|
||||
"help": "Ayuda"
|
||||
"help": "Ayuda",
|
||||
"add": "Añadir"
|
||||
},
|
||||
"status": {
|
||||
"loading": "Cargando...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "Actualización",
|
||||
"updateAvailable": "Actualización disponible"
|
||||
"updateAvailable": "Actualización disponible",
|
||||
"skipRefresh": "Actualización de metadatos omitida"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "Veces usado"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "Filtrar modelos",
|
||||
"presets": "Preajustes",
|
||||
"savePreset": "Guardar filtros activos como nuevo preajuste.",
|
||||
"savePresetDisabledActive": "No se puede guardar: Ya hay un preajuste activo. Modifique los filtros para guardar un nuevo preajuste",
|
||||
"savePresetDisabledNoFilters": "Seleccione filtros primero para guardar como preajuste",
|
||||
"savePresetPrompt": "Ingrese el nombre del preajuste:",
|
||||
"presetClickTooltip": "Hacer clic para aplicar preajuste \"{name}\"",
|
||||
"presetDeleteTooltip": "Eliminar preajuste",
|
||||
"presetDeleteConfirm": "¿Eliminar preajuste \"{name}\"?",
|
||||
"presetDeleteConfirmClick": "Haga clic de nuevo para confirmar",
|
||||
"presetOverwriteConfirm": "El preset \"{name}\" ya existe. ¿Sobrescribir?",
|
||||
"presetNamePlaceholder": "Nombre del preajuste...",
|
||||
"baseModel": "Modelo base",
|
||||
"modelTags": "Etiquetas (Top 20)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "Sin crédito requerido",
|
||||
"allowSellingGeneratedContent": "Venta permitida",
|
||||
"noTags": "Sin etiquetas",
|
||||
"clearAll": "Limpiar todos los filtros"
|
||||
"clearAll": "Limpiar todos los filtros",
|
||||
"any": "Cualquiera",
|
||||
"all": "Todos",
|
||||
"tagLogicAny": "Coincidir con cualquier etiqueta (O)",
|
||||
"tagLogicAll": "Coincidir con todas las etiquetas (Y)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "Cambiar tema",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "No se pudieron guardar las exclusiones: {message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "Rutas a omitir en la actualización de metadatos",
|
||||
"placeholder": "Ejemplo: temp, archived/old, test_models",
|
||||
"help": "Omitir modelos en estas rutas de directorio durante la actualización masiva de metadatos (\"Obtener todos los metadatos\"). Ingrese rutas de carpetas relativas al directorio raíz de modelos, separadas por comas.",
|
||||
"validation": {
|
||||
"noPaths": "Ingrese al menos una ruta separada por comas.",
|
||||
"saveFailed": "No se pudieron guardar las rutas a omitir: {message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Densidad de visualización",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "Marcar cualquier actualización disponible"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "Ocultar actualizaciones de acceso temprano",
|
||||
"help": "Solo actualizaciones de acceso temprano"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "Incluir palabras clave en la sintaxis de LoRA",
|
||||
"includeTriggerWordsHelp": "Incluir palabras clave entrenadas al copiar la sintaxis de LoRA al portapapeles"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "Comprobar actualizaciones para la selección",
|
||||
"moveAll": "Mover todos a carpeta",
|
||||
"autoOrganize": "Auto-organizar seleccionados",
|
||||
"skipMetadataRefresh": "Omitir actualización de metadatos para seleccionados",
|
||||
"resumeMetadataRefresh": "Reanudar actualización de metadatos para seleccionados",
|
||||
"deleteAll": "Eliminar todos los modelos",
|
||||
"clear": "Limpiar selección",
|
||||
"skipMetadataRefreshCount": "Omitir({count} modelos)",
|
||||
"resumeMetadataRefreshCount": "Reanudar({count} modelos)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Inicializando auto-organización...",
|
||||
"starting": "Iniciando auto-organización para {type}...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "Versión sin nombre",
|
||||
"noDetails": "Sin detalles adicionales"
|
||||
"noDetails": "Sin detalles adicionales",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "terminando pronto",
|
||||
"hours": "en {count}h",
|
||||
"days": "en {count}d"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Versión actual",
|
||||
"inLibrary": "En la biblioteca",
|
||||
"newer": "Versión más reciente",
|
||||
"earlyAccess": "Acceso temprano",
|
||||
"ignored": "Ignorada"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "Eliminar",
|
||||
"ignore": "Ignorar",
|
||||
"unignore": "Dejar de ignorar",
|
||||
"earlyAccessTooltip": "Requiere compra de acceso temprano",
|
||||
"resumeModelUpdates": "Reanudar actualizaciones para este modelo",
|
||||
"ignoreModelUpdates": "Ignorar actualizaciones para este modelo",
|
||||
"viewLocalVersions": "Ver todas las versiones locales",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "Carpeta de imágenes de ejemplo abierta",
|
||||
"openingFolder": "Abriendo carpeta de imágenes de ejemplo",
|
||||
"failedToOpen": "Error al abrir carpeta de imágenes de ejemplo"
|
||||
"failedToOpen": "Error al abrir carpeta de imágenes de ejemplo",
|
||||
"setupRequired": "Almacenamiento de imágenes de ejemplo",
|
||||
"setupDescription": "Para agregar imágenes de ejemplo personalizadas, primero necesita establecer una ubicación de descarga.",
|
||||
"setupUsage": "Esta ruta se utiliza tanto para imágenes de ejemplo descargadas como personalizadas.",
|
||||
"openSettings": "Abrir configuración"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "Comprobando actualizaciones...",
|
||||
"checkingMessage": "Por favor espera mientras comprobamos la última versión.",
|
||||
"showNotifications": "Mostrar notificaciones de actualización",
|
||||
"latestBadge": "Último",
|
||||
"updateProgress": {
|
||||
"preparing": "Preparando actualización...",
|
||||
"installing": "Instalando actualización...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "Modelo base actualizado exitosamente para {count} modelo(s)",
|
||||
"bulkBaseModelUpdatePartial": "Actualizados {success} modelo(s), fallaron {failed} modelo(s)",
|
||||
"bulkBaseModelUpdateFailed": "Error al actualizar el modelo base para los modelos seleccionados",
|
||||
"skipMetadataRefreshUpdating": "Actualizando flag de actualización de metadatos para {count} modelo(s)...",
|
||||
"skipMetadataRefreshSet": "Actualización de metadatos omitida para {count} modelo(s)",
|
||||
"skipMetadataRefreshCleared": "Actualización de metadatos reanudada para {count} modelo(s)",
|
||||
"skipMetadataRefreshPartial": "{success} modelo(s) actualizados, {failed} fallaron",
|
||||
"skipMetadataRefreshFailed": "Error al actualizar flag de actualización de metadatos para los modelos seleccionados",
|
||||
"bulkContentRatingUpdating": "Actualizando la clasificación de contenido para {count} modelo(s)...",
|
||||
"bulkContentRatingSet": "Clasificación de contenido establecida en {level} para {count} modelo(s)",
|
||||
"bulkContentRatingPartial": "Clasificación de contenido establecida en {level} para {success} modelo(s), {failed} fallaron",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "Filtros limpiados",
|
||||
"noCustomFilterToClear": "No hay filtro personalizado para limpiar"
|
||||
"noCustomFilterToClear": "No hay filtro personalizado para limpiar",
|
||||
"noActiveFilters": "No hay filtros activos para guardar"
|
||||
},
|
||||
"presets": {
|
||||
"created": "Preajuste \"{name}\" creado",
|
||||
"deleted": "Preajuste \"{name}\" eliminado",
|
||||
"applied": "Preajuste \"{name}\" aplicado",
|
||||
"overwritten": "Preset \"{name}\" sobrescrito",
|
||||
"restored": "Presets predeterminados restaurados"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "El nombre del preajuste no puede estar vacío",
|
||||
"presetNameTooLong": "El nombre del preajuste debe tener {max} caracteres o menos",
|
||||
"presetNameInvalidChars": "El nombre del preajuste contiene caracteres inválidos",
|
||||
"presetNameExists": "Ya existe un preajuste con este nombre",
|
||||
"maxPresetsReached": "Máximo {max} preajustes permitidos. Elimine uno para agregar más.",
|
||||
"presetNotFound": "Preajuste no encontrado",
|
||||
"invalidPreset": "Datos de preajuste inválidos",
|
||||
"deletePresetFailed": "Error al eliminar el preajuste",
|
||||
"applyPresetFailed": "Error al aplicar el preajuste"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "Imágenes de ejemplo {action} completadas",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "Error al cargar árbol de carpetas",
|
||||
"folderTreeError": "Error al cargar árbol de carpetas",
|
||||
"imagesImported": "Imágenes de ejemplo importadas exitosamente",
|
||||
"imagesPartial": "{success} imagen(es) importada(s), {failed} fallida(s)",
|
||||
"importFailed": "Error al importar imágenes de ejemplo: {message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "LoRA Manager is a passion project maintained full-time by a solo developer. Your support on Ko-fi helps cover development costs, keeps new updates coming, and unlocks a license key for the LM Civitai Extension as a thank-you gift. Every contribution truly makes a difference.",
|
||||
"supportCta": "Support on Ko-fi",
|
||||
"learnMore": "LM Civitai Extension Tutorial"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "Corrupción de caché detectada"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "Problemas de caché detectados"
|
||||
},
|
||||
"content": "{invalid} de {total} entradas de caché son inválidas ({rate}). Esto puede causar modelos faltantes o errores. Se recomienda reconstruir la caché.",
|
||||
"rebuildCache": "Reconstruir caché",
|
||||
"dismiss": "Descartar",
|
||||
"rebuilding": "Reconstruyendo caché...",
|
||||
"rebuildFailed": "Error al reconstruir la caché: {error}",
|
||||
"retry": "Reintentar"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@
|
||||
"next": "Suivant",
|
||||
"backToTop": "Retour en haut",
|
||||
"settings": "Paramètres",
|
||||
"help": "Aide"
|
||||
"help": "Aide",
|
||||
"add": "Ajouter"
|
||||
},
|
||||
"status": {
|
||||
"loading": "Chargement...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "Mise à jour",
|
||||
"updateAvailable": "Mise à jour disponible"
|
||||
"updateAvailable": "Mise à jour disponible",
|
||||
"skipRefresh": "Actualisation des métadonnées ignorée"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "Nombre d'utilisations"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "Filtrer les modèles",
|
||||
"presets": "Préréglages",
|
||||
"savePreset": "Enregistrer les filtres actifs comme nouveau préréglage.",
|
||||
"savePresetDisabledActive": "Impossible d'enregistrer : Un préréglage est déjà actif. Modifiez les filtres pour enregistrer un nouveau préréglage",
|
||||
"savePresetDisabledNoFilters": "Sélectionnez d'abord des filtres à enregistrer comme préréglage",
|
||||
"savePresetPrompt": "Entrez le nom du préréglage :",
|
||||
"presetClickTooltip": "Cliquer pour appliquer le préréglage \"{name}\"",
|
||||
"presetDeleteTooltip": "Supprimer le préréglage",
|
||||
"presetDeleteConfirm": "Supprimer le préréglage \"{name}\" ?",
|
||||
"presetDeleteConfirmClick": "Cliquez à nouveau pour confirmer",
|
||||
"presetOverwriteConfirm": "Le préréglage \"{name}\" existe déjà. Remplacer?",
|
||||
"presetNamePlaceholder": "Nom du préréglage...",
|
||||
"baseModel": "Modèle de base",
|
||||
"modelTags": "Tags (Top 20)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "Crédit non requis",
|
||||
"allowSellingGeneratedContent": "Vente autorisée",
|
||||
"noTags": "Aucun tag",
|
||||
"clearAll": "Effacer tous les filtres"
|
||||
"clearAll": "Effacer tous les filtres",
|
||||
"any": "N'importe quel",
|
||||
"all": "Tous",
|
||||
"tagLogicAny": "Correspondre à n'importe quel tag (OU)",
|
||||
"tagLogicAll": "Correspondre à tous les tags (ET)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "Basculer le thème",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "Impossible d'enregistrer les exclusions : {message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "Chemins à ignorer pour l'actualisation des métadonnées",
|
||||
"placeholder": "Exemple : temp, archived/old, test_models",
|
||||
"help": "Ignorer les modèles dans ces chemins de répertoires lors de l'actualisation groupée des métadonnées (\"Récupérer toutes les métadonnées\"). Entrez les chemins de dossiers relatifs au répertoire racine des modèles, séparés par des virgules.",
|
||||
"validation": {
|
||||
"noPaths": "Entrez au moins un chemin séparé par des virgules.",
|
||||
"saveFailed": "Impossible d'enregistrer les chemins à ignorer : {message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Densité d'affichage",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "Signaler n’importe quelle mise à jour disponible"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "Masquer les mises à jour en accès anticipé",
|
||||
"help": "Seulement les mises à jour en accès anticipé"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "Inclure les mots-clés dans la syntaxe LoRA",
|
||||
"includeTriggerWordsHelp": "Inclure les mots-clés d'entraînement lors de la copie de la syntaxe LoRA dans le presse-papiers"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "Vérifier les mises à jour pour la sélection",
|
||||
"moveAll": "Déplacer tout vers un dossier",
|
||||
"autoOrganize": "Auto-organiser la sélection",
|
||||
"skipMetadataRefresh": "Ignorer l'actualisation des métadonnées pour la sélection",
|
||||
"resumeMetadataRefresh": "Reprendre l'actualisation des métadonnées pour la sélection",
|
||||
"deleteAll": "Supprimer tous les modèles",
|
||||
"clear": "Effacer la sélection",
|
||||
"skipMetadataRefreshCount": "Ignorer({count} modèles)",
|
||||
"resumeMetadataRefreshCount": "Reprendre({count} modèles)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Initialisation de l'auto-organisation...",
|
||||
"starting": "Démarrage de l'auto-organisation pour {type}...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "Version sans nom",
|
||||
"noDetails": "Aucun détail supplémentaire"
|
||||
"noDetails": "Aucun détail supplémentaire",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "se termine bientôt",
|
||||
"hours": "dans {count}h",
|
||||
"days": "dans {count}j"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Version actuelle",
|
||||
"inLibrary": "Dans la bibliothèque",
|
||||
"newer": "Version plus récente",
|
||||
"earlyAccess": "Accès anticipé",
|
||||
"ignored": "Ignorée"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "Supprimer",
|
||||
"ignore": "Ignorer",
|
||||
"unignore": "Ne plus ignorer",
|
||||
"earlyAccessTooltip": "Nécessite l'achat de l'accès anticipé",
|
||||
"resumeModelUpdates": "Reprendre les mises à jour pour ce modèle",
|
||||
"ignoreModelUpdates": "Ignorer les mises à jour pour ce modèle",
|
||||
"viewLocalVersions": "Voir toutes les versions locales",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "Dossier d'images d'exemple ouvert",
|
||||
"openingFolder": "Ouverture du dossier d'images d'exemple",
|
||||
"failedToOpen": "Échec de l'ouverture du dossier d'images d'exemple"
|
||||
"failedToOpen": "Échec de l'ouverture du dossier d'images d'exemple",
|
||||
"setupRequired": "Stockage d'images d'exemple",
|
||||
"setupDescription": "Pour ajouter des images d'exemple personnalisées, vous devez d'abord définir un emplacement de téléchargement.",
|
||||
"setupUsage": "Ce chemin est utilisé pour les images d'exemple téléchargées et personnalisées.",
|
||||
"openSettings": "Ouvrir les paramètres"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "Vérification des mises à jour...",
|
||||
"checkingMessage": "Veuillez patienter pendant la vérification de la dernière version.",
|
||||
"showNotifications": "Afficher les notifications de mise à jour",
|
||||
"latestBadge": "Dernier",
|
||||
"updateProgress": {
|
||||
"preparing": "Préparation de la mise à jour...",
|
||||
"installing": "Installation de la mise à jour...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "Modèle de base mis à jour avec succès pour {count} modèle(s)",
|
||||
"bulkBaseModelUpdatePartial": "{success} modèle(s) mis à jour, {failed} modèle(s) en échec",
|
||||
"bulkBaseModelUpdateFailed": "Échec de la mise à jour du modèle de base pour les modèles sélectionnés",
|
||||
"skipMetadataRefreshUpdating": "Mise à jour du flag d'actualisation des métadonnées pour {count} modèle(s)...",
|
||||
"skipMetadataRefreshSet": "Actualisation des métadonnées ignorée pour {count} modèle(s)",
|
||||
"skipMetadataRefreshCleared": "Actualisation des métadonnées reprise pour {count} modèle(s)",
|
||||
"skipMetadataRefreshPartial": "{success} modèle(s) mis à jour, {failed} échoué(s)",
|
||||
"skipMetadataRefreshFailed": "Échec de la mise à jour du flag d'actualisation des métadonnées pour les modèles sélectionnés",
|
||||
"bulkContentRatingUpdating": "Mise à jour de la classification du contenu pour {count} modèle(s)...",
|
||||
"bulkContentRatingSet": "Classification du contenu définie sur {level} pour {count} modèle(s)",
|
||||
"bulkContentRatingPartial": "Classification du contenu définie sur {level} pour {success} modèle(s), {failed} échec(s)",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "Filtres effacés",
|
||||
"noCustomFilterToClear": "Aucun filtre personnalisé à effacer"
|
||||
"noCustomFilterToClear": "Aucun filtre personnalisé à effacer",
|
||||
"noActiveFilters": "Aucun filtre actif à enregistrer"
|
||||
},
|
||||
"presets": {
|
||||
"created": "Préréglage \"{name}\" créé",
|
||||
"deleted": "Préréglage \"{name}\" supprimé",
|
||||
"applied": "Préréglage \"{name}\" appliqué",
|
||||
"overwritten": "Préréglage \"{name}\" remplacé",
|
||||
"restored": "Paramètres par défaut restaurés"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "Le nom du préréglage ne peut pas être vide",
|
||||
"presetNameTooLong": "Le nom du préréglage doit contenir au maximum {max} caractères",
|
||||
"presetNameInvalidChars": "Le nom du préréglage contient des caractères invalides",
|
||||
"presetNameExists": "Un préréglage avec ce nom existe déjà",
|
||||
"maxPresetsReached": "Maximum {max} préréglages autorisés. Supprimez-en un pour en ajouter plus.",
|
||||
"presetNotFound": "Préréglage non trouvé",
|
||||
"invalidPreset": "Données de préréglage invalides",
|
||||
"deletePresetFailed": "Échec de la suppression du préréglage",
|
||||
"applyPresetFailed": "Échec de l'application du préréglage"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "Images d'exemple {action} terminées",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "Échec du chargement de l'arborescence des dossiers",
|
||||
"folderTreeError": "Erreur lors du chargement de l'arborescence des dossiers",
|
||||
"imagesImported": "Images d'exemple importées avec succès",
|
||||
"imagesPartial": "{success} image(s) importée(s), {failed} échouée(s)",
|
||||
"importFailed": "Échec de l'importation des images d'exemple : {message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "LoRA Manager is a passion project maintained full-time by a solo developer. Your support on Ko-fi helps cover development costs, keeps new updates coming, and unlocks a license key for the LM Civitai Extension as a thank-you gift. Every contribution truly makes a difference.",
|
||||
"supportCta": "Support on Ko-fi",
|
||||
"learnMore": "LM Civitai Extension Tutorial"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "Corruption du cache détectée"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "Problèmes de cache détectés"
|
||||
},
|
||||
"content": "{invalid} des {total} entrées de cache sont invalides ({rate}). Cela peut provoquer des modèles manquants ou des erreurs. Il est recommandé de reconstruire le cache.",
|
||||
"rebuildCache": "Reconstruire le cache",
|
||||
"dismiss": "Ignorer",
|
||||
"rebuilding": "Reconstruction du cache...",
|
||||
"rebuildFailed": "Échec de la reconstruction du cache : {error}",
|
||||
"retry": "Réessayer"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@
|
||||
"next": "הבא",
|
||||
"backToTop": "חזור למעלה",
|
||||
"settings": "הגדרות",
|
||||
"help": "עזרה"
|
||||
"help": "עזרה",
|
||||
"add": "הוסף"
|
||||
},
|
||||
"status": {
|
||||
"loading": "טוען...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "עדכון",
|
||||
"updateAvailable": "עדכון זמין"
|
||||
"updateAvailable": "עדכון זמין",
|
||||
"skipRefresh": "רענון המטא-נתונים דולג"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "מספר שימושים"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "סנן מודלים",
|
||||
"presets": "קביעות מראש",
|
||||
"savePreset": "שמור מסננים פעילים כקביעה מראש חדשה.",
|
||||
"savePresetDisabledActive": "לא ניתן לשמור: קביעה מראש כבר פעילה. שנה מסננים כדי לשמור קביעה מראש חדשה",
|
||||
"savePresetDisabledNoFilters": "בחר מסננים תחילה כדי לשמור כקביעה מראש",
|
||||
"savePresetPrompt": "הזן שם קביעה מראש:",
|
||||
"presetClickTooltip": "לחץ כדי להפעיל קביעה מראש \"{name}\"",
|
||||
"presetDeleteTooltip": "מחק קביעה מראש",
|
||||
"presetDeleteConfirm": "למחוק קביעה מראש \"{name}\"?",
|
||||
"presetDeleteConfirmClick": "לחץ שוב לאישור",
|
||||
"presetOverwriteConfirm": "הפריסט \"{name}\" כבר קיים. לדרוס?",
|
||||
"presetNamePlaceholder": "שם קביעה מראש...",
|
||||
"baseModel": "מודל בסיס",
|
||||
"modelTags": "תגיות (20 המובילות)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "ללא קרדיט נדרש",
|
||||
"allowSellingGeneratedContent": "אפשר מכירה",
|
||||
"noTags": "ללא תגיות",
|
||||
"clearAll": "נקה את כל המסננים"
|
||||
"clearAll": "נקה את כל המסננים",
|
||||
"any": "כלשהו",
|
||||
"all": "כל התגים",
|
||||
"tagLogicAny": "התאם כל תג (או)",
|
||||
"tagLogicAll": "התאם את כל התגים (וגם)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "החלף ערכת נושא",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "לא ניתן לשמור את ההוצאות: {message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "נתיבים לדילוג ברענון מטא-נתונים",
|
||||
"placeholder": "דוגמה: temp, archived/old, test_models",
|
||||
"help": "דלג על מודלים בנתיבי תיקיות אלה בעת רענון מטא-נתונים המוני (\"אחזר את כל המטא-נתונים\"). הזן נתיבי תיקיות יחסית לספריית השורש של המודל, מופרדים בפסיקים.",
|
||||
"validation": {
|
||||
"noPaths": "הזן לפחות נתיב אחד מופרד בפסיקים.",
|
||||
"saveFailed": "לא ניתן לשמור נתיבי דילוג: {message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "צפיפות תצוגה",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "תוויות לכל עדכון זמין"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "הסתר עדכוני גישה מוקדמת",
|
||||
"help": "רק עדכוני גישה מוקדמת"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "כלול מילות טריגר בתחביר LoRA",
|
||||
"includeTriggerWordsHelp": "כלול מילות טריגר מאומנות בעת העתקת תחביר LoRA ללוח"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "בדוק עדכונים לבחירה",
|
||||
"moveAll": "העבר הכל לתיקייה",
|
||||
"autoOrganize": "ארגן אוטומטית נבחרים",
|
||||
"skipMetadataRefresh": "דילוג על רענון מטא-נתונים לנבחרים",
|
||||
"resumeMetadataRefresh": "המשך רענון מטא-נתונים לנבחרים",
|
||||
"deleteAll": "מחק את כל המודלים",
|
||||
"clear": "נקה בחירה",
|
||||
"skipMetadataRefreshCount": "דילוג({count} מודלים)",
|
||||
"resumeMetadataRefreshCount": "המשך({count} מודלים)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "מאתחל ארגון אוטומטי...",
|
||||
"starting": "מתחיל ארגון אוטומטי עבור {type}...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "גרסה ללא שם",
|
||||
"noDetails": "אין פרטים נוספים"
|
||||
"noDetails": "אין פרטים נוספים",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "מסתיים בקרוב",
|
||||
"hours": "בעוד {count} שעות",
|
||||
"days": "בעוד {count} ימים"
|
||||
},
|
||||
"badges": {
|
||||
"current": "גרסה נוכחית",
|
||||
"inLibrary": "בספרייה",
|
||||
"newer": "גרסה חדשה יותר",
|
||||
"earlyAccess": "גישה מוקדמת",
|
||||
"ignored": "התעלם"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "מחיקה",
|
||||
"ignore": "התעלם",
|
||||
"unignore": "בטל התעלמות",
|
||||
"earlyAccessTooltip": "נדרש רכישת גישה מוקדמת",
|
||||
"resumeModelUpdates": "המשך עדכונים עבור מודל זה",
|
||||
"ignoreModelUpdates": "התעלם מעדכונים עבור מודל זה",
|
||||
"viewLocalVersions": "הצג את כל הגרסאות המקומיות",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "תיקיית תמונות הדוגמה נפתחה",
|
||||
"openingFolder": "פותח תיקיית תמונות דוגמה",
|
||||
"failedToOpen": "פתיחת תיקיית תמונות הדוגמה נכשלה"
|
||||
"failedToOpen": "פתיחת תיקיית תמונות הדוגמה נכשלה",
|
||||
"setupRequired": "אחסון תמונות דוגמה",
|
||||
"setupDescription": "כדי להוסיף תמונות דוגמה מותאמות אישית, עליך קודם להגדיר מיקום הורדה.",
|
||||
"setupUsage": "נתיב זה משמש הן עבור תמונות דוגמה שהורדו והן עבור תמונות מותאמות אישית.",
|
||||
"openSettings": "פתח הגדרות"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "בודק עדכונים...",
|
||||
"checkingMessage": "אנא המתן בזמן שאנו בודקים את הגרסה האחרונה.",
|
||||
"showNotifications": "הצג התראות עדכון",
|
||||
"latestBadge": "עדכן",
|
||||
"updateProgress": {
|
||||
"preparing": "מכין עדכון...",
|
||||
"installing": "מתקין עדכון...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "עודכן בהצלחה מודל הבסיס עבור {count} מודל(ים)",
|
||||
"bulkBaseModelUpdatePartial": "עודכנו {success} מודל(ים), נכשלו {failed} מודל(ים)",
|
||||
"bulkBaseModelUpdateFailed": "עדכון מודל הבסיס עבור המודלים שנבחרו נכשל",
|
||||
"skipMetadataRefreshUpdating": "מעדכן דגל רענון מטא-נתונים עבור {count} מודל(ים)...",
|
||||
"skipMetadataRefreshSet": "רענון מטא-נתונים דולג עבור {count} מודל(ים)",
|
||||
"skipMetadataRefreshCleared": "רענון מטא-נתונים התחדש עבור {count} מודל(ים)",
|
||||
"skipMetadataRefreshPartial": "{success} מודל(ים) עודכנו, {failed} נכשלו",
|
||||
"skipMetadataRefreshFailed": "נכשל בעדכון דגל רענון מטא-נתונים עבור המודלים הנבחרים",
|
||||
"bulkContentRatingUpdating": "מעדכן דירוג תוכן עבור {count} מודלים...",
|
||||
"bulkContentRatingSet": "דירוג התוכן הוגדר ל-{level} עבור {count} מודלים",
|
||||
"bulkContentRatingPartial": "דירוג התוכן הוגדר ל-{level} עבור {success} מודלים, {failed} נכשלו",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "המסננים נוקו",
|
||||
"noCustomFilterToClear": "אין מסנן מותאם אישית לניקוי"
|
||||
"noCustomFilterToClear": "אין מסנן מותאם אישית לניקוי",
|
||||
"noActiveFilters": "אין מסננים פעילים לשמירה"
|
||||
},
|
||||
"presets": {
|
||||
"created": "קביעה מראש \"{name}\" נוצרה",
|
||||
"deleted": "קביעה מראש \"{name}\" נמחקה",
|
||||
"applied": "קביעה מראש \"{name}\" הופעלה",
|
||||
"overwritten": "קביעה מראש \"{name}\" נדרסה",
|
||||
"restored": "ברירות המחדל שוחזרו"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "שם קביעה מראש לא יכול להיות ריק",
|
||||
"presetNameTooLong": "שם קביעה מראש חייב להיות {max} תווים או פחות",
|
||||
"presetNameInvalidChars": "שם קביעה מראש מכיל תווים לא חוקיים",
|
||||
"presetNameExists": "קביעה מראש עם שם זה כבר קיימת",
|
||||
"maxPresetsReached": "מותר מקסימום {max} קביעות מראש. מחק אחת כדי להוסיף עוד.",
|
||||
"presetNotFound": "קביעה מראש לא נמצאה",
|
||||
"invalidPreset": "נתוני קביעה מראש לא חוקיים",
|
||||
"deletePresetFailed": "מחיקת קביעה מראש נכשלה",
|
||||
"applyPresetFailed": "הפעלת קביעה מראש נכשלה"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "{action} תמונות הדוגמה הושלם",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "טעינת עץ התיקיות נכשלה",
|
||||
"folderTreeError": "שגיאה בטעינת עץ התיקיות",
|
||||
"imagesImported": "תמונות הדוגמה יובאו בהצלחה",
|
||||
"imagesPartial": "{success} תמונה/ות יובאו, {failed} נכשלו",
|
||||
"importFailed": "ייבוא תמונות הדוגמה נכשל: {message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "LoRA Manager is a passion project maintained full-time by a solo developer. Your support on Ko-fi helps cover development costs, keeps new updates coming, and unlocks a license key for the LM Civitai Extension as a thank-you gift. Every contribution truly makes a difference.",
|
||||
"supportCta": "Support on Ko-fi",
|
||||
"learnMore": "LM Civitai Extension Tutorial"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "זוהתה שחיתות במטמון"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "זוהו בעיות במטמון"
|
||||
},
|
||||
"content": "{invalid} מתוך {total} רשומות מטמון אינן תקינות ({rate}). זה עלול לגרום לדגמים חסרים או לשגיאות. מומלץ לבנות מחדש את המטמון.",
|
||||
"rebuildCache": "בניית מטמון מחדש",
|
||||
"dismiss": "ביטול",
|
||||
"rebuilding": "בונה מחדש את המטמון...",
|
||||
"rebuildFailed": "נכשלה בניית המטמון מחדש: {error}",
|
||||
"retry": "נסה שוב"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@
|
||||
"next": "次へ",
|
||||
"backToTop": "トップに戻る",
|
||||
"settings": "設定",
|
||||
"help": "ヘルプ"
|
||||
"help": "ヘルプ",
|
||||
"add": "追加"
|
||||
},
|
||||
"status": {
|
||||
"loading": "読み込み中...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "アップデート",
|
||||
"updateAvailable": "アップデートがあります"
|
||||
"updateAvailable": "アップデートがあります",
|
||||
"skipRefresh": "メタデータの更新がスキップされました"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "使用回数"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "モデルをフィルタ",
|
||||
"presets": "プリセット",
|
||||
"savePreset": "現在のアクティブフィルタを新しいプリセットとして保存。",
|
||||
"savePresetDisabledActive": "保存できません:プリセットがすでにアクティブです。フィルタを変更して新しいプリセットを保存してください",
|
||||
"savePresetDisabledNoFilters": "先にフィルタを選択してからプリセットとして保存",
|
||||
"savePresetPrompt": "プリセット名を入力:",
|
||||
"presetClickTooltip": "プリセット \"{name}\" を適用するにはクリック",
|
||||
"presetDeleteTooltip": "プリセットを削除",
|
||||
"presetDeleteConfirm": "プリセット \"{name}\" を削除しますか?",
|
||||
"presetDeleteConfirmClick": "もう一度クリックして確認",
|
||||
"presetOverwriteConfirm": "プリセット「{name}」は既に存在します。上書きしますか?",
|
||||
"presetNamePlaceholder": "プリセット名...",
|
||||
"baseModel": "ベースモデル",
|
||||
"modelTags": "タグ(上位20)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "クレジット不要",
|
||||
"allowSellingGeneratedContent": "販売許可",
|
||||
"noTags": "タグなし",
|
||||
"clearAll": "すべてのフィルタをクリア"
|
||||
"clearAll": "すべてのフィルタをクリア",
|
||||
"any": "いずれか",
|
||||
"all": "すべて",
|
||||
"tagLogicAny": "いずれかのタグに一致 (OR)",
|
||||
"tagLogicAll": "すべてのタグに一致 (AND)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "テーマの切り替え",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "除外設定を保存できませんでした: {message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "メタデータ更新スキップパス",
|
||||
"placeholder": "例:temp, archived/old, test_models",
|
||||
"help": "一括メタデータ更新(「すべてのメタデータを取得」)時にこれらのディレクトリパス内のモデルをスキップします。モデルルートディレクトリからの相対フォルダパスをカンマ区切りで入力してください。",
|
||||
"validation": {
|
||||
"noPaths": "カンマで区切って少なくとも1つのパスを入力してください。",
|
||||
"saveFailed": "スキップパスの保存に失敗しました:{message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "表示密度",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "利用可能な更新すべてを表示"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "早期アクセス更新を非表示",
|
||||
"help": "早期アクセスのみの更新"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "LoRA構文にトリガーワードを含める",
|
||||
"includeTriggerWordsHelp": "LoRA構文をクリップボードにコピーする際、学習済みトリガーワードを含めます"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "選択項目の更新を確認",
|
||||
"moveAll": "すべてをフォルダに移動",
|
||||
"autoOrganize": "自動整理を実行",
|
||||
"skipMetadataRefresh": "選択したモデルのメタデータ更新をスキップ",
|
||||
"resumeMetadataRefresh": "選択したモデルのメタデータ更新を再開",
|
||||
"deleteAll": "すべてのモデルを削除",
|
||||
"clear": "選択をクリア",
|
||||
"skipMetadataRefreshCount": "スキップ({count}モデル)",
|
||||
"resumeMetadataRefreshCount": "再開({count}モデル)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "自動整理を初期化中...",
|
||||
"starting": "{type}の自動整理を開始中...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "名前のないバージョン",
|
||||
"noDetails": "追加情報なし"
|
||||
"noDetails": "追加情報なし",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "まもなく終了",
|
||||
"hours": "{count}時間後",
|
||||
"days": "{count}日後"
|
||||
},
|
||||
"badges": {
|
||||
"current": "現在のバージョン",
|
||||
"inLibrary": "ライブラリにあります",
|
||||
"newer": "新しいバージョン",
|
||||
"earlyAccess": "早期アクセス",
|
||||
"ignored": "無視中"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "削除",
|
||||
"ignore": "無視",
|
||||
"unignore": "無視を解除",
|
||||
"earlyAccessTooltip": "早期アクセス購入が必要",
|
||||
"resumeModelUpdates": "このモデルの更新を再開",
|
||||
"ignoreModelUpdates": "このモデルの更新を無視",
|
||||
"viewLocalVersions": "ローカルの全バージョンを表示",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "例画像フォルダが開かれました",
|
||||
"openingFolder": "例画像フォルダを開いています",
|
||||
"failedToOpen": "例画像フォルダを開くのに失敗しました"
|
||||
"failedToOpen": "例画像フォルダを開くのに失敗しました",
|
||||
"setupRequired": "例画像ストレージ",
|
||||
"setupDescription": "カスタム例画像を追加するには、まずダウンロード場所を設定する必要があります。",
|
||||
"setupUsage": "このパスは、ダウンロードした例画像とカスタム画像の両方に使用されます。",
|
||||
"openSettings": "設定を開く"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "更新を確認中...",
|
||||
"checkingMessage": "最新バージョンを確認しています。お待ちください。",
|
||||
"showNotifications": "更新通知を表示",
|
||||
"latestBadge": "最新",
|
||||
"updateProgress": {
|
||||
"preparing": "更新を準備中...",
|
||||
"installing": "更新をインストール中...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "{count} モデルのベースモデルが正常に更新されました",
|
||||
"bulkBaseModelUpdatePartial": "{success} モデルを更新、{failed} モデルは失敗しました",
|
||||
"bulkBaseModelUpdateFailed": "選択したモデルのベースモデルの更新に失敗しました",
|
||||
"skipMetadataRefreshUpdating": "{count}モデルのメタデータ更新フラグを更新中...",
|
||||
"skipMetadataRefreshSet": "{count}モデルのメタデータ更新をスキップしました",
|
||||
"skipMetadataRefreshCleared": "{count}モデルのメタデータ更新を再開しました",
|
||||
"skipMetadataRefreshPartial": "{success}モデルを更新しました。{failed}モデルで失敗しました",
|
||||
"skipMetadataRefreshFailed": "選択したモデルのメタデータ更新フラグの更新に失敗しました",
|
||||
"bulkContentRatingUpdating": "{count} 件のモデルのコンテンツレーティングを更新中...",
|
||||
"bulkContentRatingSet": "{count} 件のモデルのコンテンツレーティングを {level} に設定しました",
|
||||
"bulkContentRatingPartial": "{success} 件のモデルのコンテンツレーティングを {level} に設定、{failed} 件は失敗しました",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "フィルタがクリアされました",
|
||||
"noCustomFilterToClear": "クリアするカスタムフィルタがありません"
|
||||
"noCustomFilterToClear": "クリアするカスタムフィルタがありません",
|
||||
"noActiveFilters": "保存するアクティブフィルタがありません"
|
||||
},
|
||||
"presets": {
|
||||
"created": "プリセット \"{name}\" が作成されました",
|
||||
"deleted": "プリセット \"{name}\" が削除されました",
|
||||
"applied": "プリセット \"{name}\" が適用されました",
|
||||
"overwritten": "プリセット「{name}」を上書きしました",
|
||||
"restored": "デフォルトのプリセットを復元しました"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "プリセット名を入力してください",
|
||||
"presetNameTooLong": "プリセット名は{max}文字以内にしてください",
|
||||
"presetNameInvalidChars": "プリセット名に使用できない文字が含まれています",
|
||||
"presetNameExists": "同じ名前のプリセットが既に存在します",
|
||||
"maxPresetsReached": "プリセットは最大{max}個までです。追加するには既存のものを削除してください。",
|
||||
"presetNotFound": "プリセットが見つかりません",
|
||||
"invalidPreset": "無効なプリセットデータです",
|
||||
"deletePresetFailed": "プリセットの削除に失敗しました",
|
||||
"applyPresetFailed": "プリセットの適用に失敗しました"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "例画像 {action} が完了しました",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "フォルダツリーの読み込みに失敗しました",
|
||||
"folderTreeError": "フォルダツリー読み込みエラー",
|
||||
"imagesImported": "例画像が正常にインポートされました",
|
||||
"imagesPartial": "{success} 件の画像をインポート、{failed} 件失敗",
|
||||
"importFailed": "例画像のインポートに失敗しました:{message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "LoRA Manager is a passion project maintained full-time by a solo developer. Your support on Ko-fi helps cover development costs, keeps new updates coming, and unlocks a license key for the LM Civitai Extension as a thank-you gift. Every contribution truly makes a difference.",
|
||||
"supportCta": "Support on Ko-fi",
|
||||
"learnMore": "LM Civitai Extension Tutorial"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "キャッシュの破損が検出されました"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "キャッシュの問題が検出されました"
|
||||
},
|
||||
"content": "{total}個のキャッシュエントリのうち{invalid}個が無効です({rate})。モデルが見つからない原因になったり、エラーが発生する可能性があります。キャッシュの再構築を推奨します。",
|
||||
"rebuildCache": "キャッシュを再構築",
|
||||
"dismiss": "閉じる",
|
||||
"rebuilding": "キャッシュを再構築中...",
|
||||
"rebuildFailed": "キャッシュの再構築に失敗しました: {error}",
|
||||
"retry": "再試行"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
100
locales/ko.json
100
locales/ko.json
@@ -10,7 +10,8 @@
|
||||
"next": "다음",
|
||||
"backToTop": "맨 위로",
|
||||
"settings": "설정",
|
||||
"help": "도움말"
|
||||
"help": "도움말",
|
||||
"add": "추가"
|
||||
},
|
||||
"status": {
|
||||
"loading": "로딩 중...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "업데이트",
|
||||
"updateAvailable": "업데이트 가능"
|
||||
"updateAvailable": "업데이트 가능",
|
||||
"skipRefresh": "메타데이터 새로고침 건너뜀"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "사용 횟수"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "모델 필터",
|
||||
"presets": "프리셋",
|
||||
"savePreset": "현재 활성 필터를 새 프리셋으로 저장.",
|
||||
"savePresetDisabledActive": "저장할 수 없음: 프리셋이 이미 활성화되어 있습니다. 필터를 수정한 후 새 프리셋을 저장하세요",
|
||||
"savePresetDisabledNoFilters": "먼저 필터를 선택한 후 프리셋으로 저장",
|
||||
"savePresetPrompt": "프리셋 이름 입력:",
|
||||
"presetClickTooltip": "프리셋 \"{name}\" 적용하려면 클릭",
|
||||
"presetDeleteTooltip": "프리셋 삭제",
|
||||
"presetDeleteConfirm": "프리셋 \"{name}\" 삭제하시겠습니까?",
|
||||
"presetDeleteConfirmClick": "다시 클릭하여 확인",
|
||||
"presetOverwriteConfirm": "프리셋 \"{name}\"이(가) 이미 존재합니다. 덮어쓰시겠습니까?",
|
||||
"presetNamePlaceholder": "프리셋 이름...",
|
||||
"baseModel": "베이스 모델",
|
||||
"modelTags": "태그 (상위 20개)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "크레딧 표기 없음",
|
||||
"allowSellingGeneratedContent": "판매 허용",
|
||||
"noTags": "태그 없음",
|
||||
"clearAll": "모든 필터 지우기"
|
||||
"clearAll": "모든 필터 지우기",
|
||||
"any": "아무",
|
||||
"all": "모두",
|
||||
"tagLogicAny": "모든 태그 일치 (OR)",
|
||||
"tagLogicAll": "모든 태그 일치 (AND)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "테마 토글",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "제외 항목을 저장할 수 없습니다: {message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "메타데이터 새로고침 건너뛰기 경로",
|
||||
"placeholder": "예: temp, archived/old, test_models",
|
||||
"help": "일괄 메타데이터 새로고침(\"모든 메타데이터 가져오기\") 시 이 디렉터리 경로의 모델을 건너뜁니다. 모델 루트 디렉터리를 기준으로 한 폴 더 경로를 쉼표로 구분하여 입력하세요.",
|
||||
"validation": {
|
||||
"noPaths": "쉼표로 구분하여 하나 이상의 경로를 입력하세요.",
|
||||
"saveFailed": "건너뛰기 경로를 저장할 수 없습니다: {message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "표시 밀도",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "사용 가능한 모든 업데이트 표시"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "얼리 액세스 업데이트 숨기기",
|
||||
"help": "얼리 액세스 업데이트만"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "LoRA 문법에 트리거 단어 포함",
|
||||
"includeTriggerWordsHelp": "LoRA 문법을 클립보드에 복사할 때 학습된 트리거 단어를 포함합니다"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "선택 항목 업데이트 확인",
|
||||
"moveAll": "모두 폴더로 이동",
|
||||
"autoOrganize": "자동 정리 선택",
|
||||
"skipMetadataRefresh": "선택한 모델의 메타데이터 새로고침 건너뛰기",
|
||||
"resumeMetadataRefresh": "선택한 모델의 메타데이터 새로고침 재개",
|
||||
"deleteAll": "모든 모델 삭제",
|
||||
"clear": "선택 지우기",
|
||||
"skipMetadataRefreshCount": "건너뛰기({count}개 모델)",
|
||||
"resumeMetadataRefreshCount": "재개({count}개 모델)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "자동 정리 초기화 중...",
|
||||
"starting": "{type}에 대한 자동 정리 시작...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "이름 없는 버전",
|
||||
"noDetails": "추가 정보 없음"
|
||||
"noDetails": "추가 정보 없음",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "곧 종료",
|
||||
"hours": "{count}시간 후",
|
||||
"days": "{count}일 후"
|
||||
},
|
||||
"badges": {
|
||||
"current": "현재 버전",
|
||||
"inLibrary": "라이브러리에 있음",
|
||||
"newer": "최신 버전",
|
||||
"earlyAccess": "얼리 액세스",
|
||||
"ignored": "무시됨"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "삭제",
|
||||
"ignore": "무시",
|
||||
"unignore": "무시 해제",
|
||||
"earlyAccessTooltip": "얼리 액세스 구매 필요",
|
||||
"resumeModelUpdates": "이 모델 업데이트 재개",
|
||||
"ignoreModelUpdates": "이 모델 업데이트 무시",
|
||||
"viewLocalVersions": "로컬 버전 모두 보기",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "예시 이미지 폴더가 열렸습니다",
|
||||
"openingFolder": "예시 이미지 폴더를 여는 중",
|
||||
"failedToOpen": "예시 이미지 폴더 열기 실패"
|
||||
"failedToOpen": "예시 이미지 폴더 열기 실패",
|
||||
"setupRequired": "예시 이미지 저장소",
|
||||
"setupDescription": "사용자 지정 예시 이미지를 추가하려면 먼저 다운로드 위치를 설정해야 합니다.",
|
||||
"setupUsage": "이 경로는 다운로드한 예시 이미지와 사용자 지정 이미지 모두에 사용됩니다.",
|
||||
"openSettings": "설정 열기"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "업데이트 확인 중...",
|
||||
"checkingMessage": "최신 버전을 확인하는 동안 잠시 기다려주세요.",
|
||||
"showNotifications": "업데이트 알림 표시",
|
||||
"latestBadge": "최신",
|
||||
"updateProgress": {
|
||||
"preparing": "업데이트 준비 중...",
|
||||
"installing": "업데이트 설치 중...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "{count}개의 모델에 베이스 모델이 성공적으로 업데이트되었습니다",
|
||||
"bulkBaseModelUpdatePartial": "{success}개의 모델이 업데이트되었고, {failed}개의 모델이 실패했습니다",
|
||||
"bulkBaseModelUpdateFailed": "선택한 모델의 베이스 모델 업데이트에 실패했습니다",
|
||||
"skipMetadataRefreshUpdating": "{count}개 모델의 메타데이터 새로고침 플래그를 업데이트하는 중...",
|
||||
"skipMetadataRefreshSet": "{count}개 모델의 메타데이터 새로고침을 건너뛰었습니다",
|
||||
"skipMetadataRefreshCleared": "{count}개 모델의 메타데이터 새로고침을 재개했습니다",
|
||||
"skipMetadataRefreshPartial": "{success}개 모델을 업데이트했습니다. {failed}개 실패",
|
||||
"skipMetadataRefreshFailed": "선택한 모델의 메타데이터 새로고침 플래그 업데이트 실패",
|
||||
"bulkContentRatingUpdating": "{count}개 모델의 콘텐츠 등급을 업데이트하는 중...",
|
||||
"bulkContentRatingSet": "{count}개 모델의 콘텐츠 등급을 {level}(으)로 설정했습니다",
|
||||
"bulkContentRatingPartial": "{success}개 모델의 콘텐츠 등급을 {level}(으)로 설정했고, {failed}개는 실패했습니다",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "필터가 지워졌습니다",
|
||||
"noCustomFilterToClear": "지울 사용자 정의 필터가 없습니다"
|
||||
"noCustomFilterToClear": "지울 사용자 정의 필터가 없습니다",
|
||||
"noActiveFilters": "저장할 활성 필터가 없습니다"
|
||||
},
|
||||
"presets": {
|
||||
"created": "프리셋 \"{name}\" 생성됨",
|
||||
"deleted": "프리셋 \"{name}\" 삭제됨",
|
||||
"applied": "프리셋 \"{name}\" 적용됨",
|
||||
"overwritten": "프리셋 \"{name}\" 덮어쓰기 완료",
|
||||
"restored": "기본 프리셋 복원 완료"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "프리셋 이름을 입력하세요",
|
||||
"presetNameTooLong": "프리셋 이름은 {max}자 이하여야 합니다",
|
||||
"presetNameInvalidChars": "프리셋 이름에 유효하지 않은 문자가 포함되어 있습니다",
|
||||
"presetNameExists": "동일한 이름의 프리셋이 이미 존재합니다",
|
||||
"maxPresetsReached": "최대 {max}개의 프리셋만 허용됩니다. 더 추가하려면 기존 것을 삭제하세요.",
|
||||
"presetNotFound": "프리셋을 찾을 수 없습니다",
|
||||
"invalidPreset": "잘못된 프리셋 데이터입니다",
|
||||
"deletePresetFailed": "프리셋 삭제에 실패했습니다",
|
||||
"applyPresetFailed": "프리셋 적용에 실패했습니다"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "예시 이미지 {action}이(가) 완료되었습니다",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "폴더 트리 로딩 실패",
|
||||
"folderTreeError": "폴더 트리 로딩 오류",
|
||||
"imagesImported": "예시 이미지가 성공적으로 가져와졌습니다",
|
||||
"imagesPartial": "{success}개 이미지 가져오기 성공, {failed}개 실패",
|
||||
"importFailed": "예시 이미지 가져오기 실패: {message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "LoRA Manager is a passion project maintained full-time by a solo developer. Your support on Ko-fi helps cover development costs, keeps new updates coming, and unlocks a license key for the LM Civitai Extension as a thank-you gift. Every contribution truly makes a difference.",
|
||||
"supportCta": "Support on Ko-fi",
|
||||
"learnMore": "LM Civitai Extension Tutorial"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "캐시 손상이 감지되었습니다"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "캐시 문제가 감지되었습니다"
|
||||
},
|
||||
"content": "{total}개의 캐시 항목 중 {invalid}개가 유효하지 않습니다 ({rate}). 모델 누락이나 오류가 발생할 수 있습니다. 캐시를 재구축하는 것이 좋습니다.",
|
||||
"rebuildCache": "캐시 재구축",
|
||||
"dismiss": "무시",
|
||||
"rebuilding": "캐시 재구축 중...",
|
||||
"rebuildFailed": "캐시 재구축 실패: {error}",
|
||||
"retry": "다시 시도"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -10,7 +10,8 @@
|
||||
"next": "Далее",
|
||||
"backToTop": "Наверх",
|
||||
"settings": "Настройки",
|
||||
"help": "Справка"
|
||||
"help": "Справка",
|
||||
"add": "Добавить"
|
||||
},
|
||||
"status": {
|
||||
"loading": "Загрузка...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "Обновление",
|
||||
"updateAvailable": "Доступно обновление"
|
||||
"updateAvailable": "Доступно обновление",
|
||||
"skipRefresh": "Обновление метаданных пропущено"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "Количество использований"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "Фильтр моделей",
|
||||
"presets": "Пресеты",
|
||||
"savePreset": "Сохранить текущие активные фильтры как новый пресет.",
|
||||
"savePresetDisabledActive": "Невозможно сохранить: Пресет уже активен. Измените фильтры, чтобы сохранить новый пресет",
|
||||
"savePresetDisabledNoFilters": "Сначала выберите фильтры для сохранения как пресет",
|
||||
"savePresetPrompt": "Введите имя пресета:",
|
||||
"presetClickTooltip": "Нажмите чтобы применить пресет \"{name}\"",
|
||||
"presetDeleteTooltip": "Удалить пресет",
|
||||
"presetDeleteConfirm": "Удалить пресет \"{name}\"?",
|
||||
"presetDeleteConfirmClick": "Нажмите еще раз для подтверждения",
|
||||
"presetOverwriteConfirm": "Пресет \"{name}\" уже существует. Перезаписать?",
|
||||
"presetNamePlaceholder": "Имя пресета...",
|
||||
"baseModel": "Базовая модель",
|
||||
"modelTags": "Теги (Топ 20)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "Без указания авторства",
|
||||
"allowSellingGeneratedContent": "Продажа разрешена",
|
||||
"noTags": "Без тегов",
|
||||
"clearAll": "Очистить все фильтры"
|
||||
"clearAll": "Очистить все фильтры",
|
||||
"any": "Любой",
|
||||
"all": "Все",
|
||||
"tagLogicAny": "Совпадение с любым тегом (ИЛИ)",
|
||||
"tagLogicAll": "Совпадение со всеми тегами (И)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "Переключить тему",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "Не удалось сохранить исключения: {message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "Пути для пропуска обновления метаданных",
|
||||
"placeholder": "Пример: temp, archived/old, test_models",
|
||||
"help": "Пропускать модели в этих каталогах при массовом обновлении метаданных («Получить все метаданные»). Введите пути к папкам относительно корневого каталога моделей, разделённые запятой.",
|
||||
"validation": {
|
||||
"noPaths": "Введите хотя бы один путь, разделённый запятыми.",
|
||||
"saveFailed": "Не удалось сохранить пути для пропуска: {message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "Плотность отображения",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "Отмечать любые доступные обновления"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "Скрыть обновления раннего доступа",
|
||||
"help": "Только обновления раннего доступа"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "Включать триггерные слова в синтаксис LoRA",
|
||||
"includeTriggerWordsHelp": "Включать обученные триггерные слова при копировании синтаксиса LoRA в буфер обмена"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "Проверить обновления для выбранных",
|
||||
"moveAll": "Переместить все в папку",
|
||||
"autoOrganize": "Автоматически организовать выбранные",
|
||||
"skipMetadataRefresh": "Пропустить обновление метаданных для выбранных",
|
||||
"resumeMetadataRefresh": "Возобновить обновление метаданных для выбранных",
|
||||
"deleteAll": "Удалить все модели",
|
||||
"clear": "Очистить выбор",
|
||||
"skipMetadataRefreshCount": "Пропустить({count} моделей)",
|
||||
"resumeMetadataRefreshCount": "Возобновить({count} моделей)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Инициализация автоматической организации...",
|
||||
"starting": "Запуск автоматической организации для {type}...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "Версия без названия",
|
||||
"noDetails": "Дополнительная информация отсутствует"
|
||||
"noDetails": "Дополнительная информация отсутствует",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "скоро заканчивается",
|
||||
"hours": "через {count}ч",
|
||||
"days": "через {count}д"
|
||||
},
|
||||
"badges": {
|
||||
"current": "Текущая версия",
|
||||
"inLibrary": "В библиотеке",
|
||||
"newer": "Более новая версия",
|
||||
"earlyAccess": "Ранний доступ",
|
||||
"ignored": "Игнорируется"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "Удалить",
|
||||
"ignore": "Игнорировать",
|
||||
"unignore": "Перестать игнорировать",
|
||||
"earlyAccessTooltip": "Требуется покупка раннего доступа",
|
||||
"resumeModelUpdates": "Возобновить обновления для этой модели",
|
||||
"ignoreModelUpdates": "Игнорировать обновления для этой модели",
|
||||
"viewLocalVersions": "Показать все локальные версии",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "Папка с примерами изображений открыта",
|
||||
"openingFolder": "Открытие папки с примерами изображений",
|
||||
"failedToOpen": "Не удалось открыть папку с примерами изображений"
|
||||
"failedToOpen": "Не удалось открыть папку с примерами изображений",
|
||||
"setupRequired": "Хранилище примеров изображений",
|
||||
"setupDescription": "Чтобы добавить собственные примеры изображений, сначала нужно установить место загрузки.",
|
||||
"setupUsage": "Этот путь используется как для загруженных, так и для пользовательских примеров изображений.",
|
||||
"openSettings": "Открыть настройки"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "Проверка обновлений...",
|
||||
"checkingMessage": "Пожалуйста, подождите, пока мы проверяем последнюю версию.",
|
||||
"showNotifications": "Показывать уведомления об обновлениях",
|
||||
"latestBadge": "Последний",
|
||||
"updateProgress": {
|
||||
"preparing": "Подготовка обновления...",
|
||||
"installing": "Установка обновления...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "Базовая модель успешно обновлена для {count} моделей",
|
||||
"bulkBaseModelUpdatePartial": "Обновлено {success} моделей, не удалось обновить {failed} моделей",
|
||||
"bulkBaseModelUpdateFailed": "Не удалось обновить базовую модель для выбранных моделей",
|
||||
"skipMetadataRefreshUpdating": "Обновление флага обновления метаданных для {count} модели(ей)...",
|
||||
"skipMetadataRefreshSet": "Обновление метаданных пропущено для {count} модели(ей)",
|
||||
"skipMetadataRefreshCleared": "Обновление метаданных возобновлено для {count} модели(ей)",
|
||||
"skipMetadataRefreshPartial": "{success} модели(ей) обновлено, {failed} не удалось",
|
||||
"skipMetadataRefreshFailed": "Не удалось обновить флаг обновления метаданных для выбранных моделей",
|
||||
"bulkContentRatingUpdating": "Обновление рейтинга контента для {count} модель(ей)...",
|
||||
"bulkContentRatingSet": "Рейтинг контента установлен на {level} для {count} модель(ей)",
|
||||
"bulkContentRatingPartial": "Рейтинг контента {level} установлен для {success} модель(ей), {failed} не удалось",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "Фильтры очищены",
|
||||
"noCustomFilterToClear": "Нет пользовательского фильтра для очистки"
|
||||
"noCustomFilterToClear": "Нет пользовательского фильтра для очистки",
|
||||
"noActiveFilters": "Нет активных фильтров для сохранения"
|
||||
},
|
||||
"presets": {
|
||||
"created": "Пресет \"{name}\" создан",
|
||||
"deleted": "Пресет \"{name}\" удален",
|
||||
"applied": "Пресет \"{name}\" применен",
|
||||
"overwritten": "Пресет \"{name}\" перезаписан",
|
||||
"restored": "Пресеты по умолчанию восстановлены"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "Имя пресета не может быть пустым",
|
||||
"presetNameTooLong": "Имя пресета должно содержать не более {max} символов",
|
||||
"presetNameInvalidChars": "Имя пресета содержит недопустимые символы",
|
||||
"presetNameExists": "Пресет с таким именем уже существует",
|
||||
"maxPresetsReached": "Допустимо максимум {max} пресетов. Удалите один, чтобы добавить больше.",
|
||||
"presetNotFound": "Пресет не найден",
|
||||
"invalidPreset": "Недопустимые данные пресета",
|
||||
"deletePresetFailed": "Не удалось удалить пресет",
|
||||
"applyPresetFailed": "Не удалось применить пресет"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "Примеры изображений {action} завершены",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "Не удалось загрузить дерево папок",
|
||||
"folderTreeError": "Ошибка загрузки дерева папок",
|
||||
"imagesImported": "Примеры изображений успешно импортированы",
|
||||
"imagesPartial": "{success} изображ. импортировано, {failed} не удалось",
|
||||
"importFailed": "Не удалось импортировать примеры изображений: {message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "LoRA Manager is a passion project maintained full-time by a solo developer. Your support on Ko-fi helps cover development costs, keeps new updates coming, and unlocks a license key for the LM Civitai Extension as a thank-you gift. Every contribution truly makes a difference.",
|
||||
"supportCta": "Support on Ko-fi",
|
||||
"learnMore": "LM Civitai Extension Tutorial"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "Обнаружено повреждение кэша"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "Обнаружены проблемы с кэшем"
|
||||
},
|
||||
"content": "{invalid} из {total} записей кэша недействительны ({rate}). Это может привести к отсутствию моделей или ошибкам. Рекомендуется перестроить кэш.",
|
||||
"rebuildCache": "Перестроить кэш",
|
||||
"dismiss": "Отклонить",
|
||||
"rebuilding": "Перестроение кэша...",
|
||||
"rebuildFailed": "Не удалось перестроить кэш: {error}",
|
||||
"retry": "Повторить"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@
|
||||
"next": "下一步",
|
||||
"backToTop": "返回顶部",
|
||||
"settings": "设置",
|
||||
"help": "帮助"
|
||||
"help": "帮助",
|
||||
"add": "添加"
|
||||
},
|
||||
"status": {
|
||||
"loading": "加载中...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "更新",
|
||||
"updateAvailable": "有可用更新"
|
||||
"updateAvailable": "有可用更新",
|
||||
"skipRefresh": "元数据刷新已跳过"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "使用次数"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "筛选模型",
|
||||
"presets": "预设",
|
||||
"savePreset": "将当前激活的筛选器保存为新预设。",
|
||||
"savePresetDisabledActive": "无法保存:已有预设处于激活状态。修改筛选器后可保存新预设",
|
||||
"savePresetDisabledNoFilters": "先选择筛选器,然后保存为预设",
|
||||
"savePresetPrompt": "输入预设名称:",
|
||||
"presetClickTooltip": "点击应用预设 \"{name}\"",
|
||||
"presetDeleteTooltip": "删除预设",
|
||||
"presetDeleteConfirm": "删除预设 \"{name}\"?",
|
||||
"presetDeleteConfirmClick": "再次点击确认",
|
||||
"presetOverwriteConfirm": "预设 \"{name}\" 已存在。是否覆盖?",
|
||||
"presetNamePlaceholder": "预设名称...",
|
||||
"baseModel": "基础模型",
|
||||
"modelTags": "标签(前20)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "无需署名",
|
||||
"allowSellingGeneratedContent": "允许销售",
|
||||
"noTags": "无标签",
|
||||
"clearAll": "清除所有筛选"
|
||||
"clearAll": "清除所有筛选",
|
||||
"any": "任一",
|
||||
"all": "全部",
|
||||
"tagLogicAny": "匹配任一标签 (或)",
|
||||
"tagLogicAll": "匹配所有标签 (与)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "切换主题",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "无法保存排除项:{message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "元数据刷新跳过路径",
|
||||
"placeholder": "示例:temp, archived/old, test_models",
|
||||
"help": "批量刷新元数据(\"获取全部元数据\")时跳过这些目录路径中的模型。输入相对于模型根目录的文件夹路径,以逗号分隔。",
|
||||
"validation": {
|
||||
"noPaths": "请输入至少一个路径,以逗号分隔。",
|
||||
"saveFailed": "无法保存跳过路径:{message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "显示密度",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "显示任何可用更新"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "隐藏抢先体验更新",
|
||||
"help": "抢先体验更新"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "复制 LoRA 语法时包含触发词",
|
||||
"includeTriggerWordsHelp": "复制 LoRA 语法到剪贴板时包含训练触发词"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "检查所选更新",
|
||||
"moveAll": "移动所选中到文件夹",
|
||||
"autoOrganize": "自动整理所选模型",
|
||||
"skipMetadataRefresh": "跳过所选模型的元数据刷新",
|
||||
"resumeMetadataRefresh": "恢复所选模型的元数据刷新",
|
||||
"deleteAll": "删除选中模型",
|
||||
"clear": "清除选择",
|
||||
"skipMetadataRefreshCount": "跳过({count} 个模型)",
|
||||
"resumeMetadataRefreshCount": "恢复({count} 个模型)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "正在初始化自动整理...",
|
||||
"starting": "正在为 {type} 启动自动整理...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "未命名版本",
|
||||
"noDetails": "暂无更多信息"
|
||||
"noDetails": "暂无更多信息",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "即将结束",
|
||||
"hours": "{count}小时后",
|
||||
"days": "{count}天后"
|
||||
},
|
||||
"badges": {
|
||||
"current": "当前版本",
|
||||
"inLibrary": "已在库中",
|
||||
"newer": "较新的版本",
|
||||
"earlyAccess": "抢先体验",
|
||||
"ignored": "已忽略"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "删除",
|
||||
"ignore": "忽略",
|
||||
"unignore": "取消忽略",
|
||||
"earlyAccessTooltip": "需要购买抢先体验",
|
||||
"resumeModelUpdates": "继续跟踪该模型的更新",
|
||||
"ignoreModelUpdates": "忽略该模型的更新",
|
||||
"viewLocalVersions": "查看所有本地版本",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "示例图片文件夹已打开",
|
||||
"openingFolder": "正在打开示例图片文件夹",
|
||||
"failedToOpen": "打开示例图片文件夹失败"
|
||||
"failedToOpen": "打开示例图片文件夹失败",
|
||||
"setupRequired": "示例图片存储",
|
||||
"setupDescription": "要添加自定义示例图片,您需要先设置下载位置。",
|
||||
"setupUsage": "此路径用于存储下载的示例图片和自定义图片。",
|
||||
"openSettings": "打开设置"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "正在检查更新...",
|
||||
"checkingMessage": "请稍候,正在检查最新版本。",
|
||||
"showNotifications": "显示更新通知",
|
||||
"latestBadge": "最新",
|
||||
"updateProgress": {
|
||||
"preparing": "正在准备更新...",
|
||||
"installing": "正在安装更新...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "成功为 {count} 个模型更新基础模型",
|
||||
"bulkBaseModelUpdatePartial": "更新了 {success} 个模型,{failed} 个失败",
|
||||
"bulkBaseModelUpdateFailed": "为选中模型更新基础模型失败",
|
||||
"skipMetadataRefreshUpdating": "正在更新 {count} 个模型的元数据刷新标志...",
|
||||
"skipMetadataRefreshSet": "已为 {count} 个模型跳过元数据刷新",
|
||||
"skipMetadataRefreshCleared": "已为 {count} 个模型恢复元数据刷新",
|
||||
"skipMetadataRefreshPartial": "已更新 {success} 个模型,{failed} 个失败",
|
||||
"skipMetadataRefreshFailed": "未能更新所选模型的元数据刷新标志",
|
||||
"bulkContentRatingUpdating": "正在为 {count} 个模型更新内容评级...",
|
||||
"bulkContentRatingSet": "已将 {count} 个模型的内容评级设置为 {level}",
|
||||
"bulkContentRatingPartial": "已将 {success} 个模型的内容评级设置为 {level},{failed} 个失败",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "筛选已清除",
|
||||
"noCustomFilterToClear": "没有自定义筛选可清除"
|
||||
"noCustomFilterToClear": "没有自定义筛选可清除",
|
||||
"noActiveFilters": "没有可保存的激活筛选"
|
||||
},
|
||||
"presets": {
|
||||
"created": "预设 \"{name}\" 已创建",
|
||||
"deleted": "预设 \"{name}\" 已删除",
|
||||
"applied": "预设 \"{name}\" 已应用",
|
||||
"overwritten": "预设 \"{name}\" 已覆盖",
|
||||
"restored": "默认预设已恢复"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "预设名称不能为空",
|
||||
"presetNameTooLong": "预设名称不能超过 {max} 个字符",
|
||||
"presetNameInvalidChars": "预设名称包含无效字符",
|
||||
"presetNameExists": "已存在同名预设",
|
||||
"maxPresetsReached": "最多允许 {max} 个预设。删除一个以添加更多。",
|
||||
"presetNotFound": "预设未找到",
|
||||
"invalidPreset": "无效的预设数据",
|
||||
"deletePresetFailed": "删除预设失败",
|
||||
"applyPresetFailed": "应用预设失败"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "示例图片{action}完成",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "加载文件夹树失败",
|
||||
"folderTreeError": "加载文件夹树出错",
|
||||
"imagesImported": "示例图片导入成功",
|
||||
"imagesPartial": "成功导入 {success} 张图片,{failed} 张失败",
|
||||
"importFailed": "导入示例图片失败:{message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "来爱发电为Lora Manager项目发电,支持项目持续开发的同时,获取浏览器插件验证码,按季支付更优惠!支付宝/微信方便支付。感谢支持!🚀",
|
||||
"supportCta": "为LM发电",
|
||||
"learnMore": "浏览器插件教程"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "检测到缓存损坏"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "检测到缓存问题"
|
||||
},
|
||||
"content": "{total} 个缓存条目中有 {invalid} 个无效({rate})。这可能导致模型丢失或错误。建议重建缓存。",
|
||||
"rebuildCache": "重建缓存",
|
||||
"dismiss": "忽略",
|
||||
"rebuilding": "正在重建缓存...",
|
||||
"rebuildFailed": "重建缓存失败:{error}",
|
||||
"retry": "重试"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@
|
||||
"next": "下一步",
|
||||
"backToTop": "回到頂部",
|
||||
"settings": "設定",
|
||||
"help": "說明"
|
||||
"help": "說明",
|
||||
"add": "新增"
|
||||
},
|
||||
"status": {
|
||||
"loading": "載入中...",
|
||||
@@ -130,7 +131,8 @@
|
||||
},
|
||||
"badges": {
|
||||
"update": "更新",
|
||||
"updateAvailable": "有可用更新"
|
||||
"updateAvailable": "有可用更新",
|
||||
"skipRefresh": "元數據更新已跳過"
|
||||
},
|
||||
"usage": {
|
||||
"timesUsed": "使用次數"
|
||||
@@ -204,6 +206,17 @@
|
||||
},
|
||||
"filter": {
|
||||
"title": "篩選模型",
|
||||
"presets": "預設",
|
||||
"savePreset": "將目前啟用的篩選器儲存為新預設。",
|
||||
"savePresetDisabledActive": "無法儲存:已有預設處於啟用狀態。修改篩選器後可儲存新預設",
|
||||
"savePresetDisabledNoFilters": "先選擇篩選器,然後儲存為預設",
|
||||
"savePresetPrompt": "輸入預設名稱:",
|
||||
"presetClickTooltip": "點擊套用預設 \"{name}\"",
|
||||
"presetDeleteTooltip": "刪除預設",
|
||||
"presetDeleteConfirm": "刪除預設 \"{name}\"?",
|
||||
"presetDeleteConfirmClick": "再次點擊確認",
|
||||
"presetOverwriteConfirm": "預設 \"{name}\" 已存在。是否覆蓋?",
|
||||
"presetNamePlaceholder": "預設名稱...",
|
||||
"baseModel": "基礎模型",
|
||||
"modelTags": "標籤(前 20)",
|
||||
"modelTypes": "Model Types",
|
||||
@@ -211,7 +224,11 @@
|
||||
"noCreditRequired": "無需署名",
|
||||
"allowSellingGeneratedContent": "允許銷售",
|
||||
"noTags": "無標籤",
|
||||
"clearAll": "清除所有篩選"
|
||||
"clearAll": "清除所有篩選",
|
||||
"any": "任一",
|
||||
"all": "全部",
|
||||
"tagLogicAny": "符合任一票籤 (或)",
|
||||
"tagLogicAll": "符合所有標籤 (與)"
|
||||
},
|
||||
"theme": {
|
||||
"toggle": "切換主題",
|
||||
@@ -275,6 +292,15 @@
|
||||
"saveFailed": "無法儲存排除項目:{message}"
|
||||
}
|
||||
},
|
||||
"metadataRefreshSkipPaths": {
|
||||
"label": "中繼資料重新整理跳過路徑",
|
||||
"placeholder": "範例:temp, archived/old, test_models",
|
||||
"help": "批次重新整理中繼資料(「擷取所有中繼資料」)時跳過這些目錄路徑中的模型。輸入相對於模型根目錄的資料夾路徑,以逗號分隔。",
|
||||
"validation": {
|
||||
"noPaths": "請輸入至少一個路徑,以逗號分隔。",
|
||||
"saveFailed": "無法儲存跳過路徑:{message}"
|
||||
}
|
||||
},
|
||||
"layoutSettings": {
|
||||
"displayDensity": "顯示密度",
|
||||
"displayDensityOptions": {
|
||||
@@ -400,6 +426,10 @@
|
||||
"any": "顯示任何可用更新"
|
||||
}
|
||||
},
|
||||
"hideEarlyAccessUpdates": {
|
||||
"label": "隱藏搶先體驗更新",
|
||||
"help": "搶先體驗更新"
|
||||
},
|
||||
"misc": {
|
||||
"includeTriggerWords": "在 LoRA 語法中包含觸發詞",
|
||||
"includeTriggerWordsHelp": "複製 LoRA 語法到剪貼簿時包含訓練觸發詞"
|
||||
@@ -511,8 +541,12 @@
|
||||
"checkUpdates": "檢查所選更新",
|
||||
"moveAll": "全部移動到資料夾",
|
||||
"autoOrganize": "自動整理所選模型",
|
||||
"skipMetadataRefresh": "跳過所選模型的元數據更新",
|
||||
"resumeMetadataRefresh": "恢復所選模型的元數據更新",
|
||||
"deleteAll": "刪除全部模型",
|
||||
"clear": "清除選取",
|
||||
"skipMetadataRefreshCount": "跳過({count} 個模型)",
|
||||
"resumeMetadataRefreshCount": "恢復({count} 個模型)",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "正在初始化自動整理...",
|
||||
"starting": "正在開始自動整理 {type}...",
|
||||
@@ -1001,12 +1035,19 @@
|
||||
},
|
||||
"labels": {
|
||||
"unnamed": "未命名版本",
|
||||
"noDetails": "沒有其他資訊"
|
||||
"noDetails": "沒有其他資訊",
|
||||
"earlyAccess": "EA"
|
||||
},
|
||||
"eaTime": {
|
||||
"endingSoon": "即將結束",
|
||||
"hours": "{count}小時後",
|
||||
"days": "{count}天後"
|
||||
},
|
||||
"badges": {
|
||||
"current": "目前版本",
|
||||
"inLibrary": "已在庫中",
|
||||
"newer": "較新版本",
|
||||
"earlyAccess": "搶先體驗",
|
||||
"ignored": "已忽略"
|
||||
},
|
||||
"actions": {
|
||||
@@ -1014,6 +1055,7 @@
|
||||
"delete": "刪除",
|
||||
"ignore": "忽略",
|
||||
"unignore": "取消忽略",
|
||||
"earlyAccessTooltip": "需要購買搶先體驗",
|
||||
"resumeModelUpdates": "恢復追蹤此模型的更新",
|
||||
"ignoreModelUpdates": "忽略此模型的更新",
|
||||
"viewLocalVersions": "檢視所有本地版本",
|
||||
@@ -1165,7 +1207,11 @@
|
||||
"exampleImages": {
|
||||
"opened": "範例圖片資料夾已開啟",
|
||||
"openingFolder": "正在開啟範例圖片資料夾",
|
||||
"failedToOpen": "開啟範例圖片資料夾失敗"
|
||||
"failedToOpen": "開啟範例圖片資料夾失敗",
|
||||
"setupRequired": "範例圖片儲存",
|
||||
"setupDescription": "要新增自訂範例圖片,您需要先設定下載位置。",
|
||||
"setupUsage": "此路徑用於儲存下載的範例圖片和自訂圖片。",
|
||||
"openSettings": "開啟設定"
|
||||
}
|
||||
},
|
||||
"help": {
|
||||
@@ -1214,6 +1260,7 @@
|
||||
"checkingUpdates": "正在檢查更新...",
|
||||
"checkingMessage": "請稍候,正在檢查最新版本。",
|
||||
"showNotifications": "顯示更新通知",
|
||||
"latestBadge": "最新",
|
||||
"updateProgress": {
|
||||
"preparing": "正在準備更新...",
|
||||
"installing": "正在安裝更新...",
|
||||
@@ -1358,6 +1405,11 @@
|
||||
"bulkBaseModelUpdateSuccess": "已成功為 {count} 個模型更新基礎模型",
|
||||
"bulkBaseModelUpdatePartial": "已更新 {success} 個模型,{failed} 個模型失敗",
|
||||
"bulkBaseModelUpdateFailed": "更新所選模型的基礎模型失敗",
|
||||
"skipMetadataRefreshUpdating": "正在更新 {count} 個模型的元數據更新標記...",
|
||||
"skipMetadataRefreshSet": "已為 {count} 個模型跳過元數據更新",
|
||||
"skipMetadataRefreshCleared": "已為 {count} 個模型恢復元數據更新",
|
||||
"skipMetadataRefreshPartial": "已更新 {success} 個模型,{failed} 個失敗",
|
||||
"skipMetadataRefreshFailed": "無法更新所選模型的元數據更新標記",
|
||||
"bulkContentRatingUpdating": "正在為 {count} 個模型更新內容分級...",
|
||||
"bulkContentRatingSet": "已將 {count} 個模型的內容分級設定為 {level}",
|
||||
"bulkContentRatingPartial": "已將 {success} 個模型的內容分級設定為 {level},{failed} 個失敗",
|
||||
@@ -1414,7 +1466,26 @@
|
||||
"filters": {
|
||||
"applied": "{message}",
|
||||
"cleared": "篩選已清除",
|
||||
"noCustomFilterToClear": "無自訂篩選可清除"
|
||||
"noCustomFilterToClear": "無自訂篩選可清除",
|
||||
"noActiveFilters": "沒有可儲存的啟用篩選"
|
||||
},
|
||||
"presets": {
|
||||
"created": "預設 \"{name}\" 已建立",
|
||||
"deleted": "預設 \"{name}\" 已刪除",
|
||||
"applied": "預設 \"{name}\" 已套用",
|
||||
"overwritten": "預設 \"{name}\" 已覆蓋",
|
||||
"restored": "預設設定已恢復"
|
||||
},
|
||||
"error": {
|
||||
"presetNameEmpty": "預設名稱不能為空",
|
||||
"presetNameTooLong": "預設名稱不能超過 {max} 個字元",
|
||||
"presetNameInvalidChars": "預設名稱包含無效字元",
|
||||
"presetNameExists": "已存在同名預設",
|
||||
"maxPresetsReached": "最多允許 {max} 個預設。刪除一個以新增更多。",
|
||||
"presetNotFound": "預設未找到",
|
||||
"invalidPreset": "無效的預設資料",
|
||||
"deletePresetFailed": "刪除預設失敗",
|
||||
"applyPresetFailed": "套用預設失敗"
|
||||
},
|
||||
"downloads": {
|
||||
"imagesCompleted": "範例圖片{action}完成",
|
||||
@@ -1426,6 +1497,7 @@
|
||||
"folderTreeFailed": "載入資料夾樹狀結構失敗",
|
||||
"folderTreeError": "載入資料夾樹狀結構錯誤",
|
||||
"imagesImported": "範例圖片匯入成功",
|
||||
"imagesPartial": "成功匯入 {success} 張圖片,{failed} 張失敗",
|
||||
"importFailed": "匯入範例圖片失敗:{message}"
|
||||
},
|
||||
"triggerWords": {
|
||||
@@ -1536,6 +1608,20 @@
|
||||
"content": "LoRA Manager is a passion project maintained full-time by a solo developer. Your support on Ko-fi helps cover development costs, keeps new updates coming, and unlocks a license key for the LM Civitai Extension as a thank-you gift. Every contribution truly makes a difference.",
|
||||
"supportCta": "Support on Ko-fi",
|
||||
"learnMore": "LM Civitai Extension Tutorial"
|
||||
},
|
||||
"cacheHealth": {
|
||||
"corrupted": {
|
||||
"title": "檢測到快取損壞"
|
||||
},
|
||||
"degraded": {
|
||||
"title": "檢測到快取問題"
|
||||
},
|
||||
"content": "{total} 個快取項目中有 {invalid} 個無效({rate})。這可能會導致模型遺失或錯誤。建議重建快取。",
|
||||
"rebuildCache": "重建快取",
|
||||
"dismiss": "關閉",
|
||||
"rebuilding": "重建快取中...",
|
||||
"rebuildFailed": "重建快取失敗:{error}",
|
||||
"retry": "重試"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,9 @@
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"test": "vitest run",
|
||||
"test": "npm run test:js && npm run test:vue",
|
||||
"test:js": "vitest run",
|
||||
"test:vue": "cd vue-widgets && npx vitest run",
|
||||
"test:watch": "vitest",
|
||||
"test:coverage": "node scripts/run_frontend_coverage.js"
|
||||
},
|
||||
|
||||
358
py/config.py
358
py/config.py
@@ -9,6 +9,7 @@ import json
|
||||
import urllib.parse
|
||||
import time
|
||||
|
||||
from .utils.cache_paths import CacheType, get_cache_file_path, get_legacy_cache_paths
|
||||
from .utils.settings_paths import ensure_settings_file, get_settings_dir, load_settings_template
|
||||
|
||||
# Use an environment variable to control standalone mode
|
||||
@@ -223,13 +224,26 @@ class Config:
|
||||
logger.error(f"Error checking link status for {path}: {e}")
|
||||
return False
|
||||
|
||||
def _entry_is_symlink(self, entry: os.DirEntry) -> bool:
|
||||
"""Check if a directory entry is a symlink, including Windows junctions."""
|
||||
if entry.is_symlink():
|
||||
return True
|
||||
if platform.system() == 'Windows':
|
||||
try:
|
||||
import ctypes
|
||||
FILE_ATTRIBUTE_REPARSE_POINT = 0x400
|
||||
attrs = ctypes.windll.kernel32.GetFileAttributesW(entry.path)
|
||||
return attrs != -1 and (attrs & FILE_ATTRIBUTE_REPARSE_POINT)
|
||||
except Exception:
|
||||
pass
|
||||
return False
|
||||
|
||||
def _normalize_path(self, path: str) -> str:
|
||||
return os.path.normpath(path).replace(os.sep, '/')
|
||||
|
||||
def _get_symlink_cache_path(self) -> Path:
|
||||
cache_dir = Path(get_settings_dir(create=True)) / "cache"
|
||||
cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
return cache_dir / "symlink_map.json"
|
||||
canonical_path = get_cache_file_path(CacheType.SYMLINK, create_dir=True)
|
||||
return Path(canonical_path)
|
||||
|
||||
def _symlink_roots(self) -> List[str]:
|
||||
roots: List[str] = []
|
||||
@@ -241,8 +255,32 @@ class Config:
|
||||
def _build_symlink_fingerprint(self) -> Dict[str, object]:
|
||||
roots = [self._normalize_path(path) for path in self._symlink_roots() if path]
|
||||
unique_roots = sorted(set(roots))
|
||||
# Fingerprint now only contains the root paths to avoid sensitivity to folder content changes.
|
||||
return {"roots": unique_roots}
|
||||
|
||||
# Include first-level symlinks in fingerprint for change detection.
|
||||
# This ensures new symlinks under roots trigger a cache invalidation.
|
||||
# Use lists (not tuples) for JSON serialization compatibility.
|
||||
direct_symlinks: List[List[str]] = []
|
||||
for root in unique_roots:
|
||||
try:
|
||||
if os.path.isdir(root):
|
||||
with os.scandir(root) as it:
|
||||
for entry in it:
|
||||
if self._entry_is_symlink(entry):
|
||||
try:
|
||||
target = os.path.realpath(entry.path)
|
||||
direct_symlinks.append([
|
||||
self._normalize_path(entry.path),
|
||||
self._normalize_path(target)
|
||||
])
|
||||
except OSError:
|
||||
pass
|
||||
except (OSError, PermissionError):
|
||||
pass
|
||||
|
||||
return {
|
||||
"roots": unique_roots,
|
||||
"direct_symlinks": sorted(direct_symlinks)
|
||||
}
|
||||
|
||||
def _initialize_symlink_mappings(self) -> None:
|
||||
start = time.perf_counter()
|
||||
@@ -255,15 +293,19 @@ class Config:
|
||||
)
|
||||
self._rebuild_preview_roots()
|
||||
|
||||
# Only rescan if target roots have changed.
|
||||
# This is stable across file additions/deletions.
|
||||
current_fingerprint = self._build_symlink_fingerprint()
|
||||
cached_fingerprint = self._cached_fingerprint
|
||||
|
||||
if cached_fingerprint and current_fingerprint == cached_fingerprint:
|
||||
|
||||
# Check 1: First-level symlinks unchanged (catches new symlinks at root)
|
||||
fingerprint_valid = cached_fingerprint and current_fingerprint == cached_fingerprint
|
||||
|
||||
# Check 2: All cached mappings still valid (catches changes at any depth)
|
||||
mappings_valid = self._validate_cached_mappings() if fingerprint_valid else False
|
||||
|
||||
if fingerprint_valid and mappings_valid:
|
||||
return
|
||||
|
||||
logger.info("Symlink root paths changed; rescanning symbolic links")
|
||||
logger.info("Symlink configuration changed; rescanning symbolic links")
|
||||
|
||||
self.rebuild_symlink_cache()
|
||||
logger.info(
|
||||
@@ -280,14 +322,28 @@ class Config:
|
||||
def _load_persisted_cache_into_mappings(self) -> bool:
|
||||
"""Load the symlink cache and store its fingerprint for comparison."""
|
||||
cache_path = self._get_symlink_cache_path()
|
||||
if not cache_path.exists():
|
||||
return False
|
||||
|
||||
try:
|
||||
with cache_path.open("r", encoding="utf-8") as handle:
|
||||
payload = json.load(handle)
|
||||
except Exception as exc:
|
||||
logger.info("Failed to load symlink cache %s: %s", cache_path, exc)
|
||||
# Check canonical path first, then legacy paths for migration
|
||||
paths_to_check = [cache_path]
|
||||
legacy_paths = get_legacy_cache_paths(CacheType.SYMLINK)
|
||||
paths_to_check.extend(Path(p) for p in legacy_paths if p != str(cache_path))
|
||||
|
||||
loaded_path = None
|
||||
payload = None
|
||||
|
||||
for check_path in paths_to_check:
|
||||
if not check_path.exists():
|
||||
continue
|
||||
try:
|
||||
with check_path.open("r", encoding="utf-8") as handle:
|
||||
payload = json.load(handle)
|
||||
loaded_path = check_path
|
||||
break
|
||||
except Exception as exc:
|
||||
logger.info("Failed to load symlink cache %s: %s", check_path, exc)
|
||||
continue
|
||||
|
||||
if payload is None:
|
||||
return False
|
||||
|
||||
if not isinstance(payload, dict):
|
||||
@@ -307,7 +363,67 @@ class Config:
|
||||
normalized_mappings[self._normalize_path(target)] = self._normalize_path(link)
|
||||
|
||||
self._path_mappings = normalized_mappings
|
||||
logger.info("Symlink cache loaded with %d mappings", len(self._path_mappings))
|
||||
|
||||
# Log migration if loaded from legacy path
|
||||
if loaded_path is not None and loaded_path != cache_path:
|
||||
logger.info(
|
||||
"Symlink cache migrated from %s (will save to %s)",
|
||||
loaded_path,
|
||||
cache_path,
|
||||
)
|
||||
|
||||
try:
|
||||
if loaded_path.exists():
|
||||
loaded_path.unlink()
|
||||
logger.info("Cleaned up legacy symlink cache: %s", loaded_path)
|
||||
|
||||
try:
|
||||
parent_dir = loaded_path.parent
|
||||
if parent_dir.name == "cache" and not any(parent_dir.iterdir()):
|
||||
parent_dir.rmdir()
|
||||
logger.info("Removed empty legacy cache directory: %s", parent_dir)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to cleanup legacy symlink cache %s: %s",
|
||||
loaded_path,
|
||||
exc,
|
||||
)
|
||||
else:
|
||||
logger.info("Symlink cache loaded with %d mappings", len(self._path_mappings))
|
||||
|
||||
return True
|
||||
|
||||
def _validate_cached_mappings(self) -> bool:
|
||||
"""Verify all cached symlink mappings are still valid.
|
||||
|
||||
Returns True if all mappings are valid, False if rescan is needed.
|
||||
This catches removed or retargeted symlinks at ANY depth.
|
||||
"""
|
||||
for target, link in self._path_mappings.items():
|
||||
# Convert normalized paths back to OS paths
|
||||
link_path = link.replace('/', os.sep)
|
||||
|
||||
# Check if symlink still exists
|
||||
if not self._is_link(link_path):
|
||||
logger.debug("Cached symlink no longer exists: %s", link_path)
|
||||
return False
|
||||
|
||||
# Check if target is still the same
|
||||
try:
|
||||
actual_target = self._normalize_path(os.path.realpath(link_path))
|
||||
if actual_target != target:
|
||||
logger.debug(
|
||||
"Symlink target changed: %s -> %s (cached: %s)",
|
||||
link_path, actual_target, target
|
||||
)
|
||||
return False
|
||||
except OSError:
|
||||
logger.debug("Cannot resolve symlink: %s", link_path)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _save_symlink_cache(self) -> None:
|
||||
@@ -325,83 +441,53 @@ class Config:
|
||||
logger.info("Failed to write symlink cache %s: %s", cache_path, exc)
|
||||
|
||||
def _scan_symbolic_links(self):
|
||||
"""Scan all symbolic links in LoRA, Checkpoint, and Embedding root directories"""
|
||||
"""Scan symbolic links in LoRA, Checkpoint, and Embedding root directories.
|
||||
|
||||
Only scans the first level of each root directory to avoid performance
|
||||
issues with large file systems. Detects symlinks and Windows junctions
|
||||
at the root level only (not nested symlinks in subdirectories).
|
||||
"""
|
||||
start = time.perf_counter()
|
||||
|
||||
# Reset mappings before rescanning to avoid stale entries
|
||||
self._path_mappings.clear()
|
||||
self._seed_root_symlink_mappings()
|
||||
visited_dirs: Set[str] = set()
|
||||
for root in self._symlink_roots():
|
||||
self._scan_directory_links(root, visited_dirs)
|
||||
self._scan_first_level_symlinks(root)
|
||||
logger.debug(
|
||||
"Symlink scan finished in %.2f ms with %d mappings",
|
||||
(time.perf_counter() - start) * 1000,
|
||||
len(self._path_mappings),
|
||||
)
|
||||
|
||||
def _scan_directory_links(self, root: str, visited_dirs: Set[str]):
|
||||
"""Iteratively scan directory symlinks to avoid deep recursion."""
|
||||
def _scan_first_level_symlinks(self, root: str):
|
||||
"""Scan only the first level of a directory for symlinks.
|
||||
|
||||
This avoids traversing the entire directory tree which can be extremely
|
||||
slow for large model collections. Only symlinks directly under the root
|
||||
are detected.
|
||||
"""
|
||||
try:
|
||||
# Note: We only use realpath for the initial root if it's not already resolved
|
||||
# to ensure we have a valid entry point.
|
||||
root_real = self._normalize_path(os.path.realpath(root))
|
||||
except OSError:
|
||||
root_real = self._normalize_path(root)
|
||||
with os.scandir(root) as it:
|
||||
for entry in it:
|
||||
try:
|
||||
# Only detect symlinks including Windows junctions
|
||||
# Skip normal directories to avoid deep traversal
|
||||
if not self._entry_is_symlink(entry):
|
||||
continue
|
||||
|
||||
if root_real in visited_dirs:
|
||||
return
|
||||
# Resolve the symlink target
|
||||
target_path = os.path.realpath(entry.path)
|
||||
if not os.path.isdir(target_path):
|
||||
continue
|
||||
|
||||
visited_dirs.add(root_real)
|
||||
# Stack entries: (display_path, real_resolved_path)
|
||||
stack: List[Tuple[str, str]] = [(root, root_real)]
|
||||
|
||||
while stack:
|
||||
current_display, current_real = stack.pop()
|
||||
try:
|
||||
with os.scandir(current_display) as it:
|
||||
for entry in it:
|
||||
try:
|
||||
# 1. High speed detection using dirent data (is_symlink)
|
||||
is_link = entry.is_symlink()
|
||||
|
||||
# On Windows, is_symlink handles reparse points
|
||||
if is_link:
|
||||
# Only resolve realpath when we actually find a link
|
||||
target_path = os.path.realpath(entry.path)
|
||||
if not os.path.isdir(target_path):
|
||||
continue
|
||||
|
||||
normalized_target = self._normalize_path(target_path)
|
||||
self.add_path_mapping(entry.path, target_path)
|
||||
|
||||
if normalized_target in visited_dirs:
|
||||
continue
|
||||
|
||||
visited_dirs.add(normalized_target)
|
||||
stack.append((target_path, normalized_target))
|
||||
continue
|
||||
|
||||
# 2. Process normal directories
|
||||
if not entry.is_dir(follow_symlinks=False):
|
||||
continue
|
||||
|
||||
# For normal directories, we avoid realpath() call by
|
||||
# incrementally building the real path relative to current_real.
|
||||
# This is safe because 'entry' is NOT a symlink.
|
||||
entry_real = self._normalize_path(os.path.join(current_real, entry.name))
|
||||
|
||||
if entry_real in visited_dirs:
|
||||
continue
|
||||
|
||||
visited_dirs.add(entry_real)
|
||||
stack.append((entry.path, entry_real))
|
||||
except Exception as inner_exc:
|
||||
logger.debug(
|
||||
"Error processing directory entry %s: %s", entry.path, inner_exc
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning links in {current_display}: {e}")
|
||||
self.add_path_mapping(entry.path, target_path)
|
||||
except Exception as inner_exc:
|
||||
logger.debug(
|
||||
"Error processing directory entry %s: %s", entry.path, inner_exc
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning links in {root}: {e}")
|
||||
|
||||
|
||||
|
||||
@@ -559,6 +645,23 @@ class Config:
|
||||
checkpoint_map = self._dedupe_existing_paths(checkpoint_paths)
|
||||
unet_map = self._dedupe_existing_paths(unet_paths)
|
||||
|
||||
# Detect when checkpoints and unet share the same physical location
|
||||
# This is a configuration issue that can cause duplicate model entries
|
||||
overlapping_real_paths = set(checkpoint_map.keys()) & set(unet_map.keys())
|
||||
if overlapping_real_paths:
|
||||
logger.warning(
|
||||
"Detected overlapping paths between 'checkpoints' and 'diffusion_models' (unet). "
|
||||
"They should not point to the same physical folder as they are different model types. "
|
||||
"Please fix your ComfyUI path configuration to separate these folders. "
|
||||
"Falling back to 'checkpoints' for backward compatibility. "
|
||||
"Overlapping real paths: %s",
|
||||
[checkpoint_map.get(rp, rp) for rp in overlapping_real_paths]
|
||||
)
|
||||
# Remove overlapping paths from unet_map to prioritize checkpoints
|
||||
for rp in overlapping_real_paths:
|
||||
if rp in unet_map:
|
||||
del unet_map[rp]
|
||||
|
||||
merged_map: Dict[str, str] = {}
|
||||
for real_path, original in {**checkpoint_map, **unet_map}.items():
|
||||
if real_path not in merged_map:
|
||||
@@ -663,7 +766,23 @@ class Config:
|
||||
return f'/api/lm/previews?path={encoded_path}'
|
||||
|
||||
def is_preview_path_allowed(self, preview_path: str) -> bool:
|
||||
"""Return ``True`` if ``preview_path`` is within an allowed directory."""
|
||||
"""Return ``True`` if ``preview_path`` is within an allowed directory.
|
||||
|
||||
If the path is initially rejected, attempts to discover deep symlinks
|
||||
that were not scanned during initialization. If a symlink is found,
|
||||
updates the in-memory path mappings and retries the check.
|
||||
"""
|
||||
|
||||
if self._is_path_in_allowed_roots(preview_path):
|
||||
return True
|
||||
|
||||
if self._try_discover_deep_symlink(preview_path):
|
||||
return self._is_path_in_allowed_roots(preview_path)
|
||||
|
||||
return False
|
||||
|
||||
def _is_path_in_allowed_roots(self, preview_path: str) -> bool:
|
||||
"""Check if preview_path is within allowed preview roots without modification."""
|
||||
|
||||
if not preview_path:
|
||||
return False
|
||||
@@ -673,29 +792,72 @@ class Config:
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
# Use os.path.normcase for case-insensitive comparison on Windows.
|
||||
# On Windows, Path.relative_to() is case-sensitive for drive letters,
|
||||
# causing paths like 'a:/folder' to not match 'A:/folder'.
|
||||
candidate_str = os.path.normcase(str(candidate))
|
||||
for root in self._preview_root_paths:
|
||||
root_str = os.path.normcase(str(root))
|
||||
# Check if candidate is equal to or under the root directory
|
||||
if candidate_str == root_str or candidate_str.startswith(root_str + os.sep):
|
||||
return True
|
||||
|
||||
if self._preview_root_paths:
|
||||
logger.debug(
|
||||
"Preview path rejected: %s (candidate=%s, num_roots=%d, first_root=%s)",
|
||||
preview_path,
|
||||
candidate_str,
|
||||
len(self._preview_root_paths),
|
||||
os.path.normcase(str(next(iter(self._preview_root_paths)))),
|
||||
)
|
||||
else:
|
||||
logger.debug(
|
||||
"Preview path rejected (no roots configured): %s",
|
||||
preview_path,
|
||||
)
|
||||
logger.debug(
|
||||
"Path not in allowed roots: %s (candidate=%s, num_roots=%d)",
|
||||
preview_path,
|
||||
candidate_str,
|
||||
len(self._preview_root_paths),
|
||||
)
|
||||
|
||||
return False
|
||||
|
||||
def _try_discover_deep_symlink(self, preview_path: str) -> bool:
|
||||
"""Attempt to discover a deep symlink that contains the preview_path.
|
||||
|
||||
Walks up from the preview path to the root directories, checking each
|
||||
parent directory for symlinks. If a symlink is found, updates the
|
||||
in-memory path mappings and preview roots.
|
||||
|
||||
Only updates in-memory state (self._path_mappings and self._preview_root_paths),
|
||||
does not modify the persistent cache file.
|
||||
|
||||
Returns:
|
||||
True if a symlink was discovered and mappings updated, False otherwise.
|
||||
"""
|
||||
if not preview_path:
|
||||
return False
|
||||
|
||||
try:
|
||||
candidate = Path(preview_path).expanduser()
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
current = candidate
|
||||
while True:
|
||||
try:
|
||||
if self._is_link(str(current)):
|
||||
try:
|
||||
target = os.path.realpath(str(current))
|
||||
normalized_target = self._normalize_path(target)
|
||||
normalized_link = self._normalize_path(str(current))
|
||||
|
||||
self._path_mappings[normalized_target] = normalized_link
|
||||
self._preview_root_paths.update(self._expand_preview_root(normalized_target))
|
||||
self._preview_root_paths.update(self._expand_preview_root(normalized_link))
|
||||
|
||||
logger.debug(
|
||||
"Discovered deep symlink: %s -> %s (preview path: %s)",
|
||||
normalized_link,
|
||||
normalized_target,
|
||||
preview_path
|
||||
)
|
||||
|
||||
return True
|
||||
except OSError:
|
||||
pass
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
parent = current.parent
|
||||
if parent == current:
|
||||
break
|
||||
current = parent
|
||||
|
||||
return False
|
||||
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
import os
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Check if running in standalone mode
|
||||
standalone_mode = os.environ.get("LORA_MANAGER_STANDALONE", "0") == "1" or os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "0"
|
||||
@@ -14,7 +17,7 @@ if not standalone_mode:
|
||||
# Initialize registry
|
||||
registry = MetadataRegistry()
|
||||
|
||||
print("ComfyUI Metadata Collector initialized")
|
||||
logger.info("ComfyUI Metadata Collector initialized")
|
||||
|
||||
def get_metadata(prompt_id=None):
|
||||
"""Helper function to get metadata from the registry"""
|
||||
@@ -23,7 +26,7 @@ if not standalone_mode:
|
||||
else:
|
||||
# Standalone mode - provide dummy implementations
|
||||
def init():
|
||||
print("ComfyUI Metadata Collector disabled in standalone mode")
|
||||
logger.info("ComfyUI Metadata Collector disabled in standalone mode")
|
||||
|
||||
def get_metadata(prompt_id=None):
|
||||
"""Dummy implementation for standalone mode"""
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
import sys
|
||||
import inspect
|
||||
import logging
|
||||
from .metadata_registry import MetadataRegistry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class MetadataHook:
|
||||
"""Install hooks for metadata collection"""
|
||||
|
||||
@@ -23,7 +26,7 @@ class MetadataHook:
|
||||
|
||||
# If we can't find the execution module, we can't install hooks
|
||||
if execution is None:
|
||||
print("Could not locate ComfyUI execution module, metadata collection disabled")
|
||||
logger.warning("Could not locate ComfyUI execution module, metadata collection disabled")
|
||||
return
|
||||
|
||||
# Detect whether we're using the new async version of ComfyUI
|
||||
@@ -37,16 +40,16 @@ class MetadataHook:
|
||||
is_async = inspect.iscoroutinefunction(execution._map_node_over_list)
|
||||
|
||||
if is_async:
|
||||
print("Detected async ComfyUI execution, installing async metadata hooks")
|
||||
logger.info("Detected async ComfyUI execution, installing async metadata hooks")
|
||||
MetadataHook._install_async_hooks(execution, map_node_func_name)
|
||||
else:
|
||||
print("Detected sync ComfyUI execution, installing sync metadata hooks")
|
||||
logger.info("Detected sync ComfyUI execution, installing sync metadata hooks")
|
||||
MetadataHook._install_sync_hooks(execution)
|
||||
|
||||
print("Metadata collection hooks installed for runtime values")
|
||||
logger.info("Metadata collection hooks installed for runtime values")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error installing metadata hooks: {str(e)}")
|
||||
logger.error(f"Error installing metadata hooks: {str(e)}")
|
||||
|
||||
@staticmethod
|
||||
def _install_sync_hooks(execution):
|
||||
@@ -82,7 +85,7 @@ class MetadataHook:
|
||||
if node_id is not None:
|
||||
registry.record_node_execution(node_id, class_type, input_data_all, None)
|
||||
except Exception as e:
|
||||
print(f"Error collecting metadata (pre-execution): {str(e)}")
|
||||
logger.error(f"Error collecting metadata (pre-execution): {str(e)}")
|
||||
|
||||
# Execute the original function
|
||||
results = original_map_node_over_list(obj, input_data_all, func, allow_interrupt, execution_block_cb, pre_execute_cb)
|
||||
@@ -113,7 +116,7 @@ class MetadataHook:
|
||||
if node_id is not None:
|
||||
registry.update_node_execution(node_id, class_type, results)
|
||||
except Exception as e:
|
||||
print(f"Error collecting metadata (post-execution): {str(e)}")
|
||||
logger.error(f"Error collecting metadata (post-execution): {str(e)}")
|
||||
|
||||
return results
|
||||
|
||||
@@ -159,7 +162,7 @@ class MetadataHook:
|
||||
if node_id is not None:
|
||||
registry.record_node_execution(node_id, class_type, input_data_all, None)
|
||||
except Exception as e:
|
||||
print(f"Error collecting metadata (pre-execution): {str(e)}")
|
||||
logger.error(f"Error collecting metadata (pre-execution): {str(e)}")
|
||||
|
||||
# Call original function with all args/kwargs
|
||||
results = await original_map_node_over_list(
|
||||
@@ -176,7 +179,7 @@ class MetadataHook:
|
||||
if node_id is not None:
|
||||
registry.update_node_execution(node_id, class_type, results)
|
||||
except Exception as e:
|
||||
print(f"Error collecting metadata (post-execution): {str(e)}")
|
||||
logger.error(f"Error collecting metadata (post-execution): {str(e)}")
|
||||
|
||||
return results
|
||||
|
||||
|
||||
@@ -714,10 +714,10 @@ NODE_EXTRACTORS = {
|
||||
"UNETLoader": UNETLoaderExtractor, # Updated to use dedicated extractor
|
||||
"UnetLoaderGGUF": UNETLoaderExtractor, # Updated to use dedicated extractor
|
||||
"LoraLoader": LoraLoaderExtractor,
|
||||
"LoraManagerLoader": LoraLoaderManagerExtractor,
|
||||
"LoraLoaderLM": LoraLoaderManagerExtractor,
|
||||
# Conditioning
|
||||
"CLIPTextEncode": CLIPTextEncodeExtractor,
|
||||
"PromptLoraManager": CLIPTextEncodeExtractor,
|
||||
"PromptLM": CLIPTextEncodeExtractor,
|
||||
"CLIPTextEncodeFlux": CLIPTextEncodeFluxExtractor, # Add CLIPTextEncodeFlux
|
||||
"WAS_Text_to_Conditioning": CLIPTextEncodeExtractor,
|
||||
"AdvancedCLIPTextEncode": CLIPTextEncodeExtractor, # From https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb
|
||||
|
||||
@@ -4,7 +4,7 @@ from ..metadata_collector.metadata_processor import MetadataProcessor
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DebugMetadata:
|
||||
class DebugMetadataLM:
|
||||
NAME = "Debug Metadata (LoraManager)"
|
||||
CATEGORY = "Lora Manager/utils"
|
||||
DESCRIPTION = "Debug node to verify metadata_processor functionality"
|
||||
|
||||
134
py/nodes/lora_cycler.py
Normal file
134
py/nodes/lora_cycler.py
Normal file
@@ -0,0 +1,134 @@
|
||||
"""
|
||||
Lora Cycler Node - Sequentially cycles through LoRAs from a pool.
|
||||
|
||||
This node accepts optional pool_config input to filter available LoRAs, and outputs
|
||||
a LORA_STACK with one LoRA at a time. Returns UI updates with current/next LoRA info
|
||||
and tracks the cycle progress which persists across workflow save/load.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
from ..utils.utils import get_lora_info
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoraCyclerLM:
|
||||
"""Node that sequentially cycles through LoRAs from a pool"""
|
||||
|
||||
NAME = "Lora Cycler (LoraManager)"
|
||||
CATEGORY = "Lora Manager/randomizer"
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
"required": {
|
||||
"cycler_config": ("CYCLER_CONFIG", {}),
|
||||
},
|
||||
"optional": {
|
||||
"pool_config": ("POOL_CONFIG", {}),
|
||||
},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("LORA_STACK",)
|
||||
RETURN_NAMES = ("LORA_STACK",)
|
||||
|
||||
FUNCTION = "cycle"
|
||||
OUTPUT_NODE = False
|
||||
|
||||
async def cycle(self, cycler_config, pool_config=None):
|
||||
"""
|
||||
Cycle through LoRAs based on configuration and pool filters.
|
||||
|
||||
Args:
|
||||
cycler_config: Dict with cycler settings (current_index, model_strength, clip_strength, sort_by)
|
||||
pool_config: Optional config from LoRA Pool node for filtering
|
||||
|
||||
Returns:
|
||||
Dictionary with 'result' (LORA_STACK tuple) and 'ui' (for widget display)
|
||||
"""
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
from ..services.lora_service import LoraService
|
||||
|
||||
# Extract settings from cycler_config
|
||||
current_index = cycler_config.get("current_index", 1) # 1-based
|
||||
model_strength = float(cycler_config.get("model_strength", 1.0))
|
||||
clip_strength = float(cycler_config.get("clip_strength", 1.0))
|
||||
sort_by = "filename"
|
||||
|
||||
# Dual-index mechanism for batch queue synchronization
|
||||
execution_index = cycler_config.get("execution_index") # Can be None
|
||||
# next_index_from_config = cycler_config.get("next_index") # Not used on backend
|
||||
|
||||
# Get scanner and service
|
||||
scanner = await ServiceRegistry.get_lora_scanner()
|
||||
lora_service = LoraService(scanner)
|
||||
|
||||
# Get filtered and sorted LoRA list
|
||||
lora_list = await lora_service.get_cycler_list(
|
||||
pool_config=pool_config, sort_by=sort_by
|
||||
)
|
||||
|
||||
total_count = len(lora_list)
|
||||
|
||||
if total_count == 0:
|
||||
logger.warning("[LoraCyclerLM] No LoRAs available in pool")
|
||||
return {
|
||||
"result": ([],),
|
||||
"ui": {
|
||||
"current_index": [1],
|
||||
"next_index": [1],
|
||||
"total_count": [0],
|
||||
"current_lora_name": [""],
|
||||
"current_lora_filename": [""],
|
||||
"error": ["No LoRAs available in pool"],
|
||||
},
|
||||
}
|
||||
|
||||
# Determine which index to use for this execution
|
||||
# If execution_index is provided (batch queue case), use it
|
||||
# Otherwise use current_index (first execution or non-batch case)
|
||||
if execution_index is not None:
|
||||
actual_index = execution_index
|
||||
else:
|
||||
actual_index = current_index
|
||||
|
||||
# Clamp index to valid range (1-based)
|
||||
clamped_index = max(1, min(actual_index, total_count))
|
||||
|
||||
# Get LoRA at current index (convert to 0-based for list access)
|
||||
current_lora = lora_list[clamped_index - 1]
|
||||
|
||||
# Build LORA_STACK with single LoRA
|
||||
lora_path, _ = get_lora_info(current_lora["file_name"])
|
||||
if not lora_path:
|
||||
logger.warning(
|
||||
f"[LoraCyclerLM] Could not find path for LoRA: {current_lora['file_name']}"
|
||||
)
|
||||
lora_stack = []
|
||||
else:
|
||||
# Normalize path separators
|
||||
lora_path = lora_path.replace("/", os.sep)
|
||||
lora_stack = [(lora_path, model_strength, clip_strength)]
|
||||
|
||||
# Calculate next index (wrap to 1 if at end)
|
||||
next_index = clamped_index + 1
|
||||
if next_index > total_count:
|
||||
next_index = 1
|
||||
|
||||
# Get next LoRA for UI display (what will be used next generation)
|
||||
next_lora = lora_list[next_index - 1]
|
||||
next_display_name = next_lora["file_name"]
|
||||
|
||||
return {
|
||||
"result": (lora_stack,),
|
||||
"ui": {
|
||||
"current_index": [clamped_index],
|
||||
"next_index": [next_index],
|
||||
"total_count": [total_count],
|
||||
"current_lora_name": [current_lora["file_name"]],
|
||||
"current_lora_filename": [current_lora["file_name"]],
|
||||
"next_lora_name": [next_display_name],
|
||||
"next_lora_filename": [next_lora["file_name"]],
|
||||
},
|
||||
}
|
||||
@@ -6,7 +6,7 @@ from .utils import FlexibleOptionalInputType, any_type, extract_lora_name, get_l
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class LoraManagerLoader:
|
||||
class LoraLoaderLM:
|
||||
NAME = "Lora Loader (LoraManager)"
|
||||
CATEGORY = "Lora Manager/loaders"
|
||||
|
||||
@@ -16,12 +16,9 @@ class LoraManagerLoader:
|
||||
"required": {
|
||||
"model": ("MODEL",),
|
||||
# "clip": ("CLIP",),
|
||||
"text": ("STRING", {
|
||||
"multiline": True,
|
||||
"pysssss.autocomplete": False,
|
||||
"dynamicPrompts": True,
|
||||
"text": ("AUTOCOMPLETE_TEXT_LORAS", {
|
||||
"placeholder": "Search LoRAs to add...",
|
||||
"tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
|
||||
"placeholder": "LoRA syntax input: <lora:name:strength>"
|
||||
}),
|
||||
},
|
||||
"optional": FlexibleOptionalInputType(any_type),
|
||||
@@ -131,7 +128,7 @@ class LoraManagerLoader:
|
||||
|
||||
return (model, clip, trigger_words_text, formatted_loras_text)
|
||||
|
||||
class LoraManagerTextLoader:
|
||||
class LoraTextLoaderLM:
|
||||
NAME = "LoRA Text Loader (LoraManager)"
|
||||
CATEGORY = "Lora Manager/loaders"
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoraPoolNode:
|
||||
class LoraPoolLM:
|
||||
"""
|
||||
A node that defines LoRA filter criteria through a Vue-based widget.
|
||||
|
||||
@@ -67,7 +67,7 @@ class LoraPoolNode:
|
||||
filters = pool_config.get("filters", self._default_config()["filters"])
|
||||
|
||||
# Log for debugging
|
||||
logger.debug(f"[LoraPoolNode] Processing filters: {filters}")
|
||||
logger.debug(f"[LoraPoolLM] Processing filters: {filters}")
|
||||
|
||||
return (filters,)
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ from .utils import extract_lora_name
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoraRandomizerNode:
|
||||
class LoraRandomizerLM:
|
||||
"""Node that randomly selects LoRAs from a pool"""
|
||||
|
||||
NAME = "Lora Randomizer (LoraManager)"
|
||||
@@ -72,7 +72,7 @@ class LoraRandomizerNode:
|
||||
loras = self._preprocess_loras_input(loras)
|
||||
|
||||
roll_mode = randomizer_config.get("roll_mode", "always")
|
||||
logger.debug(f"[LoraRandomizerNode] roll_mode: {roll_mode}")
|
||||
logger.debug(f"[LoraRandomizerLM] roll_mode: {roll_mode}")
|
||||
|
||||
# Dual seed mechanism for batch queue synchronization
|
||||
# execution_seed: seed for generating execution_stack (= previous next_seed)
|
||||
@@ -127,7 +127,7 @@ class LoraRandomizerNode:
|
||||
lora_path, trigger_words = get_lora_info(lora["name"])
|
||||
if not lora_path:
|
||||
logger.warning(
|
||||
f"[LoraRandomizerNode] Could not find path for LoRA: {lora['name']}"
|
||||
f"[LoraRandomizerLM] Could not find path for LoRA: {lora['name']}"
|
||||
)
|
||||
continue
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class LoraStacker:
|
||||
class LoraStackerLM:
|
||||
NAME = "Lora Stacker (LoraManager)"
|
||||
CATEGORY = "Lora Manager/stackers"
|
||||
|
||||
@@ -14,12 +14,9 @@ class LoraStacker:
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
"required": {
|
||||
"text": ("STRING", {
|
||||
"multiline": True,
|
||||
"pysssss.autocomplete": False,
|
||||
"dynamicPrompts": True,
|
||||
"text": ("AUTOCOMPLETE_TEXT_LORAS", {
|
||||
"placeholder": "Search LoRAs to add...",
|
||||
"tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
|
||||
"placeholder": "LoRA syntax input: <lora:name:strength>"
|
||||
}),
|
||||
},
|
||||
"optional": FlexibleOptionalInputType(any_type),
|
||||
|
||||
@@ -1,59 +1,84 @@
|
||||
from typing import Any, Optional
|
||||
from typing import Any
|
||||
import inspect
|
||||
|
||||
class PromptLoraManager:
|
||||
|
||||
class _AllContainer:
|
||||
"""Container that accepts any key for dynamic input validation."""
|
||||
|
||||
def __contains__(self, item):
|
||||
return True
|
||||
|
||||
def __getitem__(self, key):
|
||||
return ("STRING", {"forceInput": True})
|
||||
|
||||
|
||||
class PromptLM:
|
||||
"""Encodes text (and optional trigger words) into CLIP conditioning."""
|
||||
|
||||
NAME = "Prompt (LoraManager)"
|
||||
CATEGORY = "Lora Manager/conditioning"
|
||||
DESCRIPTION = (
|
||||
"Encodes a text prompt using a CLIP model into an embedding that can be used "
|
||||
"to guide the diffusion model towards generating specific images."
|
||||
"to guide the diffusion model towards generating specific images. "
|
||||
"Supports dynamic trigger words inputs."
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
dyn_inputs = {
|
||||
"trigger_words1": (
|
||||
"STRING",
|
||||
{
|
||||
"forceInput": True,
|
||||
"tooltip": "Trigger words to prepend. Connect to add more inputs.",
|
||||
},
|
||||
),
|
||||
}
|
||||
|
||||
# Bypass validation for dynamic inputs during graph execution
|
||||
stack = inspect.stack()
|
||||
if len(stack) > 2 and stack[2].function == "get_input_info":
|
||||
dyn_inputs = _AllContainer()
|
||||
|
||||
return {
|
||||
"required": {
|
||||
"text": (
|
||||
'STRING',
|
||||
"AUTOCOMPLETE_TEXT_PROMPT,STRING",
|
||||
{
|
||||
"multiline": True,
|
||||
"pysssss.autocomplete": False,
|
||||
"dynamicPrompts": True,
|
||||
"widgetType": "AUTOCOMPLETE_TEXT_PROMPT",
|
||||
"placeholder": "Enter prompt... /char, /artist for quick tag search",
|
||||
"tooltip": "The text to be encoded.",
|
||||
},
|
||||
),
|
||||
"clip": (
|
||||
'CLIP',
|
||||
"CLIP",
|
||||
{"tooltip": "The CLIP model used for encoding the text."},
|
||||
),
|
||||
},
|
||||
"optional": {
|
||||
"trigger_words": (
|
||||
'STRING',
|
||||
{
|
||||
"forceInput": True,
|
||||
"tooltip": (
|
||||
"Optional trigger words to prepend to the text before "
|
||||
"encoding."
|
||||
)
|
||||
},
|
||||
)
|
||||
},
|
||||
"optional": dyn_inputs,
|
||||
}
|
||||
|
||||
RETURN_TYPES = ('CONDITIONING', 'STRING',)
|
||||
RETURN_NAMES = ('CONDITIONING', 'PROMPT',)
|
||||
RETURN_TYPES = ("CONDITIONING", "STRING")
|
||||
RETURN_NAMES = ("CONDITIONING", "PROMPT")
|
||||
OUTPUT_TOOLTIPS = (
|
||||
"A conditioning containing the embedded text used to guide the diffusion model.",
|
||||
)
|
||||
FUNCTION = "encode"
|
||||
|
||||
def encode(self, text: str, clip: Any, trigger_words: Optional[str] = None):
|
||||
prompt = text
|
||||
def encode(self, text: str, clip: Any, **kwargs):
|
||||
# Collect all trigger words from dynamic inputs
|
||||
trigger_words = []
|
||||
for key, value in kwargs.items():
|
||||
if key.startswith("trigger_words") and value:
|
||||
trigger_words.append(value)
|
||||
|
||||
# Build final prompt
|
||||
if trigger_words:
|
||||
prompt = ", ".join([trigger_words, text])
|
||||
prompt = ", ".join(trigger_words + [text])
|
||||
else:
|
||||
prompt = text
|
||||
|
||||
from nodes import CLIPTextEncode # type: ignore
|
||||
|
||||
conditioning = CLIPTextEncode().encode(clip, prompt)[0]
|
||||
return (conditioning, prompt,)
|
||||
return (conditioning, prompt)
|
||||
@@ -8,6 +8,9 @@ from ..metadata_collector.metadata_processor import MetadataProcessor
|
||||
from ..metadata_collector import get_metadata
|
||||
from PIL import Image, PngImagePlugin
|
||||
import piexif
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class SaveImageLM:
|
||||
NAME = "Save Image (LoraManager)"
|
||||
@@ -385,7 +388,7 @@ class SaveImageLM:
|
||||
exif_bytes = piexif.dump(exif_dict)
|
||||
save_kwargs["exif"] = exif_bytes
|
||||
except Exception as e:
|
||||
print(f"Error adding EXIF data: {e}")
|
||||
logger.error(f"Error adding EXIF data: {e}")
|
||||
img.save(file_path, format="JPEG", **save_kwargs)
|
||||
elif file_format == "webp":
|
||||
try:
|
||||
@@ -403,7 +406,7 @@ class SaveImageLM:
|
||||
exif_bytes = piexif.dump(exif_dict)
|
||||
save_kwargs["exif"] = exif_bytes
|
||||
except Exception as e:
|
||||
print(f"Error adding EXIF data: {e}")
|
||||
logger.error(f"Error adding EXIF data: {e}")
|
||||
|
||||
img.save(file_path, format="WEBP", **save_kwargs)
|
||||
|
||||
@@ -414,7 +417,7 @@ class SaveImageLM:
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error saving image: {e}")
|
||||
logger.error(f"Error saving image: {e}")
|
||||
|
||||
return results
|
||||
|
||||
|
||||
33
py/nodes/text.py
Normal file
33
py/nodes/text.py
Normal file
@@ -0,0 +1,33 @@
|
||||
class TextLM:
|
||||
"""A simple text node with autocomplete support."""
|
||||
|
||||
NAME = "Text (LoraManager)"
|
||||
CATEGORY = "Lora Manager/utils"
|
||||
DESCRIPTION = (
|
||||
"A simple text input node with autocomplete support for tags and styles."
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
"required": {
|
||||
"text": (
|
||||
"AUTOCOMPLETE_TEXT_PROMPT,STRING",
|
||||
{
|
||||
"widgetType": "AUTOCOMPLETE_TEXT_PROMPT",
|
||||
"placeholder": "Enter text... /char, /artist for quick tag search",
|
||||
"tooltip": "The text output.",
|
||||
},
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("STRING",)
|
||||
RETURN_NAMES = ("STRING",)
|
||||
OUTPUT_TOOLTIPS = (
|
||||
"The text output.",
|
||||
)
|
||||
FUNCTION = "process"
|
||||
|
||||
def process(self, text: str):
|
||||
return (text,)
|
||||
@@ -6,27 +6,36 @@ import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TriggerWordToggle:
|
||||
class TriggerWordToggleLM:
|
||||
NAME = "TriggerWord Toggle (LoraManager)"
|
||||
CATEGORY = "Lora Manager/utils"
|
||||
DESCRIPTION = "Toggle trigger words on/off"
|
||||
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
"required": {
|
||||
"group_mode": ("BOOLEAN", {
|
||||
"default": True,
|
||||
"tooltip": "When enabled, treats each group of trigger words as a single toggleable unit."
|
||||
}),
|
||||
"default_active": ("BOOLEAN", {
|
||||
"default": True,
|
||||
"tooltip": "Sets the default initial state (active or inactive) when trigger words are added."
|
||||
}),
|
||||
"allow_strength_adjustment": ("BOOLEAN", {
|
||||
"default": False,
|
||||
"tooltip": "Enable mouse wheel adjustment of each trigger word's strength."
|
||||
}),
|
||||
"group_mode": (
|
||||
"BOOLEAN",
|
||||
{
|
||||
"default": True,
|
||||
"tooltip": "When enabled, treats each group of trigger words as a single toggleable unit.",
|
||||
},
|
||||
),
|
||||
"default_active": (
|
||||
"BOOLEAN",
|
||||
{
|
||||
"default": True,
|
||||
"tooltip": "Sets the default initial state (active or inactive) when trigger words are added.",
|
||||
},
|
||||
),
|
||||
"allow_strength_adjustment": (
|
||||
"BOOLEAN",
|
||||
{
|
||||
"default": False,
|
||||
"tooltip": "Enable mouse wheel adjustment of each trigger word's strength.",
|
||||
},
|
||||
),
|
||||
},
|
||||
"optional": FlexibleOptionalInputType(any_type),
|
||||
"hidden": {
|
||||
@@ -38,19 +47,35 @@ class TriggerWordToggle:
|
||||
RETURN_NAMES = ("filtered_trigger_words",)
|
||||
FUNCTION = "process_trigger_words"
|
||||
|
||||
def _get_toggle_data(self, kwargs, key='toggle_trigger_words'):
|
||||
def _get_toggle_data(self, kwargs, key="toggle_trigger_words"):
|
||||
"""Helper to extract data from either old or new kwargs format"""
|
||||
if key not in kwargs:
|
||||
return None
|
||||
|
||||
|
||||
data = kwargs[key]
|
||||
# Handle new format: {'key': {'__value__': ...}}
|
||||
if isinstance(data, dict) and '__value__' in data:
|
||||
return data['__value__']
|
||||
if isinstance(data, dict) and "__value__" in data:
|
||||
return data["__value__"]
|
||||
# Handle old format: {'key': ...}
|
||||
else:
|
||||
return data
|
||||
|
||||
def _normalize_trigger_words(self, trigger_words):
|
||||
"""Normalize trigger words by splitting by both single and double commas, stripping whitespace, and filtering empty strings"""
|
||||
if not trigger_words or not isinstance(trigger_words, str):
|
||||
return set()
|
||||
|
||||
# Split by double commas first to preserve groups, then by single commas
|
||||
groups = re.split(r",{2,}", trigger_words)
|
||||
words = []
|
||||
for group in groups:
|
||||
# Split each group by single comma
|
||||
group_words = [word.strip() for word in group.split(",")]
|
||||
words.extend(group_words)
|
||||
|
||||
# Filter out empty strings and return as set
|
||||
return set(word for word in words if word)
|
||||
|
||||
def process_trigger_words(
|
||||
self,
|
||||
id,
|
||||
@@ -60,13 +85,25 @@ class TriggerWordToggle:
|
||||
**kwargs,
|
||||
):
|
||||
# Handle both old and new formats for trigger_words
|
||||
trigger_words_data = self._get_toggle_data(kwargs, 'orinalMessage')
|
||||
trigger_words = trigger_words_data if isinstance(trigger_words_data, str) else ""
|
||||
|
||||
trigger_words_data = self._get_toggle_data(kwargs, "orinalMessage")
|
||||
trigger_words = (
|
||||
trigger_words_data if isinstance(trigger_words_data, str) else ""
|
||||
)
|
||||
|
||||
filtered_triggers = trigger_words
|
||||
|
||||
|
||||
# Check if trigger_words is provided and different from orinalMessage
|
||||
trigger_words_override = self._get_toggle_data(kwargs, "trigger_words")
|
||||
if (
|
||||
trigger_words_override
|
||||
and isinstance(trigger_words_override, str)
|
||||
and self._normalize_trigger_words(trigger_words_override) != self._normalize_trigger_words(trigger_words)
|
||||
):
|
||||
filtered_triggers = trigger_words_override
|
||||
return (filtered_triggers,)
|
||||
|
||||
# Get toggle data with support for both formats
|
||||
trigger_data = self._get_toggle_data(kwargs, 'toggle_trigger_words')
|
||||
trigger_data = self._get_toggle_data(kwargs, "toggle_trigger_words")
|
||||
if trigger_data:
|
||||
try:
|
||||
# Convert to list if it's a JSON string
|
||||
@@ -77,7 +114,9 @@ class TriggerWordToggle:
|
||||
if group_mode:
|
||||
if allow_strength_adjustment:
|
||||
parsed_items = [
|
||||
self._parse_trigger_item(item, allow_strength_adjustment)
|
||||
self._parse_trigger_item(
|
||||
item, allow_strength_adjustment
|
||||
)
|
||||
for item in trigger_data
|
||||
]
|
||||
filtered_groups = [
|
||||
@@ -91,11 +130,14 @@ class TriggerWordToggle:
|
||||
]
|
||||
else:
|
||||
filtered_groups = [
|
||||
(item.get('text') or "").strip()
|
||||
(item.get("text") or "").strip()
|
||||
for item in trigger_data
|
||||
if (item.get('text') or "").strip() and item.get('active', False)
|
||||
if (item.get("text") or "").strip()
|
||||
and item.get("active", False)
|
||||
]
|
||||
filtered_triggers = ', '.join(filtered_groups) if filtered_groups else ""
|
||||
filtered_triggers = (
|
||||
", ".join(filtered_groups) if filtered_groups else ""
|
||||
)
|
||||
else:
|
||||
parsed_items = [
|
||||
self._parse_trigger_item(item, allow_strength_adjustment)
|
||||
@@ -110,28 +152,34 @@ class TriggerWordToggle:
|
||||
for item in parsed_items
|
||||
if item["text"] and item["active"]
|
||||
]
|
||||
filtered_triggers = ', '.join(filtered_words) if filtered_words else ""
|
||||
filtered_triggers = (
|
||||
", ".join(filtered_words) if filtered_words else ""
|
||||
)
|
||||
else:
|
||||
# Fallback to original message parsing if data is not in the expected list format
|
||||
if group_mode:
|
||||
groups = re.split(r',{2,}', trigger_words)
|
||||
groups = re.split(r",{2,}", trigger_words)
|
||||
groups = [group.strip() for group in groups if group.strip()]
|
||||
filtered_triggers = ', '.join(groups)
|
||||
filtered_triggers = ", ".join(groups)
|
||||
else:
|
||||
words = [word.strip() for word in trigger_words.split(',') if word.strip()]
|
||||
filtered_triggers = ', '.join(words)
|
||||
words = [
|
||||
word.strip()
|
||||
for word in trigger_words.split(",")
|
||||
if word.strip()
|
||||
]
|
||||
filtered_triggers = ", ".join(words)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing trigger words: {e}")
|
||||
|
||||
|
||||
return (filtered_triggers,)
|
||||
|
||||
def _parse_trigger_item(self, item, allow_strength_adjustment):
|
||||
text = (item.get('text') or "").strip()
|
||||
active = bool(item.get('active', False))
|
||||
strength = item.get('strength')
|
||||
text = (item.get("text") or "").strip()
|
||||
active = bool(item.get("active", False))
|
||||
strength = item.get("strength")
|
||||
|
||||
strength_match = re.match(r'^\((.+):([\d.]+)\)$', text)
|
||||
strength_match = re.match(r"^\((.+):([\d.]+)\)$", text)
|
||||
if strength_match:
|
||||
text = strength_match.group(1).strip()
|
||||
if strength is None:
|
||||
|
||||
@@ -15,12 +15,9 @@ class WanVideoLoraSelectLM:
|
||||
"required": {
|
||||
"low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}),
|
||||
"merge_loras": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}),
|
||||
"text": ("STRING", {
|
||||
"multiline": True,
|
||||
"pysssss.autocomplete": False,
|
||||
"dynamicPrompts": True,
|
||||
"text": ("AUTOCOMPLETE_TEXT_LORAS", {
|
||||
"placeholder": "Search LoRAs to add...",
|
||||
"tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
|
||||
"placeholder": "LoRA syntax input: <lora:name:strength>"
|
||||
}),
|
||||
},
|
||||
"optional": FlexibleOptionalInputType(any_type),
|
||||
|
||||
@@ -7,7 +7,7 @@ import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# 定义新节点的类
|
||||
class WanVideoLoraSelectFromText:
|
||||
class WanVideoLoraTextSelectLM:
|
||||
# 节点在UI中显示的名称
|
||||
NAME = "WanVideo Lora Select From Text (LoraManager)"
|
||||
# 节点所属的分类
|
||||
@@ -115,11 +115,3 @@ class WanVideoLoraSelectFromText:
|
||||
active_loras_text = " ".join(formatted_loras)
|
||||
|
||||
return (loras_list, trigger_words_text, active_loras_text)
|
||||
|
||||
NODE_CLASS_MAPPINGS = {
|
||||
"WanVideoLoraSelectFromText": WanVideoLoraSelectFromText
|
||||
}
|
||||
|
||||
NODE_DISPLAY_NAME_MAPPINGS = {
|
||||
"WanVideoLoraSelectFromText": "WanVideo Lora Select From Text (LoraManager)"
|
||||
}
|
||||
|
||||
@@ -204,6 +204,7 @@ class BaseModelRoutes(ABC):
|
||||
service=service,
|
||||
update_service=update_service,
|
||||
metadata_provider_selector=get_metadata_provider,
|
||||
settings_service=self._settings,
|
||||
logger=logger,
|
||||
)
|
||||
return ModelHandlerSet(
|
||||
|
||||
@@ -30,6 +30,7 @@ ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
|
||||
RouteDefinition("POST", "/api/lm/force-download-example-images", "force_download_example_images"),
|
||||
RouteDefinition("POST", "/api/lm/cleanup-example-image-folders", "cleanup_example_image_folders"),
|
||||
RouteDefinition("POST", "/api/lm/example-images/set-nsfw-level", "set_example_image_nsfw_level"),
|
||||
RouteDefinition("POST", "/api/lm/check-example-images-needed", "check_example_images_needed"),
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
"""Handler set for example image routes."""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import Callable, Mapping
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from ...services.use_cases.example_images import (
|
||||
DownloadExampleImagesConfigurationError,
|
||||
DownloadExampleImagesInProgressError,
|
||||
@@ -92,6 +95,19 @@ class ExampleImagesDownloadHandler:
|
||||
except ExampleImagesDownloadError as exc:
|
||||
return web.json_response({'success': False, 'error': str(exc)}, status=500)
|
||||
|
||||
async def check_example_images_needed(self, request: web.Request) -> web.StreamResponse:
|
||||
"""Lightweight check to see if any models need example images downloaded."""
|
||||
try:
|
||||
payload = await request.json()
|
||||
model_types = payload.get('model_types', ['lora', 'checkpoint', 'embedding'])
|
||||
result = await self._download_manager.check_pending_models(model_types)
|
||||
return web.json_response(result)
|
||||
except Exception as exc:
|
||||
return web.json_response(
|
||||
{'success': False, 'error': str(exc)},
|
||||
status=500
|
||||
)
|
||||
|
||||
|
||||
class ExampleImagesManagementHandler:
|
||||
"""HTTP adapters for import/delete endpoints."""
|
||||
@@ -109,6 +125,9 @@ class ExampleImagesManagementHandler:
|
||||
return web.json_response({'success': False, 'error': str(exc)}, status=400)
|
||||
except ExampleImagesImportError as exc:
|
||||
return web.json_response({'success': False, 'error': str(exc)}, status=500)
|
||||
except Exception as exc:
|
||||
logger.exception("Unexpected error importing example images")
|
||||
return web.json_response({'success': False, 'error': str(exc)}, status=500)
|
||||
|
||||
async def delete_example_image(self, request: web.Request) -> web.StreamResponse:
|
||||
return await self._processor.delete_custom_image(request)
|
||||
@@ -161,6 +180,7 @@ class ExampleImagesHandlerSet:
|
||||
"resume_example_images": self.download.resume_example_images,
|
||||
"stop_example_images": self.download.stop_example_images,
|
||||
"force_download_example_images": self.download.force_download_example_images,
|
||||
"check_example_images_needed": self.download.check_example_images_needed,
|
||||
"import_example_images": self.management.import_example_images,
|
||||
"delete_example_image": self.management.delete_example_image,
|
||||
"set_example_image_nsfw_level": self.management.set_example_image_nsfw_level,
|
||||
|
||||
@@ -220,40 +220,17 @@ class HealthCheckHandler:
|
||||
class SettingsHandler:
|
||||
"""Sync settings between backend and frontend."""
|
||||
|
||||
_SYNC_KEYS = (
|
||||
"civitai_api_key",
|
||||
"default_lora_root",
|
||||
"default_checkpoint_root",
|
||||
"default_unet_root",
|
||||
"default_embedding_root",
|
||||
"base_model_path_mappings",
|
||||
"download_path_templates",
|
||||
"enable_metadata_archive_db",
|
||||
"language",
|
||||
"use_portable_settings",
|
||||
"proxy_enabled",
|
||||
"proxy_type",
|
||||
"proxy_host",
|
||||
"proxy_port",
|
||||
"proxy_username",
|
||||
"proxy_password",
|
||||
"example_images_path",
|
||||
"optimize_example_images",
|
||||
"auto_download_example_images",
|
||||
"blur_mature_content",
|
||||
"autoplay_on_hover",
|
||||
"display_density",
|
||||
"card_info_display",
|
||||
"show_folder_sidebar",
|
||||
"include_trigger_words",
|
||||
"show_only_sfw",
|
||||
"compact_mode",
|
||||
"priority_tags",
|
||||
"model_card_footer_action",
|
||||
"model_name_display",
|
||||
"update_flag_strategy",
|
||||
"auto_organize_exclusions",
|
||||
)
|
||||
# Settings keys that should NOT be synced to frontend.
|
||||
# All other settings are synced by default.
|
||||
_NO_SYNC_KEYS = frozenset({
|
||||
# Internal/performance settings (not used by frontend)
|
||||
"hash_chunk_size_mb",
|
||||
"download_stall_timeout_seconds",
|
||||
# Complex internal structures retrieved via separate endpoints
|
||||
"folder_paths",
|
||||
"libraries",
|
||||
"active_library",
|
||||
})
|
||||
|
||||
_PROXY_KEYS = {
|
||||
"proxy_enabled",
|
||||
@@ -300,10 +277,12 @@ class SettingsHandler:
|
||||
async def get_settings(self, request: web.Request) -> web.Response:
|
||||
try:
|
||||
response_data = {}
|
||||
for key in self._SYNC_KEYS:
|
||||
value = self._settings.get(key)
|
||||
if value is not None:
|
||||
response_data[key] = value
|
||||
# Sync all settings except those in _NO_SYNC_KEYS
|
||||
for key in self._settings.keys():
|
||||
if key not in self._NO_SYNC_KEYS:
|
||||
value = self._settings.get(key)
|
||||
if value is not None:
|
||||
response_data[key] = value
|
||||
settings_file = getattr(self._settings, "settings_file", None)
|
||||
if settings_file:
|
||||
response_data["settings_file"] = settings_file
|
||||
@@ -1201,6 +1180,80 @@ class FileSystemHandler:
|
||||
return web.json_response({"success": False, "error": str(exc)}, status=500)
|
||||
|
||||
|
||||
class CustomWordsHandler:
|
||||
"""Handler for autocomplete via TagFTSIndex."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
from ...services.custom_words_service import get_custom_words_service
|
||||
self._service = get_custom_words_service()
|
||||
|
||||
async def search_custom_words(self, request: web.Request) -> web.Response:
|
||||
"""Search custom words with autocomplete.
|
||||
|
||||
Query parameters:
|
||||
search: The search term to match against.
|
||||
limit: Maximum number of results to return (default: 20).
|
||||
category: Optional category filter. Can be:
|
||||
- A category name (e.g., "character", "artist", "general")
|
||||
- Comma-separated category IDs (e.g., "4,11" for character)
|
||||
enriched: If "true", return enriched results with category and post_count
|
||||
even without category filtering.
|
||||
"""
|
||||
try:
|
||||
search_term = request.query.get("search", "")
|
||||
limit = int(request.query.get("limit", "20"))
|
||||
category_param = request.query.get("category", "")
|
||||
enriched_param = request.query.get("enriched", "").lower() == "true"
|
||||
|
||||
# Parse category parameter
|
||||
categories = None
|
||||
if category_param:
|
||||
categories = self._parse_category_param(category_param)
|
||||
|
||||
results = self._service.search_words(
|
||||
search_term, limit, categories=categories, enriched=enriched_param
|
||||
)
|
||||
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"words": results
|
||||
})
|
||||
except Exception as exc:
|
||||
logger.error("Error searching custom words: %s", exc, exc_info=True)
|
||||
return web.json_response({"error": str(exc)}, status=500)
|
||||
|
||||
def _parse_category_param(self, param: str) -> list[int] | None:
|
||||
"""Parse category parameter into list of category IDs.
|
||||
|
||||
Args:
|
||||
param: Category parameter value (name or comma-separated IDs).
|
||||
|
||||
Returns:
|
||||
List of category IDs, or None if parsing fails.
|
||||
"""
|
||||
from ...services.tag_fts_index import CATEGORY_NAME_TO_IDS
|
||||
|
||||
param = param.strip().lower()
|
||||
if not param:
|
||||
return None
|
||||
|
||||
# Try to parse as category name first
|
||||
if param in CATEGORY_NAME_TO_IDS:
|
||||
return CATEGORY_NAME_TO_IDS[param]
|
||||
|
||||
# Try to parse as comma-separated integers
|
||||
try:
|
||||
category_ids = []
|
||||
for part in param.split(","):
|
||||
part = part.strip()
|
||||
if part:
|
||||
category_ids.append(int(part))
|
||||
return category_ids if category_ids else None
|
||||
except ValueError:
|
||||
logger.debug("Invalid category parameter: %s", param)
|
||||
return None
|
||||
|
||||
|
||||
class NodeRegistryHandler:
|
||||
def __init__(
|
||||
self,
|
||||
@@ -1427,6 +1480,7 @@ class MiscHandlerSet:
|
||||
model_library: ModelLibraryHandler,
|
||||
metadata_archive: MetadataArchiveHandler,
|
||||
filesystem: FileSystemHandler,
|
||||
custom_words: CustomWordsHandler,
|
||||
) -> None:
|
||||
self.health = health
|
||||
self.settings = settings
|
||||
@@ -1438,6 +1492,7 @@ class MiscHandlerSet:
|
||||
self.model_library = model_library
|
||||
self.metadata_archive = metadata_archive
|
||||
self.filesystem = filesystem
|
||||
self.custom_words = custom_words
|
||||
|
||||
def to_route_mapping(
|
||||
self,
|
||||
@@ -1465,6 +1520,7 @@ class MiscHandlerSet:
|
||||
"get_model_versions_status": self.model_library.get_model_versions_status,
|
||||
"open_file_location": self.filesystem.open_file_location,
|
||||
"open_settings_location": self.filesystem.open_settings_location,
|
||||
"search_custom_words": self.custom_words.search_custom_words,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Awaitable, Callable, Dict, Iterable, List, Mapping, Optional
|
||||
@@ -269,6 +270,11 @@ class ModelListingHandler:
|
||||
request.query.get("update_available_only", "false").lower() == "true"
|
||||
)
|
||||
|
||||
# Tag logic: "any" (OR) or "all" (AND) for include tags
|
||||
tag_logic = request.query.get("tag_logic", "any").lower()
|
||||
if tag_logic not in ("any", "all"):
|
||||
tag_logic = "any"
|
||||
|
||||
# New license-based query filters
|
||||
credit_required = request.query.get("credit_required")
|
||||
if credit_required is not None:
|
||||
@@ -297,6 +303,7 @@ class ModelListingHandler:
|
||||
"fuzzy_search": fuzzy_search,
|
||||
"base_models": base_models,
|
||||
"tags": tag_filters,
|
||||
"tag_logic": tag_logic,
|
||||
"search_options": search_options,
|
||||
"hash_filters": hash_filters,
|
||||
"favorites_only": favorites_only,
|
||||
@@ -641,7 +648,7 @@ class ModelQueryHandler:
|
||||
async def get_top_tags(self, request: web.Request) -> web.Response:
|
||||
try:
|
||||
limit = int(request.query.get("limit", "20"))
|
||||
if limit < 1 or limit > 100:
|
||||
if limit < 0:
|
||||
limit = 20
|
||||
top_tags = await self._service.get_top_tags(limit)
|
||||
return web.json_response({"success": True, "tags": top_tags})
|
||||
@@ -755,19 +762,22 @@ class ModelQueryHandler:
|
||||
|
||||
async def find_duplicate_models(self, request: web.Request) -> web.Response:
|
||||
try:
|
||||
filters = self._parse_duplicate_filters(request)
|
||||
duplicates = self._service.find_duplicate_hashes()
|
||||
result = []
|
||||
cache = await self._service.scanner.get_cached_data()
|
||||
|
||||
for sha256, paths in duplicates.items():
|
||||
group = {"hash": sha256, "models": []}
|
||||
# Collect all models in this group
|
||||
all_models = []
|
||||
for path in paths:
|
||||
model = next(
|
||||
(m for m in cache.raw_data if m["file_path"] == path), None
|
||||
)
|
||||
if model:
|
||||
group["models"].append(
|
||||
await self._service.format_response(model)
|
||||
)
|
||||
all_models.append(model)
|
||||
|
||||
# Include primary if not already in paths
|
||||
primary_path = self._service.get_path_by_hash(sha256)
|
||||
if primary_path and primary_path not in paths:
|
||||
primary_model = next(
|
||||
@@ -775,11 +785,25 @@ class ModelQueryHandler:
|
||||
None,
|
||||
)
|
||||
if primary_model:
|
||||
group["models"].insert(
|
||||
0, await self._service.format_response(primary_model)
|
||||
)
|
||||
all_models.insert(0, primary_model)
|
||||
|
||||
# Apply filters
|
||||
filtered = self._apply_duplicate_filters(all_models, filters)
|
||||
|
||||
# Sort: originals first, copies last
|
||||
sorted_models = self._sort_duplicate_group(filtered)
|
||||
|
||||
# Format response
|
||||
group = {"hash": sha256, "models": []}
|
||||
for model in sorted_models:
|
||||
group["models"].append(
|
||||
await self._service.format_response(model)
|
||||
)
|
||||
|
||||
# Only include groups with 2+ models after filtering
|
||||
if len(group["models"]) > 1:
|
||||
result.append(group)
|
||||
|
||||
return web.json_response(
|
||||
{"success": True, "duplicates": result, "count": len(result)}
|
||||
)
|
||||
@@ -792,6 +816,83 @@ class ModelQueryHandler:
|
||||
)
|
||||
return web.json_response({"success": False, "error": str(exc)}, status=500)
|
||||
|
||||
def _parse_duplicate_filters(self, request: web.Request) -> Dict[str, Any]:
|
||||
"""Parse filter parameters from the request for duplicate finding."""
|
||||
return {
|
||||
"base_models": request.query.getall("base_model", []),
|
||||
"tag_include": request.query.getall("tag_include", []),
|
||||
"tag_exclude": request.query.getall("tag_exclude", []),
|
||||
"model_types": request.query.getall("model_type", []),
|
||||
"folder": request.query.get("folder"),
|
||||
"favorites_only": request.query.get("favorites_only", "").lower() == "true",
|
||||
}
|
||||
|
||||
def _apply_duplicate_filters(self, models: List[Dict[str, Any]], filters: Dict[str, Any]) -> List[Dict[str, Any]]:
|
||||
"""Apply filters to a list of models within a duplicate group."""
|
||||
result = models
|
||||
|
||||
# Apply base model filter
|
||||
if filters.get("base_models"):
|
||||
base_set = set(filters["base_models"])
|
||||
result = [m for m in result if m.get("base_model") in base_set]
|
||||
|
||||
# Apply tag filters (include)
|
||||
for tag in filters.get("tag_include", []):
|
||||
if tag == "__no_tags__":
|
||||
result = [m for m in result if not m.get("tags")]
|
||||
else:
|
||||
result = [m for m in result if tag in (m.get("tags") or [])]
|
||||
|
||||
# Apply tag filters (exclude)
|
||||
for tag in filters.get("tag_exclude", []):
|
||||
if tag == "__no_tags__":
|
||||
result = [m for m in result if m.get("tags")]
|
||||
else:
|
||||
result = [m for m in result if tag not in (m.get("tags") or [])]
|
||||
|
||||
# Apply model type filter
|
||||
if filters.get("model_types"):
|
||||
type_set = {t.lower() for t in filters["model_types"]}
|
||||
result = [
|
||||
m for m in result if (m.get("model_type") or "").lower() in type_set
|
||||
]
|
||||
|
||||
# Apply folder filter
|
||||
if filters.get("folder"):
|
||||
folder = filters["folder"]
|
||||
result = [m for m in result if m.get("folder", "").startswith(folder)]
|
||||
|
||||
# Apply favorites filter
|
||||
if filters.get("favorites_only"):
|
||||
result = [m for m in result if m.get("favorite", False)]
|
||||
|
||||
return result
|
||||
|
||||
def _sort_duplicate_group(self, models: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""Sort models: originals first (left), copies (with -????. pattern) last (right)."""
|
||||
if len(models) <= 1:
|
||||
return models
|
||||
|
||||
min_len = min(len(m.get("file_name", "")) for m in models)
|
||||
|
||||
def copy_score(m):
|
||||
fn = m.get("file_name", "")
|
||||
score = 0
|
||||
# Match -0001.safetensors, -1234.safetensors etc.
|
||||
if re.search(r"-\d{4}\.", fn):
|
||||
score += 100
|
||||
# Match (1), (2) etc.
|
||||
if re.search(r"\(\d+\)", fn):
|
||||
score += 50
|
||||
# Match 'copy' in filename
|
||||
if "copy" in fn.lower():
|
||||
score += 50
|
||||
# Longer filenames are more likely copies
|
||||
score += len(fn) - min_len
|
||||
return (score, fn.lower())
|
||||
|
||||
return sorted(models, key=copy_score)
|
||||
|
||||
async def find_filename_conflicts(self, request: web.Request) -> web.Response:
|
||||
try:
|
||||
duplicates = self._service.find_duplicate_filenames()
|
||||
@@ -1041,6 +1142,7 @@ class ModelDownloadHandler:
|
||||
request.query.get("use_default_paths", "false").lower() == "true"
|
||||
)
|
||||
source = request.query.get("source")
|
||||
file_params_json = request.query.get("file_params")
|
||||
|
||||
data = {"model_id": model_id, "use_default_paths": use_default_paths}
|
||||
if model_version_id:
|
||||
@@ -1049,6 +1151,12 @@ class ModelDownloadHandler:
|
||||
data["download_id"] = download_id
|
||||
if source:
|
||||
data["source"] = source
|
||||
if file_params_json:
|
||||
import json
|
||||
try:
|
||||
data["file_params"] = json.loads(file_params_json)
|
||||
except json.JSONDecodeError:
|
||||
self._logger.warning("Invalid file_params JSON: %s", file_params_json)
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
future = loop.create_future()
|
||||
@@ -1432,11 +1540,13 @@ class ModelUpdateHandler:
|
||||
service,
|
||||
update_service,
|
||||
metadata_provider_selector,
|
||||
settings_service,
|
||||
logger: logging.Logger,
|
||||
) -> None:
|
||||
self._service = service
|
||||
self._update_service = update_service
|
||||
self._metadata_provider_selector = metadata_provider_selector
|
||||
self._settings = settings_service
|
||||
self._logger = logger
|
||||
|
||||
async def fetch_missing_civitai_license_data(
|
||||
@@ -1673,6 +1783,9 @@ class ModelUpdateHandler:
|
||||
{"success": False, "error": "Model not tracked"}, status=404
|
||||
)
|
||||
|
||||
# Enrich EA versions with detailed info if needed
|
||||
record = await self._enrich_early_access_details(record)
|
||||
|
||||
overrides = await self._build_version_context(record)
|
||||
return web.json_response(
|
||||
{
|
||||
@@ -1711,6 +1824,78 @@ class ModelUpdateHandler:
|
||||
)
|
||||
return None
|
||||
|
||||
async def _enrich_early_access_details(self, record):
|
||||
"""Fetch detailed EA info for versions missing exact end time.
|
||||
|
||||
Identifies versions with is_early_access=True but no early_access_ends_at,
|
||||
then fetches detailed info from CivitAI to get the exact end time.
|
||||
"""
|
||||
if not record or not record.versions:
|
||||
return record
|
||||
|
||||
# Find versions that need enrichment
|
||||
versions_needing_update = []
|
||||
for version in record.versions:
|
||||
if version.is_early_access and not version.early_access_ends_at:
|
||||
versions_needing_update.append(version)
|
||||
|
||||
if not versions_needing_update:
|
||||
return record
|
||||
|
||||
provider = await self._get_civitai_provider()
|
||||
if not provider:
|
||||
return record
|
||||
|
||||
# Fetch detailed info for each version needing update
|
||||
updated_versions = []
|
||||
for version in versions_needing_update:
|
||||
try:
|
||||
version_info, error = await provider.get_model_version_info(
|
||||
str(version.version_id)
|
||||
)
|
||||
if version_info and not error:
|
||||
ea_ends_at = version_info.get("earlyAccessEndsAt")
|
||||
if ea_ends_at:
|
||||
# Create updated version with EA end time
|
||||
from dataclasses import replace
|
||||
|
||||
updated_version = replace(
|
||||
version, early_access_ends_at=ea_ends_at
|
||||
)
|
||||
updated_versions.append(updated_version)
|
||||
self._logger.debug(
|
||||
"Enriched EA info for version %s: %s",
|
||||
version.version_id,
|
||||
ea_ends_at,
|
||||
)
|
||||
except Exception as exc:
|
||||
self._logger.debug(
|
||||
"Failed to fetch EA details for version %s: %s",
|
||||
version.version_id,
|
||||
exc,
|
||||
)
|
||||
|
||||
if not updated_versions:
|
||||
return record
|
||||
|
||||
# Update record with enriched versions
|
||||
version_map = {v.version_id: v for v in record.versions}
|
||||
for updated in updated_versions:
|
||||
version_map[updated.version_id] = updated
|
||||
|
||||
# Create new record with updated versions
|
||||
from dataclasses import replace
|
||||
|
||||
new_record = replace(
|
||||
record, versions=list(version_map.values()),
|
||||
)
|
||||
|
||||
# Optionally persist to database for caching
|
||||
# Note: We don't persist here to avoid side effects; the data will be
|
||||
# refreshed on next bulk update if still needed
|
||||
|
||||
return new_record
|
||||
|
||||
async def _collect_models_missing_license(
|
||||
self,
|
||||
cache,
|
||||
@@ -1877,6 +2062,15 @@ class ModelUpdateHandler:
|
||||
version_context: Optional[Dict[int, Dict[str, Optional[str]]]] = None,
|
||||
) -> Dict:
|
||||
context = version_context or {}
|
||||
# Check user setting for hiding early access versions
|
||||
hide_early_access = False
|
||||
if self._settings is not None:
|
||||
try:
|
||||
hide_early_access = bool(
|
||||
self._settings.get("hide_early_access_updates", False)
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
return {
|
||||
"modelType": record.model_type,
|
||||
"modelId": record.model_id,
|
||||
@@ -1885,7 +2079,7 @@ class ModelUpdateHandler:
|
||||
"inLibraryVersionIds": record.in_library_version_ids,
|
||||
"lastCheckedAt": record.last_checked_at,
|
||||
"shouldIgnore": record.should_ignore_model,
|
||||
"hasUpdate": record.has_update(),
|
||||
"hasUpdate": record.has_update(hide_early_access=hide_early_access),
|
||||
"versions": [
|
||||
self._serialize_version(version, context.get(version.version_id))
|
||||
for version in record.versions
|
||||
@@ -1901,6 +2095,24 @@ class ModelUpdateHandler:
|
||||
preview_url = (
|
||||
preview_override if preview_override is not None else version.preview_url
|
||||
)
|
||||
|
||||
# Determine if version is currently in early access
|
||||
# Two-phase detection: use exact end time if available, otherwise fallback to basic flag
|
||||
is_early_access = False
|
||||
if version.early_access_ends_at:
|
||||
try:
|
||||
from datetime import datetime, timezone
|
||||
ea_date = datetime.fromisoformat(
|
||||
version.early_access_ends_at.replace("Z", "+00:00")
|
||||
)
|
||||
is_early_access = ea_date > datetime.now(timezone.utc)
|
||||
except (ValueError, AttributeError):
|
||||
# If date parsing fails, treat as active EA (conservative)
|
||||
is_early_access = True
|
||||
elif getattr(version, 'is_early_access', False):
|
||||
# Fallback to basic EA flag from bulk API
|
||||
is_early_access = True
|
||||
|
||||
return {
|
||||
"versionId": version.version_id,
|
||||
"name": version.name,
|
||||
@@ -1910,6 +2122,8 @@ class ModelUpdateHandler:
|
||||
"previewUrl": preview_url,
|
||||
"isInLibrary": version.is_in_library,
|
||||
"shouldIgnore": version.should_ignore,
|
||||
"earlyAccessEndsAt": version.early_access_ends_at,
|
||||
"isEarlyAccess": is_early_access,
|
||||
"filePath": context.get("file_path"),
|
||||
"fileName": context.get("file_name"),
|
||||
}
|
||||
|
||||
@@ -33,6 +33,10 @@ class PreviewHandler:
|
||||
raise web.HTTPBadRequest(text="Invalid preview path encoding") from exc
|
||||
|
||||
normalized = decoded_path.replace("\\", "/")
|
||||
|
||||
if not self._config.is_preview_path_allowed(normalized):
|
||||
raise web.HTTPForbidden(text="Preview path is not within an allowed directory")
|
||||
|
||||
candidate = Path(normalized)
|
||||
try:
|
||||
resolved = candidate.expanduser().resolve(strict=False)
|
||||
@@ -40,14 +44,8 @@ class PreviewHandler:
|
||||
logger.debug("Failed to resolve preview path %s: %s", normalized, exc)
|
||||
raise web.HTTPBadRequest(text="Unable to resolve preview path") from exc
|
||||
|
||||
resolved_str = str(resolved)
|
||||
# TODO: Temporarily disabled path validation due to issues #772 and #774
|
||||
# Re-enable after fixing preview root path handling
|
||||
# if not self._config.is_preview_path_allowed(resolved_str):
|
||||
# raise web.HTTPForbidden(text="Preview path is not within an allowed directory")
|
||||
|
||||
if not resolved.is_file():
|
||||
logger.debug("Preview file not found at %s", resolved_str)
|
||||
logger.debug("Preview file not found at %s", str(resolved))
|
||||
raise web.HTTPNotFound(text="Preview file not found")
|
||||
|
||||
# aiohttp's FileResponse handles range requests and content headers for us.
|
||||
|
||||
@@ -412,10 +412,11 @@ class RecipeQueryHandler:
|
||||
if recipe_scanner is None:
|
||||
raise RuntimeError("Recipe scanner unavailable")
|
||||
|
||||
duplicate_groups = await recipe_scanner.find_all_duplicate_recipes()
|
||||
fingerprint_groups = await recipe_scanner.find_all_duplicate_recipes()
|
||||
url_groups = await recipe_scanner.find_duplicate_recipes_by_source()
|
||||
response_data = []
|
||||
|
||||
for fingerprint, recipe_ids in duplicate_groups.items():
|
||||
for fingerprint, recipe_ids in fingerprint_groups.items():
|
||||
if len(recipe_ids) <= 1:
|
||||
continue
|
||||
|
||||
@@ -439,12 +440,44 @@ class RecipeQueryHandler:
|
||||
recipes.sort(key=lambda entry: entry.get("modified", 0), reverse=True)
|
||||
response_data.append(
|
||||
{
|
||||
"type": "fingerprint",
|
||||
"fingerprint": fingerprint,
|
||||
"count": len(recipes),
|
||||
"recipes": recipes,
|
||||
}
|
||||
)
|
||||
|
||||
for url, recipe_ids in url_groups.items():
|
||||
if len(recipe_ids) <= 1:
|
||||
continue
|
||||
|
||||
recipes = []
|
||||
for recipe_id in recipe_ids:
|
||||
recipe = await recipe_scanner.get_recipe_by_id(recipe_id)
|
||||
if recipe:
|
||||
recipes.append(
|
||||
{
|
||||
"id": recipe.get("id"),
|
||||
"title": recipe.get("title"),
|
||||
"file_url": recipe.get("file_url")
|
||||
or self._format_recipe_file_url(recipe.get("file_path", "")),
|
||||
"modified": recipe.get("modified"),
|
||||
"created_date": recipe.get("created_date"),
|
||||
"lora_count": len(recipe.get("loras", [])),
|
||||
}
|
||||
)
|
||||
|
||||
if len(recipes) >= 2:
|
||||
recipes.sort(key=lambda entry: entry.get("modified", 0), reverse=True)
|
||||
response_data.append(
|
||||
{
|
||||
"type": "source_url",
|
||||
"fingerprint": url,
|
||||
"count": len(recipes),
|
||||
"recipes": recipes,
|
||||
}
|
||||
)
|
||||
|
||||
response_data.sort(key=lambda entry: entry["count"], reverse=True)
|
||||
return web.json_response({"success": True, "duplicate_groups": response_data})
|
||||
except Exception as exc:
|
||||
@@ -1021,7 +1054,7 @@ class RecipeManagementHandler:
|
||||
"exclude": False,
|
||||
}
|
||||
|
||||
async def _download_remote_media(self, image_url: str) -> tuple[bytes, str]:
|
||||
async def _download_remote_media(self, image_url: str) -> tuple[bytes, str, Any]:
|
||||
civitai_client = self._civitai_client_getter()
|
||||
downloader = await self._downloader_factory()
|
||||
temp_path = None
|
||||
@@ -1029,6 +1062,7 @@ class RecipeManagementHandler:
|
||||
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
|
||||
temp_path = temp_file.name
|
||||
download_url = image_url
|
||||
image_info = None
|
||||
civitai_match = re.match(r"https://civitai\.com/images/(\d+)", image_url)
|
||||
if civitai_match:
|
||||
if civitai_client is None:
|
||||
|
||||
@@ -63,6 +63,11 @@ class LoraRoutes(BaseModelRoutes):
|
||||
"POST", "/api/lm/{prefix}/random-sample", prefix, self.get_random_loras
|
||||
)
|
||||
|
||||
# Cycler routes
|
||||
registrar.add_prefixed_route(
|
||||
"POST", "/api/lm/{prefix}/cycler-list", prefix, self.get_cycler_list
|
||||
)
|
||||
|
||||
# ComfyUI integration
|
||||
registrar.add_prefixed_route(
|
||||
"POST", "/api/lm/{prefix}/get_trigger_words", prefix, self.get_trigger_words
|
||||
@@ -283,6 +288,29 @@ class LoraRoutes(BaseModelRoutes):
|
||||
logger.error(f"Error getting random LoRAs: {e}", exc_info=True)
|
||||
return web.json_response({"success": False, "error": str(e)}, status=500)
|
||||
|
||||
async def get_cycler_list(self, request: web.Request) -> web.Response:
|
||||
"""Get filtered and sorted LoRA list for cycler widget"""
|
||||
try:
|
||||
json_data = await request.json()
|
||||
|
||||
# Parse parameters
|
||||
pool_config = json_data.get("pool_config")
|
||||
sort_by = json_data.get("sort_by", "filename")
|
||||
|
||||
# Get cycler list from service
|
||||
lora_list = await self.service.get_cycler_list(
|
||||
pool_config=pool_config,
|
||||
sort_by=sort_by
|
||||
)
|
||||
|
||||
return web.json_response(
|
||||
{"success": True, "loras": lora_list, "count": len(lora_list)}
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting cycler list: {e}", exc_info=True)
|
||||
return web.json_response({"success": False, "error": str(e)}, status=500)
|
||||
|
||||
async def get_trigger_words(self, request: web.Request) -> web.Response:
|
||||
"""Get trigger words for specified LoRA models"""
|
||||
try:
|
||||
|
||||
@@ -42,6 +42,7 @@ MISC_ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
|
||||
RouteDefinition("GET", "/api/lm/metadata-archive-status", "get_metadata_archive_status"),
|
||||
RouteDefinition("GET", "/api/lm/model-versions-status", "get_model_versions_status"),
|
||||
RouteDefinition("POST", "/api/lm/settings/open-location", "open_settings_location"),
|
||||
RouteDefinition("GET", "/api/lm/custom-words/search", "search_custom_words"),
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ from ..services.settings_manager import get_settings_manager
|
||||
from ..services.downloader import get_downloader
|
||||
from ..utils.usage_stats import UsageStats
|
||||
from .handlers.misc_handlers import (
|
||||
CustomWordsHandler,
|
||||
FileSystemHandler,
|
||||
HealthCheckHandler,
|
||||
LoraCodeHandler,
|
||||
@@ -117,6 +118,7 @@ class MiscRoutes:
|
||||
service_registry=self._service_registry_adapter,
|
||||
metadata_provider_factory=self._metadata_provider_factory,
|
||||
)
|
||||
custom_words = CustomWordsHandler()
|
||||
|
||||
return self._handler_set_factory(
|
||||
health=health,
|
||||
@@ -129,6 +131,7 @@ class MiscRoutes:
|
||||
model_library=model_library,
|
||||
metadata_archive=metadata_archive,
|
||||
filesystem=filesystem,
|
||||
custom_words=custom_words,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -45,8 +45,9 @@ class UpdateRoutes:
|
||||
# Fetch remote version from GitHub
|
||||
if nightly:
|
||||
remote_version, changelog = await UpdateRoutes._get_nightly_version()
|
||||
releases = None
|
||||
else:
|
||||
remote_version, changelog = await UpdateRoutes._get_remote_version()
|
||||
remote_version, changelog, releases = await UpdateRoutes._get_remote_version()
|
||||
|
||||
# Compare versions
|
||||
if nightly:
|
||||
@@ -59,7 +60,7 @@ class UpdateRoutes:
|
||||
remote_version.replace('v', '')
|
||||
)
|
||||
|
||||
return web.json_response({
|
||||
response_data = {
|
||||
'success': True,
|
||||
'current_version': local_version,
|
||||
'latest_version': remote_version,
|
||||
@@ -67,7 +68,13 @@ class UpdateRoutes:
|
||||
'changelog': changelog,
|
||||
'git_info': git_info,
|
||||
'nightly': nightly
|
||||
})
|
||||
}
|
||||
|
||||
# Include releases list for stable mode
|
||||
if releases is not None:
|
||||
response_data['releases'] = releases
|
||||
|
||||
return web.json_response(response_data)
|
||||
|
||||
except NETWORK_EXCEPTIONS as e:
|
||||
logger.warning("Network unavailable during update check: %s", e)
|
||||
@@ -443,42 +450,58 @@ class UpdateRoutes:
|
||||
return git_info
|
||||
|
||||
@staticmethod
|
||||
async def _get_remote_version() -> tuple[str, List[str]]:
|
||||
async def _get_remote_version() -> tuple[str, List[str], List[Dict]]:
|
||||
"""
|
||||
Fetch remote version from GitHub
|
||||
Returns:
|
||||
tuple: (version string, changelog list)
|
||||
tuple: (version string, changelog list, releases list)
|
||||
"""
|
||||
repo_owner = "willmiao"
|
||||
repo_name = "ComfyUI-Lora-Manager"
|
||||
|
||||
# Use GitHub API to fetch the latest release
|
||||
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
|
||||
# Use GitHub API to fetch the last 5 releases
|
||||
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases?per_page=5"
|
||||
|
||||
try:
|
||||
downloader = await get_downloader()
|
||||
success, data = await downloader.make_request('GET', github_url, custom_headers={'Accept': 'application/vnd.github+json'})
|
||||
|
||||
if not success:
|
||||
logger.warning(f"Failed to fetch GitHub release: {data}")
|
||||
return "v0.0.0", []
|
||||
logger.warning(f"Failed to fetch GitHub releases: {data}")
|
||||
return "v0.0.0", [], []
|
||||
|
||||
version = data.get('tag_name', '')
|
||||
if not version.startswith('v'):
|
||||
version = f"v{version}"
|
||||
# Parse releases
|
||||
releases = []
|
||||
for i, release in enumerate(data):
|
||||
version = release.get('tag_name', '')
|
||||
if not version.startswith('v'):
|
||||
version = f"v{version}"
|
||||
|
||||
# Extract changelog from release notes
|
||||
body = release.get('body', '')
|
||||
changelog = UpdateRoutes._parse_changelog(body)
|
||||
|
||||
releases.append({
|
||||
'version': version,
|
||||
'changelog': changelog,
|
||||
'published_at': release.get('published_at', ''),
|
||||
'is_latest': i == 0
|
||||
})
|
||||
|
||||
# Extract changelog from release notes
|
||||
body = data.get('body', '')
|
||||
changelog = UpdateRoutes._parse_changelog(body)
|
||||
# Get latest version and its changelog
|
||||
if releases:
|
||||
latest_version = releases[0]['version']
|
||||
latest_changelog = releases[0]['changelog']
|
||||
return latest_version, latest_changelog, releases
|
||||
|
||||
return version, changelog
|
||||
return "v0.0.0", [], []
|
||||
|
||||
except NETWORK_EXCEPTIONS as e:
|
||||
logger.warning("Unable to reach GitHub for release info: %s", e)
|
||||
return "v0.0.0", []
|
||||
return "v0.0.0", [], []
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching remote version: {e}", exc_info=True)
|
||||
return "v0.0.0", []
|
||||
return "v0.0.0", [], []
|
||||
|
||||
@staticmethod
|
||||
def _parse_changelog(release_notes: str) -> List[str]:
|
||||
|
||||
@@ -5,7 +5,7 @@ import logging
|
||||
import os
|
||||
import time
|
||||
|
||||
from ..utils.constants import VALID_LORA_TYPES
|
||||
from ..utils.constants import VALID_LORA_SUB_TYPES, VALID_CHECKPOINT_SUB_TYPES
|
||||
from ..utils.models import BaseModelMetadata
|
||||
from ..utils.metadata_manager import MetadataManager
|
||||
from ..utils.usage_stats import UsageStats
|
||||
@@ -15,8 +15,8 @@ from .model_query import (
|
||||
ModelFilterSet,
|
||||
SearchStrategy,
|
||||
SettingsProvider,
|
||||
normalize_civitai_model_type,
|
||||
resolve_civitai_model_type,
|
||||
normalize_sub_type,
|
||||
resolve_sub_type,
|
||||
)
|
||||
from .settings_manager import get_settings_manager
|
||||
|
||||
@@ -81,6 +81,7 @@ class BaseModelService(ABC):
|
||||
update_available_only: bool = False,
|
||||
credit_required: Optional[bool] = None,
|
||||
allow_selling_generated_content: Optional[bool] = None,
|
||||
tag_logic: str = "any",
|
||||
**kwargs,
|
||||
) -> Dict:
|
||||
"""Get paginated and filtered model data"""
|
||||
@@ -109,6 +110,7 @@ class BaseModelService(ABC):
|
||||
tags=tags,
|
||||
favorites_only=favorites_only,
|
||||
search_options=search_options,
|
||||
tag_logic=tag_logic,
|
||||
)
|
||||
|
||||
if search:
|
||||
@@ -241,6 +243,7 @@ class BaseModelService(ABC):
|
||||
tags: Optional[Dict[str, str]] = None,
|
||||
favorites_only: bool = False,
|
||||
search_options: dict = None,
|
||||
tag_logic: str = "any",
|
||||
) -> List[Dict]:
|
||||
"""Apply common filters that work across all model types"""
|
||||
normalized_options = self.search_strategy.normalize_options(search_options)
|
||||
@@ -253,6 +256,7 @@ class BaseModelService(ABC):
|
||||
tags=tags,
|
||||
favorites_only=favorites_only,
|
||||
search_options=normalized_options,
|
||||
tag_logic=tag_logic,
|
||||
)
|
||||
return self.filter_set.apply(data, criteria)
|
||||
|
||||
@@ -376,6 +380,13 @@ class BaseModelService(ABC):
|
||||
strategy = "same_base"
|
||||
same_base_mode = strategy == "same_base"
|
||||
|
||||
# Check user setting for hiding early access updates
|
||||
hide_early_access = False
|
||||
try:
|
||||
hide_early_access = bool(self.settings.get("hide_early_access_updates", False))
|
||||
except Exception:
|
||||
hide_early_access = False
|
||||
|
||||
records = None
|
||||
resolved: Optional[Dict[int, bool]] = None
|
||||
if same_base_mode:
|
||||
@@ -384,7 +395,7 @@ class BaseModelService(ABC):
|
||||
try:
|
||||
records = await record_method(self.model_type, ordered_ids)
|
||||
resolved = {
|
||||
model_id: record.has_update()
|
||||
model_id: record.has_update(hide_early_access=hide_early_access)
|
||||
for model_id, record in records.items()
|
||||
}
|
||||
except Exception as exc:
|
||||
@@ -402,7 +413,7 @@ class BaseModelService(ABC):
|
||||
bulk_method = getattr(self.update_service, "has_updates_bulk", None)
|
||||
if callable(bulk_method):
|
||||
try:
|
||||
resolved = await bulk_method(self.model_type, ordered_ids)
|
||||
resolved = await bulk_method(self.model_type, ordered_ids, hide_early_access=hide_early_access)
|
||||
except Exception as exc:
|
||||
logger.error(
|
||||
"Failed to resolve update status in bulk for %s models (%s): %s",
|
||||
@@ -415,7 +426,7 @@ class BaseModelService(ABC):
|
||||
|
||||
if resolved is None:
|
||||
tasks = [
|
||||
self.update_service.has_update(self.model_type, model_id)
|
||||
self.update_service.has_update(self.model_type, model_id, hide_early_access=hide_early_access)
|
||||
for model_id in ordered_ids
|
||||
]
|
||||
results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
@@ -453,6 +464,7 @@ class BaseModelService(ABC):
|
||||
flag = record.has_update_for_base(
|
||||
threshold_version,
|
||||
base_model,
|
||||
hide_early_access=hide_early_access,
|
||||
)
|
||||
else:
|
||||
flag = default_flag
|
||||
@@ -568,16 +580,21 @@ class BaseModelService(ABC):
|
||||
return await self.scanner.get_base_models(limit)
|
||||
|
||||
async def get_model_types(self, limit: int = 20) -> List[Dict[str, Any]]:
|
||||
"""Get counts of normalized CivitAI model types present in the cache."""
|
||||
"""Get counts of sub-types present in the cache."""
|
||||
cache = await self.scanner.get_cached_data()
|
||||
|
||||
type_counts: Dict[str, int] = {}
|
||||
for entry in cache.raw_data:
|
||||
normalized_type = normalize_civitai_model_type(
|
||||
resolve_civitai_model_type(entry)
|
||||
)
|
||||
if not normalized_type or normalized_type not in VALID_LORA_TYPES:
|
||||
normalized_type = normalize_sub_type(resolve_sub_type(entry))
|
||||
if not normalized_type:
|
||||
continue
|
||||
|
||||
# Filter by valid sub-types based on scanner type
|
||||
if self.model_type == "lora" and normalized_type not in VALID_LORA_SUB_TYPES:
|
||||
continue
|
||||
if self.model_type == "checkpoint" and normalized_type not in VALID_CHECKPOINT_SUB_TYPES:
|
||||
continue
|
||||
|
||||
type_counts[normalized_type] = type_counts.get(normalized_type, 0) + 1
|
||||
|
||||
sorted_types = sorted(
|
||||
|
||||
259
py/services/cache_entry_validator.py
Normal file
259
py/services/cache_entry_validator.py
Normal file
@@ -0,0 +1,259 @@
|
||||
"""
|
||||
Cache Entry Validator
|
||||
|
||||
Validates and repairs cache entries to prevent runtime errors from
|
||||
missing or invalid critical fields.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
import logging
|
||||
import os
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ValidationResult:
|
||||
"""Result of validating a single cache entry."""
|
||||
is_valid: bool
|
||||
repaired: bool
|
||||
errors: List[str] = field(default_factory=list)
|
||||
entry: Optional[Dict[str, Any]] = None
|
||||
|
||||
|
||||
class CacheEntryValidator:
|
||||
"""
|
||||
Validates and repairs cache entry core fields.
|
||||
|
||||
Critical fields that cause runtime errors when missing:
|
||||
- file_path: KeyError in multiple locations
|
||||
- sha256: KeyError/AttributeError in hash operations
|
||||
|
||||
Medium severity fields that may cause sorting/display issues:
|
||||
- size: KeyError during sorting
|
||||
- modified: KeyError during sorting
|
||||
- model_name: AttributeError on .lower() calls
|
||||
|
||||
Low severity fields:
|
||||
- tags: KeyError/TypeError in recipe operations
|
||||
"""
|
||||
|
||||
# Field definitions: (default_value, is_required)
|
||||
CORE_FIELDS: Dict[str, Tuple[Any, bool]] = {
|
||||
'file_path': ('', True),
|
||||
'sha256': ('', True),
|
||||
'file_name': ('', False),
|
||||
'model_name': ('', False),
|
||||
'folder': ('', False),
|
||||
'size': (0, False),
|
||||
'modified': (0.0, False),
|
||||
'tags': ([], False),
|
||||
'preview_url': ('', False),
|
||||
'base_model': ('', False),
|
||||
'from_civitai': (True, False),
|
||||
'favorite': (False, False),
|
||||
'exclude': (False, False),
|
||||
'db_checked': (False, False),
|
||||
'preview_nsfw_level': (0, False),
|
||||
'notes': ('', False),
|
||||
'usage_tips': ('', False),
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def validate(cls, entry: Dict[str, Any], *, auto_repair: bool = True) -> ValidationResult:
|
||||
"""
|
||||
Validate a single cache entry.
|
||||
|
||||
Args:
|
||||
entry: The cache entry dictionary to validate
|
||||
auto_repair: If True, attempt to repair missing/invalid fields
|
||||
|
||||
Returns:
|
||||
ValidationResult with validation status and optionally repaired entry
|
||||
"""
|
||||
if entry is None:
|
||||
return ValidationResult(
|
||||
is_valid=False,
|
||||
repaired=False,
|
||||
errors=['Entry is None'],
|
||||
entry=None
|
||||
)
|
||||
|
||||
if not isinstance(entry, dict):
|
||||
return ValidationResult(
|
||||
is_valid=False,
|
||||
repaired=False,
|
||||
errors=[f'Entry is not a dict: {type(entry).__name__}'],
|
||||
entry=None
|
||||
)
|
||||
|
||||
errors: List[str] = []
|
||||
repaired = False
|
||||
working_entry = dict(entry) if auto_repair else entry
|
||||
|
||||
for field_name, (default_value, is_required) in cls.CORE_FIELDS.items():
|
||||
value = working_entry.get(field_name)
|
||||
|
||||
# Check if field is missing or None
|
||||
if value is None:
|
||||
if is_required:
|
||||
errors.append(f"Required field '{field_name}' is missing or None")
|
||||
if auto_repair:
|
||||
working_entry[field_name] = cls._get_default_copy(default_value)
|
||||
repaired = True
|
||||
continue
|
||||
|
||||
# Validate field type and value
|
||||
field_error = cls._validate_field(field_name, value, default_value)
|
||||
if field_error:
|
||||
errors.append(field_error)
|
||||
if auto_repair:
|
||||
working_entry[field_name] = cls._get_default_copy(default_value)
|
||||
repaired = True
|
||||
|
||||
# Special validation: file_path must not be empty for required field
|
||||
file_path = working_entry.get('file_path', '')
|
||||
if not file_path or (isinstance(file_path, str) and not file_path.strip()):
|
||||
errors.append("Required field 'file_path' is empty")
|
||||
# Cannot repair empty file_path - entry is invalid
|
||||
return ValidationResult(
|
||||
is_valid=False,
|
||||
repaired=repaired,
|
||||
errors=errors,
|
||||
entry=working_entry if auto_repair else None
|
||||
)
|
||||
|
||||
# Special validation: sha256 must not be empty for required field
|
||||
sha256 = working_entry.get('sha256', '')
|
||||
if not sha256 or (isinstance(sha256, str) and not sha256.strip()):
|
||||
errors.append("Required field 'sha256' is empty")
|
||||
# Cannot repair empty sha256 - entry is invalid
|
||||
return ValidationResult(
|
||||
is_valid=False,
|
||||
repaired=repaired,
|
||||
errors=errors,
|
||||
entry=working_entry if auto_repair else None
|
||||
)
|
||||
|
||||
# Normalize sha256 to lowercase if needed
|
||||
if isinstance(sha256, str):
|
||||
normalized_sha = sha256.lower().strip()
|
||||
if normalized_sha != sha256:
|
||||
working_entry['sha256'] = normalized_sha
|
||||
repaired = True
|
||||
|
||||
# Determine if entry is valid
|
||||
# Entry is valid if no critical required field errors remain after repair
|
||||
# Critical fields are file_path and sha256
|
||||
CRITICAL_REQUIRED_FIELDS = {'file_path', 'sha256'}
|
||||
has_critical_errors = any(
|
||||
"Required field" in error and
|
||||
any(f"'{field}'" in error for field in CRITICAL_REQUIRED_FIELDS)
|
||||
for error in errors
|
||||
)
|
||||
|
||||
is_valid = not has_critical_errors
|
||||
|
||||
return ValidationResult(
|
||||
is_valid=is_valid,
|
||||
repaired=repaired,
|
||||
errors=errors,
|
||||
entry=working_entry if auto_repair else entry
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def validate_batch(
|
||||
cls,
|
||||
entries: List[Dict[str, Any]],
|
||||
*,
|
||||
auto_repair: bool = True
|
||||
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
|
||||
"""
|
||||
Validate a batch of cache entries.
|
||||
|
||||
Args:
|
||||
entries: List of cache entry dictionaries to validate
|
||||
auto_repair: If True, attempt to repair missing/invalid fields
|
||||
|
||||
Returns:
|
||||
Tuple of (valid_entries, invalid_entries)
|
||||
"""
|
||||
if not entries:
|
||||
return [], []
|
||||
|
||||
valid_entries: List[Dict[str, Any]] = []
|
||||
invalid_entries: List[Dict[str, Any]] = []
|
||||
|
||||
for entry in entries:
|
||||
result = cls.validate(entry, auto_repair=auto_repair)
|
||||
|
||||
if result.is_valid:
|
||||
# Use repaired entry if available, otherwise original
|
||||
valid_entries.append(result.entry if result.entry else entry)
|
||||
else:
|
||||
invalid_entries.append(entry)
|
||||
# Log invalid entries for debugging
|
||||
file_path = entry.get('file_path', '<unknown>') if isinstance(entry, dict) else '<not a dict>'
|
||||
logger.warning(
|
||||
f"Invalid cache entry for '{file_path}': {', '.join(result.errors)}"
|
||||
)
|
||||
|
||||
return valid_entries, invalid_entries
|
||||
|
||||
@classmethod
|
||||
def _validate_field(cls, field_name: str, value: Any, default_value: Any) -> Optional[str]:
|
||||
"""
|
||||
Validate a specific field value.
|
||||
|
||||
Returns an error message if invalid, None if valid.
|
||||
"""
|
||||
expected_type = type(default_value)
|
||||
|
||||
# Special handling for numeric types
|
||||
if expected_type == int:
|
||||
if not isinstance(value, (int, float)):
|
||||
return f"Field '{field_name}' should be numeric, got {type(value).__name__}"
|
||||
elif expected_type == float:
|
||||
if not isinstance(value, (int, float)):
|
||||
return f"Field '{field_name}' should be numeric, got {type(value).__name__}"
|
||||
elif expected_type == bool:
|
||||
# Be lenient with boolean fields - accept truthy/falsy values
|
||||
pass
|
||||
elif expected_type == str:
|
||||
if not isinstance(value, str):
|
||||
return f"Field '{field_name}' should be string, got {type(value).__name__}"
|
||||
elif expected_type == list:
|
||||
if not isinstance(value, (list, tuple)):
|
||||
return f"Field '{field_name}' should be list, got {type(value).__name__}"
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def _get_default_copy(cls, default_value: Any) -> Any:
|
||||
"""Get a copy of the default value to avoid shared mutable state."""
|
||||
if isinstance(default_value, list):
|
||||
return list(default_value)
|
||||
if isinstance(default_value, dict):
|
||||
return dict(default_value)
|
||||
return default_value
|
||||
|
||||
@classmethod
|
||||
def get_file_path_safe(cls, entry: Dict[str, Any], default: str = '') -> str:
|
||||
"""Safely get file_path from an entry."""
|
||||
if not isinstance(entry, dict):
|
||||
return default
|
||||
value = entry.get('file_path')
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
return default
|
||||
|
||||
@classmethod
|
||||
def get_sha256_safe(cls, entry: Dict[str, Any], default: str = '') -> str:
|
||||
"""Safely get sha256 from an entry."""
|
||||
if not isinstance(entry, dict):
|
||||
return default
|
||||
value = entry.get('sha256')
|
||||
if isinstance(value, str):
|
||||
return value.lower()
|
||||
return default
|
||||
201
py/services/cache_health_monitor.py
Normal file
201
py/services/cache_health_monitor.py
Normal file
@@ -0,0 +1,201 @@
|
||||
"""
|
||||
Cache Health Monitor
|
||||
|
||||
Monitors cache health status and determines when user intervention is needed.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, List, Optional
|
||||
import logging
|
||||
|
||||
from .cache_entry_validator import CacheEntryValidator, ValidationResult
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CacheHealthStatus(Enum):
|
||||
"""Health status of the cache."""
|
||||
HEALTHY = "healthy"
|
||||
DEGRADED = "degraded"
|
||||
CORRUPTED = "corrupted"
|
||||
|
||||
|
||||
@dataclass
|
||||
class HealthReport:
|
||||
"""Report of cache health check."""
|
||||
status: CacheHealthStatus
|
||||
total_entries: int
|
||||
valid_entries: int
|
||||
invalid_entries: int
|
||||
repaired_entries: int
|
||||
invalid_paths: List[str] = field(default_factory=list)
|
||||
message: str = ""
|
||||
|
||||
@property
|
||||
def corruption_rate(self) -> float:
|
||||
"""Calculate the percentage of invalid entries."""
|
||||
if self.total_entries <= 0:
|
||||
return 0.0
|
||||
return self.invalid_entries / self.total_entries
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert to dictionary for JSON serialization."""
|
||||
return {
|
||||
'status': self.status.value,
|
||||
'total_entries': self.total_entries,
|
||||
'valid_entries': self.valid_entries,
|
||||
'invalid_entries': self.invalid_entries,
|
||||
'repaired_entries': self.repaired_entries,
|
||||
'corruption_rate': f"{self.corruption_rate:.1%}",
|
||||
'invalid_paths': self.invalid_paths[:10], # Limit to first 10
|
||||
'message': self.message,
|
||||
}
|
||||
|
||||
|
||||
class CacheHealthMonitor:
|
||||
"""
|
||||
Monitors cache health and determines appropriate status.
|
||||
|
||||
Thresholds:
|
||||
- HEALTHY: 0% invalid entries
|
||||
- DEGRADED: 0-5% invalid entries (auto-repaired, user should rebuild)
|
||||
- CORRUPTED: >5% invalid entries (significant data loss likely)
|
||||
"""
|
||||
|
||||
# Threshold percentages
|
||||
DEGRADED_THRESHOLD = 0.01 # 1% - show warning
|
||||
CORRUPTED_THRESHOLD = 0.05 # 5% - critical warning
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
degraded_threshold: float = DEGRADED_THRESHOLD,
|
||||
corrupted_threshold: float = CORRUPTED_THRESHOLD
|
||||
):
|
||||
"""
|
||||
Initialize the health monitor.
|
||||
|
||||
Args:
|
||||
degraded_threshold: Corruption rate threshold for DEGRADED status
|
||||
corrupted_threshold: Corruption rate threshold for CORRUPTED status
|
||||
"""
|
||||
self.degraded_threshold = degraded_threshold
|
||||
self.corrupted_threshold = corrupted_threshold
|
||||
|
||||
def check_health(
|
||||
self,
|
||||
entries: List[Dict[str, Any]],
|
||||
*,
|
||||
auto_repair: bool = True
|
||||
) -> HealthReport:
|
||||
"""
|
||||
Check the health of cache entries.
|
||||
|
||||
Args:
|
||||
entries: List of cache entry dictionaries to check
|
||||
auto_repair: If True, attempt to repair entries during validation
|
||||
|
||||
Returns:
|
||||
HealthReport with status and statistics
|
||||
"""
|
||||
if not entries:
|
||||
return HealthReport(
|
||||
status=CacheHealthStatus.HEALTHY,
|
||||
total_entries=0,
|
||||
valid_entries=0,
|
||||
invalid_entries=0,
|
||||
repaired_entries=0,
|
||||
message="Cache is empty"
|
||||
)
|
||||
|
||||
total_entries = len(entries)
|
||||
valid_entries: List[Dict[str, Any]] = []
|
||||
invalid_entries: List[Dict[str, Any]] = []
|
||||
repaired_count = 0
|
||||
invalid_paths: List[str] = []
|
||||
|
||||
for entry in entries:
|
||||
result = CacheEntryValidator.validate(entry, auto_repair=auto_repair)
|
||||
|
||||
if result.is_valid:
|
||||
valid_entries.append(result.entry if result.entry else entry)
|
||||
if result.repaired:
|
||||
repaired_count += 1
|
||||
else:
|
||||
invalid_entries.append(entry)
|
||||
# Extract file path for reporting
|
||||
file_path = CacheEntryValidator.get_file_path_safe(entry, '<unknown>')
|
||||
invalid_paths.append(file_path)
|
||||
|
||||
invalid_count = len(invalid_entries)
|
||||
valid_count = len(valid_entries)
|
||||
|
||||
# Determine status based on corruption rate
|
||||
corruption_rate = invalid_count / total_entries if total_entries > 0 else 0.0
|
||||
|
||||
if invalid_count == 0:
|
||||
status = CacheHealthStatus.HEALTHY
|
||||
message = "Cache is healthy"
|
||||
elif corruption_rate >= self.corrupted_threshold:
|
||||
status = CacheHealthStatus.CORRUPTED
|
||||
message = (
|
||||
f"Cache is corrupted: {invalid_count} invalid entries "
|
||||
f"({corruption_rate:.1%}). Rebuild recommended."
|
||||
)
|
||||
elif corruption_rate >= self.degraded_threshold or invalid_count > 0:
|
||||
status = CacheHealthStatus.DEGRADED
|
||||
message = (
|
||||
f"Cache has {invalid_count} invalid entries "
|
||||
f"({corruption_rate:.1%}). Consider rebuilding cache."
|
||||
)
|
||||
else:
|
||||
# This shouldn't happen, but handle gracefully
|
||||
status = CacheHealthStatus.HEALTHY
|
||||
message = "Cache is healthy"
|
||||
|
||||
# Log the health check result
|
||||
if status != CacheHealthStatus.HEALTHY:
|
||||
logger.warning(
|
||||
f"Cache health check: {status.value} - "
|
||||
f"{invalid_count}/{total_entries} invalid, "
|
||||
f"{repaired_count} repaired"
|
||||
)
|
||||
if invalid_paths:
|
||||
logger.debug(f"Invalid entry paths: {invalid_paths[:5]}")
|
||||
|
||||
return HealthReport(
|
||||
status=status,
|
||||
total_entries=total_entries,
|
||||
valid_entries=valid_count,
|
||||
invalid_entries=invalid_count,
|
||||
repaired_entries=repaired_count,
|
||||
invalid_paths=invalid_paths,
|
||||
message=message
|
||||
)
|
||||
|
||||
def should_notify_user(self, report: HealthReport) -> bool:
|
||||
"""
|
||||
Determine if the user should be notified about cache health.
|
||||
|
||||
Args:
|
||||
report: The health report to evaluate
|
||||
|
||||
Returns:
|
||||
True if user should be notified
|
||||
"""
|
||||
return report.status != CacheHealthStatus.HEALTHY
|
||||
|
||||
def get_notification_severity(self, report: HealthReport) -> str:
|
||||
"""
|
||||
Get the severity level for user notification.
|
||||
|
||||
Args:
|
||||
report: The health report to evaluate
|
||||
|
||||
Returns:
|
||||
Severity string: 'warning' or 'error'
|
||||
"""
|
||||
if report.status == CacheHealthStatus.CORRUPTED:
|
||||
return 'error'
|
||||
return 'warning'
|
||||
@@ -21,7 +21,8 @@ class CheckpointScanner(ModelScanner):
|
||||
hash_index=ModelHashIndex()
|
||||
)
|
||||
|
||||
def _resolve_model_type(self, root_path: Optional[str]) -> Optional[str]:
|
||||
def _resolve_sub_type(self, root_path: Optional[str]) -> Optional[str]:
|
||||
"""Resolve the sub-type based on the root path."""
|
||||
if not root_path:
|
||||
return None
|
||||
|
||||
@@ -34,18 +35,19 @@ class CheckpointScanner(ModelScanner):
|
||||
return None
|
||||
|
||||
def adjust_metadata(self, metadata, file_path, root_path):
|
||||
if hasattr(metadata, "model_type"):
|
||||
model_type = self._resolve_model_type(root_path)
|
||||
if model_type:
|
||||
metadata.model_type = model_type
|
||||
"""Adjust metadata during scanning to set sub_type."""
|
||||
sub_type = self._resolve_sub_type(root_path)
|
||||
if sub_type:
|
||||
metadata.sub_type = sub_type
|
||||
return metadata
|
||||
|
||||
def adjust_cached_entry(self, entry: Dict[str, Any]) -> Dict[str, Any]:
|
||||
model_type = self._resolve_model_type(
|
||||
"""Adjust entries loaded from the persisted cache to ensure sub_type is set."""
|
||||
sub_type = self._resolve_sub_type(
|
||||
self._find_root_for_file(entry.get("file_path"))
|
||||
)
|
||||
if model_type:
|
||||
entry["model_type"] = model_type
|
||||
if sub_type:
|
||||
entry["sub_type"] = sub_type
|
||||
return entry
|
||||
|
||||
def get_model_roots(self) -> List[str]:
|
||||
|
||||
@@ -22,6 +22,9 @@ class CheckpointService(BaseModelService):
|
||||
|
||||
async def format_response(self, checkpoint_data: Dict) -> Dict:
|
||||
"""Format Checkpoint data for API response"""
|
||||
# Get sub_type from cache entry (new canonical field)
|
||||
sub_type = checkpoint_data.get("sub_type", "checkpoint")
|
||||
|
||||
return {
|
||||
"model_name": checkpoint_data["model_name"],
|
||||
"file_name": checkpoint_data["file_name"],
|
||||
@@ -37,9 +40,10 @@ class CheckpointService(BaseModelService):
|
||||
"from_civitai": checkpoint_data.get("from_civitai", True),
|
||||
"usage_count": checkpoint_data.get("usage_count", 0),
|
||||
"notes": checkpoint_data.get("notes", ""),
|
||||
"model_type": checkpoint_data.get("model_type", "checkpoint"),
|
||||
"sub_type": sub_type,
|
||||
"favorite": checkpoint_data.get("favorite", False),
|
||||
"update_available": bool(checkpoint_data.get("update_available", False)),
|
||||
"skip_metadata_refresh": bool(checkpoint_data.get("skip_metadata_refresh", False)),
|
||||
"civitai": self.filter_civitai_data(checkpoint_data.get("civitai", {}), minimal=True)
|
||||
}
|
||||
|
||||
|
||||
91
py/services/custom_words_service.py
Normal file
91
py/services/custom_words_service.py
Normal file
@@ -0,0 +1,91 @@
|
||||
"""Service for managing autocomplete via TagFTSIndex.
|
||||
|
||||
This service provides full-text search capabilities for Danbooru/e621 tags
|
||||
with category filtering and enriched results including post counts.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import List, Dict, Any, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CustomWordsService:
|
||||
"""Service for autocomplete via TagFTSIndex.
|
||||
|
||||
This service:
|
||||
- Uses TagFTSIndex for fast full-text search of Danbooru/e621 tags
|
||||
- Supports category-based filtering
|
||||
- Returns enriched results with category and post_count
|
||||
- Provides sub-100ms search times for 221k+ tags
|
||||
"""
|
||||
|
||||
_instance: Optional[CustomWordsService] = None
|
||||
_initialized: bool = False
|
||||
|
||||
def __new__(cls) -> CustomWordsService:
|
||||
if cls._instance is None:
|
||||
cls._instance = super().__new__(cls)
|
||||
return cls._instance
|
||||
|
||||
def __init__(self) -> None:
|
||||
if self._initialized:
|
||||
return
|
||||
|
||||
self._tag_index: Optional[Any] = None
|
||||
self._initialized = True
|
||||
|
||||
@classmethod
|
||||
def get_instance(cls) -> CustomWordsService:
|
||||
"""Get the singleton instance of CustomWordsService."""
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
def _get_tag_index(self):
|
||||
"""Get or create the TagFTSIndex instance (lazy initialization)."""
|
||||
if self._tag_index is None:
|
||||
try:
|
||||
from .tag_fts_index import get_tag_fts_index
|
||||
self._tag_index = get_tag_fts_index()
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to initialize TagFTSIndex: {e}")
|
||||
self._tag_index = None
|
||||
return self._tag_index
|
||||
|
||||
def search_words(
|
||||
self,
|
||||
search_term: str,
|
||||
limit: int = 20,
|
||||
categories: Optional[List[int]] = None,
|
||||
enriched: bool = False
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Search tags using TagFTSIndex with category filtering.
|
||||
|
||||
Args:
|
||||
search_term: The search term to match against.
|
||||
limit: Maximum number of results to return.
|
||||
categories: Optional list of category IDs to filter by.
|
||||
enriched: If True, always return enriched results with category
|
||||
and post_count (default behavior now).
|
||||
|
||||
Returns:
|
||||
List of dicts with tag_name, category, and post_count.
|
||||
"""
|
||||
tag_index = self._get_tag_index()
|
||||
if tag_index is not None:
|
||||
results = tag_index.search(search_term, categories=categories, limit=limit)
|
||||
return results
|
||||
|
||||
logger.debug("TagFTSIndex not available, returning empty results")
|
||||
return []
|
||||
|
||||
|
||||
def get_custom_words_service() -> CustomWordsService:
|
||||
"""Factory function to get the CustomWordsService singleton."""
|
||||
return CustomWordsService.get_instance()
|
||||
|
||||
|
||||
__all__ = ["CustomWordsService", "get_custom_words_service"]
|
||||
@@ -86,6 +86,7 @@ class DownloadCoordinator:
|
||||
progress_callback=progress_callback,
|
||||
download_id=download_id,
|
||||
source=payload.get("source"),
|
||||
file_params=payload.get("file_params"),
|
||||
)
|
||||
|
||||
result["download_id"] = download_id
|
||||
|
||||
@@ -70,6 +70,7 @@ class DownloadManager:
|
||||
use_default_paths: bool = False,
|
||||
download_id: str = None,
|
||||
source: str = None,
|
||||
file_params: Dict = None,
|
||||
) -> Dict:
|
||||
"""Download model from Civitai with task tracking and concurrency control
|
||||
|
||||
@@ -82,6 +83,7 @@ class DownloadManager:
|
||||
use_default_paths: Flag to use default paths
|
||||
download_id: Unique identifier for this download task
|
||||
source: Optional source parameter to specify metadata provider
|
||||
file_params: Optional dict with file selection params (type, format, size, fp, isPrimary)
|
||||
|
||||
Returns:
|
||||
Dict with download result
|
||||
@@ -122,6 +124,7 @@ class DownloadManager:
|
||||
progress_callback,
|
||||
use_default_paths,
|
||||
source,
|
||||
file_params,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -155,6 +158,7 @@ class DownloadManager:
|
||||
progress_callback=None,
|
||||
use_default_paths: bool = False,
|
||||
source: str = None,
|
||||
file_params: Dict = None,
|
||||
):
|
||||
"""Execute download with semaphore to limit concurrency"""
|
||||
# Update status to waiting
|
||||
@@ -215,6 +219,7 @@ class DownloadManager:
|
||||
use_default_paths,
|
||||
task_id,
|
||||
source,
|
||||
file_params,
|
||||
)
|
||||
|
||||
# Update status based on result
|
||||
@@ -266,6 +271,7 @@ class DownloadManager:
|
||||
use_default_paths,
|
||||
download_id=None,
|
||||
source=None,
|
||||
file_params=None,
|
||||
):
|
||||
"""Wrapper for original download_from_civitai implementation"""
|
||||
try:
|
||||
@@ -456,16 +462,57 @@ class DownloadManager:
|
||||
await progress_callback(0)
|
||||
|
||||
# 2. Get file information
|
||||
file_info = next(
|
||||
(
|
||||
f
|
||||
for f in version_info.get("files", [])
|
||||
if f.get("primary") and f.get("type") in ("Model", "Negative")
|
||||
),
|
||||
None,
|
||||
)
|
||||
files = version_info.get("files", [])
|
||||
file_info = None
|
||||
|
||||
# If file_params is provided, try to find matching file
|
||||
if file_params and model_version_id:
|
||||
target_type = file_params.get("type", "Model")
|
||||
target_format = file_params.get("format", "SafeTensor")
|
||||
target_size = file_params.get("size", "full")
|
||||
target_fp = file_params.get("fp")
|
||||
is_primary = file_params.get("isPrimary", False)
|
||||
|
||||
if is_primary:
|
||||
# Find primary file
|
||||
file_info = next(
|
||||
(f for f in files if f.get("primary") and f.get("type") in ("Model", "Negative")),
|
||||
None
|
||||
)
|
||||
else:
|
||||
# Match by metadata
|
||||
for f in files:
|
||||
f_type = f.get("type", "")
|
||||
f_meta = f.get("metadata", {})
|
||||
|
||||
# Check type match
|
||||
if f_type != target_type:
|
||||
continue
|
||||
|
||||
# Check metadata match
|
||||
if f_meta.get("format") != target_format:
|
||||
continue
|
||||
if f_meta.get("size") != target_size:
|
||||
continue
|
||||
if target_fp and f_meta.get("fp") != target_fp:
|
||||
continue
|
||||
|
||||
file_info = f
|
||||
break
|
||||
|
||||
# Fallback to primary file if no match found
|
||||
if not file_info:
|
||||
return {"success": False, "error": "No primary file found in metadata"}
|
||||
file_info = next(
|
||||
(
|
||||
f
|
||||
for f in files
|
||||
if f.get("primary") and f.get("type") in ("Model", "Negative")
|
||||
),
|
||||
None,
|
||||
)
|
||||
|
||||
if not file_info:
|
||||
return {"success": False, "error": "No suitable file found in metadata"}
|
||||
mirrors = file_info.get("mirrors") or []
|
||||
download_urls = []
|
||||
if mirrors:
|
||||
@@ -496,7 +543,9 @@ class DownloadManager:
|
||||
return {"success": False, "error": "No mirror URL found"}
|
||||
|
||||
# 3. Prepare download
|
||||
file_name = file_info["name"]
|
||||
file_name = file_info.get("name", "")
|
||||
if not file_name:
|
||||
return {"success": False, "error": "No filename found in file info"}
|
||||
save_path = os.path.join(save_dir, file_name)
|
||||
|
||||
# 5. Prepare metadata based on model type
|
||||
|
||||
@@ -22,6 +22,9 @@ class EmbeddingService(BaseModelService):
|
||||
|
||||
async def format_response(self, embedding_data: Dict) -> Dict:
|
||||
"""Format Embedding data for API response"""
|
||||
# Get sub_type from cache entry (new canonical field)
|
||||
sub_type = embedding_data.get("sub_type", "embedding")
|
||||
|
||||
return {
|
||||
"model_name": embedding_data["model_name"],
|
||||
"file_name": embedding_data["file_name"],
|
||||
@@ -37,9 +40,10 @@ class EmbeddingService(BaseModelService):
|
||||
"from_civitai": embedding_data.get("from_civitai", True),
|
||||
# "usage_count": embedding_data.get("usage_count", 0), # TODO: Enable when embedding usage tracking is implemented
|
||||
"notes": embedding_data.get("notes", ""),
|
||||
"model_type": embedding_data.get("model_type", "embedding"),
|
||||
"sub_type": sub_type,
|
||||
"favorite": embedding_data.get("favorite", False),
|
||||
"update_available": bool(embedding_data.get("update_available", False)),
|
||||
"skip_metadata_refresh": bool(embedding_data.get("skip_metadata_refresh", False)),
|
||||
"civitai": self.filter_civitai_data(embedding_data.get("civitai", {}), minimal=True)
|
||||
}
|
||||
|
||||
|
||||
@@ -30,36 +30,36 @@ class LoraScanner(ModelScanner):
|
||||
|
||||
async def diagnose_hash_index(self):
|
||||
"""Diagnostic method to verify hash index functionality"""
|
||||
print("\n\n*** DIAGNOSING LORA HASH INDEX ***\n\n", file=sys.stderr)
|
||||
logger.debug("\n\n*** DIAGNOSING LORA HASH INDEX ***\n\n")
|
||||
|
||||
# First check if the hash index has any entries
|
||||
if hasattr(self, '_hash_index'):
|
||||
index_entries = len(self._hash_index._hash_to_path)
|
||||
print(f"Hash index has {index_entries} entries", file=sys.stderr)
|
||||
logger.debug(f"Hash index has {index_entries} entries")
|
||||
|
||||
# Print a few example entries if available
|
||||
if index_entries > 0:
|
||||
print("\nSample hash index entries:", file=sys.stderr)
|
||||
logger.debug("\nSample hash index entries:")
|
||||
count = 0
|
||||
for hash_val, path in self._hash_index._hash_to_path.items():
|
||||
if count < 5: # Just show the first 5
|
||||
print(f"Hash: {hash_val[:8]}... -> Path: {path}", file=sys.stderr)
|
||||
logger.debug(f"Hash: {hash_val[:8]}... -> Path: {path}")
|
||||
count += 1
|
||||
else:
|
||||
break
|
||||
else:
|
||||
print("Hash index not initialized", file=sys.stderr)
|
||||
logger.debug("Hash index not initialized")
|
||||
|
||||
# Try looking up by a known hash for testing
|
||||
if not hasattr(self, '_hash_index') or not self._hash_index._hash_to_path:
|
||||
print("No hash entries to test lookup with", file=sys.stderr)
|
||||
logger.debug("No hash entries to test lookup with")
|
||||
return
|
||||
|
||||
test_hash = next(iter(self._hash_index._hash_to_path.keys()))
|
||||
test_path = self._hash_index.get_path(test_hash)
|
||||
print(f"\nTest lookup by hash: {test_hash[:8]}... -> {test_path}", file=sys.stderr)
|
||||
logger.debug(f"\nTest lookup by hash: {test_hash[:8]}... -> {test_path}")
|
||||
|
||||
# Also test reverse lookup
|
||||
test_hash_result = self._hash_index.get_hash(test_path)
|
||||
print(f"Test reverse lookup: {test_path} -> {test_hash_result[:8]}...\n\n", file=sys.stderr)
|
||||
logger.debug(f"Test reverse lookup: {test_path} -> {test_hash_result[:8]}...\n\n")
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ import logging
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from .base_model_service import BaseModelService
|
||||
from .model_query import resolve_sub_type
|
||||
from ..utils.models import LoraMetadata
|
||||
from ..config import config
|
||||
|
||||
@@ -23,6 +24,10 @@ class LoraService(BaseModelService):
|
||||
|
||||
async def format_response(self, lora_data: Dict) -> Dict:
|
||||
"""Format LoRA data for API response"""
|
||||
# Resolve sub_type using priority: sub_type > model_type > civitai.model.type > default
|
||||
# Normalize to lowercase for consistent API responses
|
||||
sub_type = resolve_sub_type(lora_data).lower()
|
||||
|
||||
return {
|
||||
"model_name": lora_data["model_name"],
|
||||
"file_name": lora_data["file_name"],
|
||||
@@ -43,6 +48,8 @@ class LoraService(BaseModelService):
|
||||
"notes": lora_data.get("notes", ""),
|
||||
"favorite": lora_data.get("favorite", False),
|
||||
"update_available": bool(lora_data.get("update_available", False)),
|
||||
"skip_metadata_refresh": bool(lora_data.get("skip_metadata_refresh", False)),
|
||||
"sub_type": sub_type,
|
||||
"civitai": self.filter_civitai_data(
|
||||
lora_data.get("civitai", {}), minimal=True
|
||||
),
|
||||
@@ -479,3 +486,49 @@ class LoraService(BaseModelService):
|
||||
]
|
||||
|
||||
return available_loras
|
||||
|
||||
async def get_cycler_list(
|
||||
self,
|
||||
pool_config: Optional[Dict] = None,
|
||||
sort_by: str = "filename"
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
Get filtered and sorted LoRA list for cycling.
|
||||
|
||||
Args:
|
||||
pool_config: Optional pool config for filtering (filters dict)
|
||||
sort_by: Sort field - 'filename' or 'model_name'
|
||||
|
||||
Returns:
|
||||
List of LoRA dicts with file_name and model_name
|
||||
"""
|
||||
# Get cached data
|
||||
cache = await self.scanner.get_cached_data(force_refresh=False)
|
||||
available_loras = cache.raw_data if cache else []
|
||||
|
||||
# Apply pool filters if provided
|
||||
if pool_config:
|
||||
available_loras = await self._apply_pool_filters(
|
||||
available_loras, pool_config
|
||||
)
|
||||
|
||||
# Sort by specified field
|
||||
if sort_by == "model_name":
|
||||
available_loras = sorted(
|
||||
available_loras,
|
||||
key=lambda x: (x.get("model_name") or x.get("file_name", "")).lower()
|
||||
)
|
||||
else: # Default to filename
|
||||
available_loras = sorted(
|
||||
available_loras,
|
||||
key=lambda x: x.get("file_name", "").lower()
|
||||
)
|
||||
|
||||
# Return minimal data needed for cycling
|
||||
return [
|
||||
{
|
||||
"file_name": lora["file_name"],
|
||||
"model_name": lora.get("model_name", lora["file_name"]),
|
||||
}
|
||||
for lora in available_loras
|
||||
]
|
||||
|
||||
@@ -44,6 +44,8 @@ async def initialize_metadata_providers():
|
||||
logger.debug(f"SQLite metadata provider registered with database: {db_path}")
|
||||
else:
|
||||
logger.warning("Metadata archive database is enabled but database file not found")
|
||||
logger.info("Automatically disabling enable_metadata_archive_db setting")
|
||||
settings_manager.set('enable_metadata_archive_db', False)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize SQLite metadata provider: {e}")
|
||||
|
||||
|
||||
@@ -243,17 +243,27 @@ class MetadataSyncService:
|
||||
last_error = error or last_error
|
||||
|
||||
if civitai_metadata is None or metadata_provider is None:
|
||||
# Track if we need to save metadata
|
||||
needs_save = False
|
||||
|
||||
if sqlite_attempted:
|
||||
model_data["db_checked"] = True
|
||||
needs_save = True
|
||||
|
||||
if civitai_api_not_found:
|
||||
model_data["from_civitai"] = False
|
||||
model_data["civitai_deleted"] = True
|
||||
model_data["db_checked"] = sqlite_attempted or (enable_archive and model_data.get("db_checked", False))
|
||||
model_data["last_checked_at"] = datetime.now().timestamp()
|
||||
needs_save = True
|
||||
|
||||
# Save metadata if any state was updated
|
||||
if needs_save:
|
||||
data_to_save = model_data.copy()
|
||||
data_to_save.pop("folder", None)
|
||||
# Update last_checked_at for sqlite-only attempts if not already set
|
||||
if "last_checked_at" not in data_to_save:
|
||||
data_to_save["last_checked_at"] = datetime.now().timestamp()
|
||||
await self._metadata_manager.save_metadata(file_path, data_to_save)
|
||||
|
||||
default_error = (
|
||||
|
||||
@@ -5,7 +5,6 @@ import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
from dataclasses import dataclass, field
|
||||
from operator import itemgetter
|
||||
from natsort import natsorted
|
||||
|
||||
# Supported sort modes: (sort_key, order)
|
||||
@@ -229,17 +228,17 @@ class ModelCache:
|
||||
reverse=reverse
|
||||
)
|
||||
elif sort_key == 'date':
|
||||
# Sort by modified timestamp
|
||||
# Sort by modified timestamp (use .get() with default to handle missing fields)
|
||||
result = sorted(
|
||||
data,
|
||||
key=itemgetter('modified'),
|
||||
key=lambda x: x.get('modified', 0.0),
|
||||
reverse=reverse
|
||||
)
|
||||
elif sort_key == 'size':
|
||||
# Sort by file size
|
||||
# Sort by file size (use .get() with default to handle missing fields)
|
||||
result = sorted(
|
||||
data,
|
||||
key=itemgetter('size'),
|
||||
key=lambda x: x.get('size', 0),
|
||||
reverse=reverse
|
||||
)
|
||||
elif sort_key == 'usage':
|
||||
|
||||
@@ -676,10 +676,12 @@ class ModelMetadataProviderManager:
|
||||
|
||||
def _get_provider(self, provider_name: str = None) -> ModelMetadataProvider:
|
||||
"""Get provider by name or default provider"""
|
||||
if provider_name and provider_name in self.providers:
|
||||
if provider_name:
|
||||
if provider_name not in self.providers:
|
||||
raise ValueError(f"Provider '{provider_name}' is not registered")
|
||||
return self.providers[provider_name]
|
||||
|
||||
|
||||
if self.default_provider is None:
|
||||
raise ValueError("No default provider set and no valid provider specified")
|
||||
|
||||
|
||||
return self.providers[self.default_provider]
|
||||
|
||||
@@ -33,28 +33,42 @@ def _coerce_to_str(value: Any) -> Optional[str]:
|
||||
return candidate if candidate else None
|
||||
|
||||
|
||||
def normalize_civitai_model_type(value: Any) -> Optional[str]:
|
||||
"""Return a lowercase string suitable for comparisons."""
|
||||
def normalize_sub_type(value: Any) -> Optional[str]:
|
||||
"""Return a lowercase string suitable for sub_type comparisons."""
|
||||
candidate = _coerce_to_str(value)
|
||||
return candidate.lower() if candidate else None
|
||||
|
||||
|
||||
def resolve_civitai_model_type(entry: Mapping[str, Any]) -> str:
|
||||
"""Extract the model type from CivitAI metadata, defaulting to LORA."""
|
||||
def resolve_sub_type(entry: Mapping[str, Any]) -> str:
|
||||
"""Extract the sub-type from metadata, checking multiple sources.
|
||||
|
||||
Priority:
|
||||
1. entry['sub_type'] - new canonical field
|
||||
2. entry['model_type'] - backward compatibility
|
||||
3. civitai.model.type - CivitAI API data
|
||||
4. DEFAULT_CIVITAI_MODEL_TYPE - fallback
|
||||
"""
|
||||
if not isinstance(entry, Mapping):
|
||||
return DEFAULT_CIVITAI_MODEL_TYPE
|
||||
|
||||
# Priority 1: Check new canonical field 'sub_type'
|
||||
sub_type = _coerce_to_str(entry.get("sub_type"))
|
||||
if sub_type:
|
||||
return sub_type
|
||||
|
||||
# Priority 2: Backward compatibility - check 'model_type' field
|
||||
model_type = _coerce_to_str(entry.get("model_type"))
|
||||
if model_type:
|
||||
return model_type
|
||||
|
||||
# Priority 3: Extract from CivitAI metadata
|
||||
civitai = entry.get("civitai")
|
||||
if isinstance(civitai, Mapping):
|
||||
civitai_model = civitai.get("model")
|
||||
if isinstance(civitai_model, Mapping):
|
||||
model_type = _coerce_to_str(civitai_model.get("type"))
|
||||
if model_type:
|
||||
return model_type
|
||||
|
||||
model_type = _coerce_to_str(entry.get("model_type"))
|
||||
if model_type:
|
||||
return model_type
|
||||
civitai_type = _coerce_to_str(civitai_model.get("type"))
|
||||
if civitai_type:
|
||||
return civitai_type
|
||||
|
||||
return DEFAULT_CIVITAI_MODEL_TYPE
|
||||
|
||||
@@ -85,6 +99,7 @@ class FilterCriteria:
|
||||
favorites_only: bool = False
|
||||
search_options: Optional[Dict[str, Any]] = None
|
||||
model_types: Optional[Sequence[str]] = None
|
||||
tag_logic: str = "any" # "any" (OR) or "all" (AND)
|
||||
|
||||
|
||||
class ModelCacheRepository:
|
||||
@@ -286,11 +301,29 @@ class ModelFilterSet:
|
||||
include_tags = {tag for tag in tag_filters if tag}
|
||||
|
||||
if include_tags:
|
||||
tag_logic = criteria.tag_logic.lower() if criteria.tag_logic else "any"
|
||||
|
||||
def matches_include(item_tags):
|
||||
if not item_tags and "__no_tags__" in include_tags:
|
||||
return True
|
||||
return any(tag in include_tags for tag in (item_tags or []))
|
||||
if tag_logic == "all":
|
||||
# AND logic: item must have ALL include tags
|
||||
# Special case: __no_tags__ is handled separately
|
||||
non_special_tags = include_tags - {"__no_tags__"}
|
||||
if "__no_tags__" in include_tags:
|
||||
# If __no_tags__ is selected along with other tags,
|
||||
# treat it as "no tags OR (all other tags)"
|
||||
if not item_tags:
|
||||
return True
|
||||
# Otherwise, check if all non-special tags match
|
||||
if non_special_tags:
|
||||
return all(tag in (item_tags or []) for tag in non_special_tags)
|
||||
return True
|
||||
# Normal case: all tags must match
|
||||
return all(tag in (item_tags or []) for tag in non_special_tags)
|
||||
else:
|
||||
# OR logic (default): item must have ANY include tag
|
||||
return any(tag in include_tags for tag in (item_tags or []))
|
||||
|
||||
items = [item for item in items if matches_include(item.get("tags"))]
|
||||
|
||||
@@ -313,7 +346,7 @@ class ModelFilterSet:
|
||||
normalized_model_types = {
|
||||
model_type
|
||||
for model_type in (
|
||||
normalize_civitai_model_type(value) for value in model_types
|
||||
normalize_sub_type(value) for value in model_types
|
||||
)
|
||||
if model_type
|
||||
}
|
||||
@@ -321,7 +354,7 @@ class ModelFilterSet:
|
||||
items = [
|
||||
item
|
||||
for item in items
|
||||
if normalize_civitai_model_type(resolve_civitai_model_type(item))
|
||||
if normalize_sub_type(resolve_sub_type(item))
|
||||
in normalized_model_types
|
||||
]
|
||||
model_types_duration = time.perf_counter() - t0
|
||||
|
||||
@@ -20,6 +20,8 @@ from .service_registry import ServiceRegistry
|
||||
from .websocket_manager import ws_manager
|
||||
from .persistent_model_cache import get_persistent_cache
|
||||
from .settings_manager import get_settings_manager
|
||||
from .cache_entry_validator import CacheEntryValidator
|
||||
from .cache_health_monitor import CacheHealthMonitor, CacheHealthStatus
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -246,6 +248,7 @@ class ModelScanner:
|
||||
'tags': tags_list,
|
||||
'civitai': civitai_slim,
|
||||
'civitai_deleted': bool(get_value('civitai_deleted', False)),
|
||||
'skip_metadata_refresh': bool(get_value('skip_metadata_refresh', False)),
|
||||
}
|
||||
|
||||
license_source: Dict[str, Any] = {}
|
||||
@@ -275,9 +278,10 @@ class ModelScanner:
|
||||
_, license_flags = resolve_license_info(license_source or {})
|
||||
entry['license_flags'] = license_flags
|
||||
|
||||
model_type = get_value('model_type', None)
|
||||
if model_type:
|
||||
entry['model_type'] = model_type
|
||||
# Handle sub_type (new canonical field)
|
||||
sub_type = get_value('sub_type', None)
|
||||
if sub_type:
|
||||
entry['sub_type'] = sub_type
|
||||
|
||||
return entry
|
||||
|
||||
@@ -467,6 +471,39 @@ class ModelScanner:
|
||||
for tag in adjusted_item.get('tags') or []:
|
||||
tags_count[tag] = tags_count.get(tag, 0) + 1
|
||||
|
||||
# Validate cache entries and check health
|
||||
valid_entries, invalid_entries = CacheEntryValidator.validate_batch(
|
||||
adjusted_raw_data, auto_repair=True
|
||||
)
|
||||
|
||||
if invalid_entries:
|
||||
monitor = CacheHealthMonitor()
|
||||
report = monitor.check_health(adjusted_raw_data, auto_repair=True)
|
||||
|
||||
if report.status != CacheHealthStatus.HEALTHY:
|
||||
# Broadcast health warning to frontend
|
||||
await ws_manager.broadcast_cache_health_warning(report, page_type)
|
||||
logger.warning(
|
||||
f"{self.model_type.capitalize()} Scanner: Cache health issue detected - "
|
||||
f"{report.invalid_entries} invalid entries, {report.repaired_entries} repaired"
|
||||
)
|
||||
|
||||
# Use only valid entries
|
||||
adjusted_raw_data = valid_entries
|
||||
|
||||
# Rebuild tags count from valid entries only
|
||||
tags_count = {}
|
||||
for item in adjusted_raw_data:
|
||||
for tag in item.get('tags') or []:
|
||||
tags_count[tag] = tags_count.get(tag, 0) + 1
|
||||
|
||||
# Remove invalid entries from hash index
|
||||
for invalid_entry in invalid_entries:
|
||||
file_path = CacheEntryValidator.get_file_path_safe(invalid_entry)
|
||||
sha256 = CacheEntryValidator.get_sha256_safe(invalid_entry)
|
||||
if file_path:
|
||||
hash_index.remove_by_path(file_path, sha256)
|
||||
|
||||
scan_result = CacheBuildResult(
|
||||
raw_data=adjusted_raw_data,
|
||||
hash_index=hash_index,
|
||||
@@ -650,7 +687,6 @@ class ModelScanner:
|
||||
|
||||
async def _initialize_cache(self) -> None:
|
||||
"""Initialize or refresh the cache"""
|
||||
print("init start", flush=True)
|
||||
self._is_initializing = True # Set flag
|
||||
try:
|
||||
start_time = time.time()
|
||||
@@ -664,7 +700,6 @@ class ModelScanner:
|
||||
scan_result = await self._gather_model_data()
|
||||
await self._apply_scan_result(scan_result)
|
||||
await self._save_persistent_cache(scan_result)
|
||||
print("init end", flush=True)
|
||||
|
||||
logger.info(
|
||||
f"{self.model_type.capitalize()} Scanner: Cache initialization completed in {time.time() - start_time:.2f} seconds, "
|
||||
@@ -775,6 +810,18 @@ class ModelScanner:
|
||||
model_data = self.adjust_cached_entry(dict(model_data))
|
||||
if not model_data:
|
||||
continue
|
||||
|
||||
# Validate the new entry before adding
|
||||
validation_result = CacheEntryValidator.validate(
|
||||
model_data, auto_repair=True
|
||||
)
|
||||
if not validation_result.is_valid:
|
||||
logger.warning(
|
||||
f"Skipping invalid entry during reconcile: {path}"
|
||||
)
|
||||
continue
|
||||
model_data = validation_result.entry
|
||||
|
||||
self._ensure_license_flags(model_data)
|
||||
# Add to cache
|
||||
self._cache.raw_data.append(model_data)
|
||||
@@ -1089,6 +1136,17 @@ class ModelScanner:
|
||||
processed_files += 1
|
||||
|
||||
if result:
|
||||
# Validate the entry before adding
|
||||
validation_result = CacheEntryValidator.validate(
|
||||
result, auto_repair=True
|
||||
)
|
||||
if not validation_result.is_valid:
|
||||
logger.warning(
|
||||
f"Skipping invalid scan result: {file_path}"
|
||||
)
|
||||
continue
|
||||
result = validation_result.entry
|
||||
|
||||
self._ensure_license_flags(result)
|
||||
raw_data.append(result)
|
||||
|
||||
@@ -1390,7 +1448,7 @@ class ModelScanner:
|
||||
return None
|
||||
|
||||
async def get_top_tags(self, limit: int = 20) -> List[Dict[str, any]]:
|
||||
"""Get top tags sorted by count"""
|
||||
"""Get top tags sorted by count. If limit is 0, return all tags."""
|
||||
await self.get_cached_data()
|
||||
|
||||
sorted_tags = sorted(
|
||||
@@ -1399,6 +1457,8 @@ class ModelScanner:
|
||||
reverse=True
|
||||
)
|
||||
|
||||
if limit == 0:
|
||||
return sorted_tags
|
||||
return sorted_tags[:limit]
|
||||
|
||||
async def get_base_models(self, limit: int = 20) -> List[Dict[str, any]]:
|
||||
|
||||
@@ -7,7 +7,8 @@ import os
|
||||
import sqlite3
|
||||
import time
|
||||
from dataclasses import dataclass, replace
|
||||
from typing import Dict, Iterable, List, Mapping, Optional, Sequence
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Dict, Iterable, List, Mapping, Optional, Sequence
|
||||
|
||||
from .errors import RateLimitError, ResourceNotFoundError
|
||||
from .settings_manager import get_settings_manager
|
||||
@@ -64,7 +65,9 @@ class ModelVersionRecord:
|
||||
preview_url: Optional[str]
|
||||
is_in_library: bool
|
||||
should_ignore: bool
|
||||
early_access_ends_at: Optional[str] = None
|
||||
sort_index: int = 0
|
||||
is_early_access: bool = False
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -97,8 +100,12 @@ class ModelUpdateRecord:
|
||||
|
||||
return [version.version_id for version in self.versions if version.is_in_library]
|
||||
|
||||
def has_update(self) -> bool:
|
||||
"""Return True when a non-ignored remote version newer than the newest local copy is available."""
|
||||
def has_update(self, hide_early_access: bool = False) -> bool:
|
||||
"""Return True when a non-ignored remote version newer than the newest local copy is available.
|
||||
|
||||
Args:
|
||||
hide_early_access: If True, exclude early access versions from update check.
|
||||
"""
|
||||
|
||||
if self.should_ignore_model:
|
||||
return False
|
||||
@@ -110,22 +117,56 @@ class ModelUpdateRecord:
|
||||
|
||||
if max_in_library is None:
|
||||
return any(
|
||||
not version.is_in_library and not version.should_ignore for version in self.versions
|
||||
not version.is_in_library
|
||||
and not version.should_ignore
|
||||
and not (hide_early_access and ModelUpdateRecord._is_early_access_active(version))
|
||||
for version in self.versions
|
||||
)
|
||||
|
||||
for version in self.versions:
|
||||
if version.is_in_library or version.should_ignore:
|
||||
continue
|
||||
if hide_early_access and ModelUpdateRecord._is_early_access_active(version):
|
||||
continue
|
||||
if version.version_id > max_in_library:
|
||||
return True
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def _is_early_access_active(version: ModelVersionRecord) -> bool:
|
||||
"""Check if a version is currently in early access period.
|
||||
|
||||
Uses two-phase detection:
|
||||
1. If exact EA end time available (from single version API), use it for precise check
|
||||
2. Otherwise fallback to basic EA flag (from bulk API)
|
||||
"""
|
||||
# Phase 2: Precise check with exact end time
|
||||
if version.early_access_ends_at:
|
||||
try:
|
||||
ea_date = datetime.fromisoformat(
|
||||
version.early_access_ends_at.replace("Z", "+00:00")
|
||||
)
|
||||
return ea_date > datetime.now(timezone.utc)
|
||||
except (ValueError, AttributeError):
|
||||
# If date parsing fails, treat as active EA (conservative)
|
||||
return True
|
||||
|
||||
# Phase 1: Basic EA flag from bulk API
|
||||
return version.is_early_access
|
||||
|
||||
def has_update_for_base(
|
||||
self,
|
||||
local_version_id: Optional[int],
|
||||
local_base_model: Optional[str],
|
||||
hide_early_access: bool = False,
|
||||
) -> bool:
|
||||
"""Return True when a newer remote version with the same base model exists."""
|
||||
"""Return True when a newer remote version with the same base model exists.
|
||||
|
||||
Args:
|
||||
local_version_id: The current local version id.
|
||||
local_base_model: The base model to filter by.
|
||||
hide_early_access: If True, exclude early access versions from update check.
|
||||
"""
|
||||
|
||||
if self.should_ignore_model:
|
||||
return False
|
||||
@@ -153,6 +194,8 @@ class ModelUpdateRecord:
|
||||
for version in self.versions:
|
||||
if version.is_in_library or version.should_ignore:
|
||||
continue
|
||||
if hide_early_access and ModelUpdateRecord._is_early_access_active(version):
|
||||
continue
|
||||
version_base = _normalize_base_model(version.base_model)
|
||||
if version_base != normalized_base:
|
||||
continue
|
||||
@@ -268,6 +311,14 @@ class ModelUpdateService:
|
||||
"ALTER TABLE model_update_versions "
|
||||
"ADD COLUMN should_ignore INTEGER NOT NULL DEFAULT 0"
|
||||
),
|
||||
"early_access_ends_at": (
|
||||
"ALTER TABLE model_update_versions "
|
||||
"ADD COLUMN early_access_ends_at TEXT"
|
||||
),
|
||||
"is_early_access": (
|
||||
"ALTER TABLE model_update_versions "
|
||||
"ADD COLUMN is_early_access INTEGER NOT NULL DEFAULT 0"
|
||||
),
|
||||
}
|
||||
|
||||
for column, statement in migrations.items():
|
||||
@@ -367,6 +418,8 @@ class ModelUpdateService:
|
||||
preview_url TEXT,
|
||||
is_in_library INTEGER NOT NULL DEFAULT 0,
|
||||
should_ignore INTEGER NOT NULL DEFAULT 0,
|
||||
early_access_ends_at TEXT,
|
||||
is_early_access INTEGER NOT NULL DEFAULT 0,
|
||||
PRIMARY KEY (model_id, version_id),
|
||||
FOREIGN KEY(model_id) REFERENCES model_update_status(model_id) ON DELETE CASCADE
|
||||
)
|
||||
@@ -384,6 +437,8 @@ class ModelUpdateService:
|
||||
"preview_url",
|
||||
"is_in_library",
|
||||
"should_ignore",
|
||||
"early_access_ends_at",
|
||||
"is_early_access",
|
||||
]
|
||||
defaults = {
|
||||
"sort_index": "0",
|
||||
@@ -394,6 +449,8 @@ class ModelUpdateService:
|
||||
"preview_url": "NULL",
|
||||
"is_in_library": "0",
|
||||
"should_ignore": "0",
|
||||
"early_access_ends_at": "NULL",
|
||||
"is_early_access": "0",
|
||||
}
|
||||
|
||||
select_parts = []
|
||||
@@ -667,6 +724,8 @@ class ModelUpdateService:
|
||||
is_in_library=False,
|
||||
should_ignore=should_ignore,
|
||||
sort_index=len(versions),
|
||||
early_access_ends_at=None,
|
||||
is_early_access=False,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -686,16 +745,17 @@ class ModelUpdateService:
|
||||
async with self._lock:
|
||||
return self._get_record(model_type, model_id)
|
||||
|
||||
async def has_update(self, model_type: str, model_id: int) -> bool:
|
||||
async def has_update(self, model_type: str, model_id: int, hide_early_access: bool = False) -> bool:
|
||||
"""Determine if a model has updates pending."""
|
||||
|
||||
record = await self.get_record(model_type, model_id)
|
||||
return record.has_update() if record else False
|
||||
return record.has_update(hide_early_access=hide_early_access) if record else False
|
||||
|
||||
async def has_updates_bulk(
|
||||
self,
|
||||
model_type: str,
|
||||
model_ids: Sequence[int],
|
||||
hide_early_access: bool = False,
|
||||
) -> Dict[int, bool]:
|
||||
"""Return update availability for each model id in a single database pass."""
|
||||
|
||||
@@ -707,7 +767,7 @@ class ModelUpdateService:
|
||||
records = self._get_records_bulk(model_type, normalized_ids)
|
||||
|
||||
return {
|
||||
model_id: records.get(model_id).has_update() if records.get(model_id) else False
|
||||
model_id: records.get(model_id).has_update(hide_early_access=hide_early_access) if records.get(model_id) else False
|
||||
for model_id in normalized_ids
|
||||
}
|
||||
|
||||
@@ -987,6 +1047,8 @@ class ModelUpdateService:
|
||||
is_in_library=True,
|
||||
should_ignore=ignore_map.get(missing_id, False),
|
||||
sort_index=len(versions),
|
||||
early_access_ends_at=None,
|
||||
is_early_access=False,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1029,6 +1091,8 @@ class ModelUpdateService:
|
||||
is_in_library=version_id in local_set,
|
||||
should_ignore=ignore_map.get(version_id, remote_version.should_ignore),
|
||||
sort_index=sort_map.get(version_id, index),
|
||||
early_access_ends_at=remote_version.early_access_ends_at,
|
||||
is_early_access=remote_version.is_early_access,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1055,6 +1119,8 @@ class ModelUpdateService:
|
||||
is_in_library=True,
|
||||
should_ignore=ignore_map.get(version_id, False),
|
||||
sort_index=len(versions),
|
||||
early_access_ends_at=None,
|
||||
is_early_access=False,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1120,6 +1186,11 @@ class ModelUpdateService:
|
||||
released_at = _normalize_string(entry.get("publishedAt") or entry.get("createdAt"))
|
||||
size_bytes = self._extract_size_bytes(entry.get("files"))
|
||||
preview_url = self._extract_preview_url(entry.get("images"))
|
||||
early_access_ends_at = _normalize_string(entry.get("earlyAccessEndsAt"))
|
||||
|
||||
# Check availability field from bulk API for basic EA detection
|
||||
availability = _normalize_string(entry.get("availability"))
|
||||
is_early_access = availability == "EarlyAccess"
|
||||
|
||||
return ModelVersionRecord(
|
||||
version_id=version_id,
|
||||
@@ -1130,7 +1201,9 @@ class ModelUpdateService:
|
||||
preview_url=preview_url,
|
||||
is_in_library=False,
|
||||
should_ignore=False,
|
||||
early_access_ends_at=early_access_ends_at,
|
||||
sort_index=index,
|
||||
is_early_access=is_early_access,
|
||||
)
|
||||
|
||||
def _extract_size_bytes(self, files) -> Optional[int]:
|
||||
@@ -1231,7 +1304,8 @@ class ModelUpdateService:
|
||||
version_rows = conn.execute(
|
||||
f"""
|
||||
SELECT model_id, version_id, sort_index, name, base_model, released_at,
|
||||
size_bytes, preview_url, is_in_library, should_ignore
|
||||
size_bytes, preview_url, is_in_library, should_ignore, early_access_ends_at,
|
||||
is_early_access
|
||||
FROM model_update_versions
|
||||
WHERE model_id IN ({placeholders})
|
||||
ORDER BY model_id ASC, sort_index ASC, version_id ASC
|
||||
@@ -1252,7 +1326,9 @@ class ModelUpdateService:
|
||||
preview_url=row["preview_url"],
|
||||
is_in_library=bool(row["is_in_library"]),
|
||||
should_ignore=bool(row["should_ignore"]),
|
||||
early_access_ends_at=row["early_access_ends_at"],
|
||||
sort_index=_normalize_int(row["sort_index"]) or 0,
|
||||
is_early_access=bool(row["is_early_access"]),
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1308,8 +1384,9 @@ class ModelUpdateService:
|
||||
"""
|
||||
INSERT INTO model_update_versions (
|
||||
version_id, model_id, sort_index, name, base_model, released_at,
|
||||
size_bytes, preview_url, is_in_library, should_ignore
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
size_bytes, preview_url, is_in_library, should_ignore, early_access_ends_at,
|
||||
is_early_access
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
version.version_id,
|
||||
@@ -1322,6 +1399,8 @@ class ModelUpdateService:
|
||||
version.preview_url,
|
||||
1 if version.is_in_library else 0,
|
||||
1 if version.should_ignore else 0,
|
||||
version.early_access_ends_at,
|
||||
1 if version.is_early_access else 0,
|
||||
),
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sqlite3
|
||||
import threading
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Mapping, Optional, Sequence, Tuple
|
||||
|
||||
from ..utils.settings_paths import get_project_root, get_settings_dir
|
||||
from ..utils.cache_paths import CacheType, resolve_cache_path_with_migration
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -53,6 +52,7 @@ class PersistentModelCache:
|
||||
"trained_words",
|
||||
"license_flags",
|
||||
"civitai_deleted",
|
||||
"skip_metadata_refresh",
|
||||
"exclude",
|
||||
"db_checked",
|
||||
"last_checked_at",
|
||||
@@ -184,6 +184,7 @@ class PersistentModelCache:
|
||||
"tags": tags.get(file_path, []),
|
||||
"civitai": civitai,
|
||||
"civitai_deleted": bool(row["civitai_deleted"]),
|
||||
"skip_metadata_refresh": bool(row["skip_metadata_refresh"]),
|
||||
"license_flags": int(license_value),
|
||||
}
|
||||
raw_data.append(item)
|
||||
@@ -404,20 +405,12 @@ class PersistentModelCache:
|
||||
# Internal helpers -------------------------------------------------
|
||||
|
||||
def _resolve_default_path(self, library_name: str) -> str:
|
||||
override = os.environ.get("LORA_MANAGER_CACHE_DB")
|
||||
if override:
|
||||
return override
|
||||
try:
|
||||
settings_dir = get_settings_dir(create=True)
|
||||
except Exception as exc: # pragma: no cover - defensive guard
|
||||
logger.warning("Falling back to project directory for cache: %s", exc)
|
||||
settings_dir = get_project_root()
|
||||
safe_name = re.sub(r"[^A-Za-z0-9_.-]", "_", library_name or "default")
|
||||
if safe_name.lower() in ("default", ""):
|
||||
legacy_path = os.path.join(settings_dir, self._DEFAULT_FILENAME)
|
||||
if os.path.exists(legacy_path):
|
||||
return legacy_path
|
||||
return os.path.join(settings_dir, "model_cache", f"{safe_name}.sqlite")
|
||||
env_override = os.environ.get("LORA_MANAGER_CACHE_DB")
|
||||
return resolve_cache_path_with_migration(
|
||||
CacheType.MODEL,
|
||||
library_name=library_name,
|
||||
env_override=env_override,
|
||||
)
|
||||
|
||||
def _initialize_schema(self) -> None:
|
||||
with self._db_lock:
|
||||
@@ -500,6 +493,7 @@ class PersistentModelCache:
|
||||
"civitai_creator_username": "TEXT",
|
||||
"civitai_model_type": "TEXT",
|
||||
"civitai_deleted": "INTEGER DEFAULT 0",
|
||||
"skip_metadata_refresh": "INTEGER DEFAULT 0",
|
||||
# Persisting without explicit flags should assume CivitAI's documented defaults (0b111001 == 57).
|
||||
"license_flags": f"INTEGER DEFAULT {DEFAULT_LICENSE_FLAGS}",
|
||||
}
|
||||
@@ -572,6 +566,7 @@ class PersistentModelCache:
|
||||
trained_words_json,
|
||||
int(license_flags),
|
||||
1 if item.get("civitai_deleted") else 0,
|
||||
1 if item.get("skip_metadata_refresh") else 0,
|
||||
1 if item.get("exclude") else 0,
|
||||
1 if item.get("db_checked") else 0,
|
||||
float(item.get("last_checked_at") or 0.0),
|
||||
|
||||
484
py/services/persistent_recipe_cache.py
Normal file
484
py/services/persistent_recipe_cache.py
Normal file
@@ -0,0 +1,484 @@
|
||||
"""SQLite-based persistent cache for recipe metadata.
|
||||
|
||||
This module provides fast recipe cache persistence using SQLite, enabling
|
||||
quick startup by loading from cache instead of walking directories and
|
||||
parsing JSON files.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sqlite3
|
||||
import threading
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Optional, Set, Tuple
|
||||
|
||||
from ..utils.cache_paths import CacheType, resolve_cache_path_with_migration
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PersistedRecipeData:
|
||||
"""Lightweight structure returned by the persistent recipe cache."""
|
||||
|
||||
raw_data: List[Dict]
|
||||
file_stats: Dict[str, Tuple[float, int]] # json_path -> (mtime, size)
|
||||
|
||||
|
||||
class PersistentRecipeCache:
|
||||
"""Persist recipe metadata in SQLite for fast startup."""
|
||||
|
||||
_DEFAULT_FILENAME = "recipe_cache.sqlite"
|
||||
_RECIPE_COLUMNS: Tuple[str, ...] = (
|
||||
"recipe_id",
|
||||
"file_path",
|
||||
"json_path",
|
||||
"title",
|
||||
"folder",
|
||||
"base_model",
|
||||
"fingerprint",
|
||||
"created_date",
|
||||
"modified",
|
||||
"file_mtime",
|
||||
"file_size",
|
||||
"favorite",
|
||||
"repair_version",
|
||||
"preview_nsfw_level",
|
||||
"loras_json",
|
||||
"checkpoint_json",
|
||||
"gen_params_json",
|
||||
"tags_json",
|
||||
)
|
||||
_instances: Dict[str, "PersistentRecipeCache"] = {}
|
||||
_instance_lock = threading.Lock()
|
||||
|
||||
def __init__(self, library_name: str = "default", db_path: Optional[str] = None) -> None:
|
||||
self._library_name = library_name or "default"
|
||||
self._db_path = db_path or self._resolve_default_path(self._library_name)
|
||||
self._db_lock = threading.Lock()
|
||||
self._schema_initialized = False
|
||||
try:
|
||||
directory = os.path.dirname(self._db_path)
|
||||
if directory:
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
except Exception as exc:
|
||||
logger.warning("Could not create recipe cache directory %s: %s", directory, exc)
|
||||
if self.is_enabled():
|
||||
self._initialize_schema()
|
||||
|
||||
@classmethod
|
||||
def get_default(cls, library_name: Optional[str] = None) -> "PersistentRecipeCache":
|
||||
name = library_name or "default"
|
||||
with cls._instance_lock:
|
||||
if name not in cls._instances:
|
||||
cls._instances[name] = cls(name)
|
||||
return cls._instances[name]
|
||||
|
||||
@classmethod
|
||||
def clear_instances(cls) -> None:
|
||||
"""Clear all cached instances (useful for library switching)."""
|
||||
with cls._instance_lock:
|
||||
cls._instances.clear()
|
||||
|
||||
def is_enabled(self) -> bool:
|
||||
return os.environ.get("LORA_MANAGER_DISABLE_PERSISTENT_CACHE", "0") != "1"
|
||||
|
||||
def get_database_path(self) -> str:
|
||||
"""Expose the resolved SQLite database path."""
|
||||
return self._db_path
|
||||
|
||||
def load_cache(self) -> Optional[PersistedRecipeData]:
|
||||
"""Load all cached recipes from SQLite.
|
||||
|
||||
Returns:
|
||||
PersistedRecipeData with raw_data and file_stats if cache exists,
|
||||
None if cache is empty or unavailable.
|
||||
"""
|
||||
if not self.is_enabled():
|
||||
return None
|
||||
if not self._schema_initialized:
|
||||
self._initialize_schema()
|
||||
if not self._schema_initialized:
|
||||
return None
|
||||
|
||||
try:
|
||||
with self._db_lock:
|
||||
conn = self._connect(readonly=True)
|
||||
try:
|
||||
# Load all recipes
|
||||
columns_sql = ", ".join(self._RECIPE_COLUMNS)
|
||||
rows = conn.execute(f"SELECT {columns_sql} FROM recipes").fetchall()
|
||||
|
||||
if not rows:
|
||||
return None
|
||||
|
||||
finally:
|
||||
conn.close()
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to load persisted recipe cache: %s", exc)
|
||||
return None
|
||||
|
||||
raw_data: List[Dict] = []
|
||||
file_stats: Dict[str, Tuple[float, int]] = {}
|
||||
|
||||
for row in rows:
|
||||
recipe = self._row_to_recipe(row)
|
||||
raw_data.append(recipe)
|
||||
|
||||
json_path = row["json_path"]
|
||||
if json_path:
|
||||
file_stats[json_path] = (
|
||||
row["file_mtime"] or 0.0,
|
||||
row["file_size"] or 0,
|
||||
)
|
||||
|
||||
return PersistedRecipeData(raw_data=raw_data, file_stats=file_stats)
|
||||
|
||||
def save_cache(self, recipes: List[Dict], json_paths: Optional[Dict[str, str]] = None) -> None:
|
||||
"""Save all recipes to SQLite cache.
|
||||
|
||||
Args:
|
||||
recipes: List of recipe dictionaries to persist.
|
||||
json_paths: Optional mapping of recipe_id -> json_path for file stats.
|
||||
"""
|
||||
if not self.is_enabled():
|
||||
return
|
||||
if not self._schema_initialized:
|
||||
self._initialize_schema()
|
||||
if not self._schema_initialized:
|
||||
return
|
||||
|
||||
try:
|
||||
with self._db_lock:
|
||||
conn = self._connect()
|
||||
try:
|
||||
conn.execute("PRAGMA foreign_keys = ON")
|
||||
conn.execute("BEGIN")
|
||||
|
||||
# Clear existing data
|
||||
conn.execute("DELETE FROM recipes")
|
||||
|
||||
# Prepare and insert all rows
|
||||
recipe_rows = []
|
||||
for recipe in recipes:
|
||||
recipe_id = str(recipe.get("id", ""))
|
||||
if not recipe_id:
|
||||
continue
|
||||
|
||||
json_path = ""
|
||||
if json_paths:
|
||||
json_path = json_paths.get(recipe_id, "")
|
||||
|
||||
row = self._prepare_recipe_row(recipe, json_path)
|
||||
recipe_rows.append(row)
|
||||
|
||||
if recipe_rows:
|
||||
placeholders = ", ".join(["?"] * len(self._RECIPE_COLUMNS))
|
||||
columns = ", ".join(self._RECIPE_COLUMNS)
|
||||
conn.executemany(
|
||||
f"INSERT INTO recipes ({columns}) VALUES ({placeholders})",
|
||||
recipe_rows,
|
||||
)
|
||||
|
||||
conn.commit()
|
||||
logger.debug("Persisted %d recipes to cache", len(recipe_rows))
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to persist recipe cache: %s", exc)
|
||||
|
||||
def get_file_stats(self) -> Dict[str, Tuple[float, int]]:
|
||||
"""Return stored file stats for all cached recipes.
|
||||
|
||||
Returns:
|
||||
Dictionary mapping json_path -> (mtime, size).
|
||||
"""
|
||||
if not self.is_enabled() or not self._schema_initialized:
|
||||
return {}
|
||||
|
||||
try:
|
||||
with self._db_lock:
|
||||
conn = self._connect(readonly=True)
|
||||
try:
|
||||
rows = conn.execute(
|
||||
"SELECT json_path, file_mtime, file_size FROM recipes WHERE json_path IS NOT NULL"
|
||||
).fetchall()
|
||||
return {
|
||||
row["json_path"]: (row["file_mtime"] or 0.0, row["file_size"] or 0)
|
||||
for row in rows
|
||||
if row["json_path"]
|
||||
}
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
def update_recipe(self, recipe: Dict, json_path: Optional[str] = None) -> None:
|
||||
"""Update or insert a single recipe in the cache.
|
||||
|
||||
Args:
|
||||
recipe: The recipe dictionary to persist.
|
||||
json_path: Optional path to the recipe JSON file.
|
||||
"""
|
||||
if not self.is_enabled() or not self._schema_initialized:
|
||||
return
|
||||
|
||||
recipe_id = str(recipe.get("id", ""))
|
||||
if not recipe_id:
|
||||
return
|
||||
|
||||
try:
|
||||
with self._db_lock:
|
||||
conn = self._connect()
|
||||
try:
|
||||
row = self._prepare_recipe_row(recipe, json_path or "")
|
||||
placeholders = ", ".join(["?"] * len(self._RECIPE_COLUMNS))
|
||||
columns = ", ".join(self._RECIPE_COLUMNS)
|
||||
conn.execute(
|
||||
f"INSERT OR REPLACE INTO recipes ({columns}) VALUES ({placeholders})",
|
||||
row,
|
||||
)
|
||||
conn.commit()
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to update recipe %s in cache: %s", recipe_id, exc)
|
||||
|
||||
def remove_recipe(self, recipe_id: str) -> None:
|
||||
"""Remove a recipe from the cache by ID.
|
||||
|
||||
Args:
|
||||
recipe_id: The ID of the recipe to remove.
|
||||
"""
|
||||
if not self.is_enabled() or not self._schema_initialized:
|
||||
return
|
||||
|
||||
if not recipe_id:
|
||||
return
|
||||
|
||||
try:
|
||||
with self._db_lock:
|
||||
conn = self._connect()
|
||||
try:
|
||||
conn.execute("DELETE FROM recipes WHERE recipe_id = ?", (str(recipe_id),))
|
||||
conn.commit()
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to remove recipe %s from cache: %s", recipe_id, exc)
|
||||
|
||||
def get_indexed_recipe_ids(self) -> Set[str]:
|
||||
"""Return all recipe IDs in the cache.
|
||||
|
||||
Returns:
|
||||
Set of recipe ID strings.
|
||||
"""
|
||||
if not self.is_enabled() or not self._schema_initialized:
|
||||
return set()
|
||||
|
||||
try:
|
||||
with self._db_lock:
|
||||
conn = self._connect(readonly=True)
|
||||
try:
|
||||
rows = conn.execute("SELECT recipe_id FROM recipes").fetchall()
|
||||
return {row["recipe_id"] for row in rows if row["recipe_id"]}
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception:
|
||||
return set()
|
||||
|
||||
def get_recipe_count(self) -> int:
|
||||
"""Return the number of recipes in the cache."""
|
||||
if not self.is_enabled() or not self._schema_initialized:
|
||||
return 0
|
||||
|
||||
try:
|
||||
with self._db_lock:
|
||||
conn = self._connect(readonly=True)
|
||||
try:
|
||||
result = conn.execute("SELECT COUNT(*) FROM recipes").fetchone()
|
||||
return result[0] if result else 0
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception:
|
||||
return 0
|
||||
|
||||
# Internal helpers
|
||||
|
||||
def _resolve_default_path(self, library_name: str) -> str:
|
||||
env_override = os.environ.get("LORA_MANAGER_RECIPE_CACHE_DB")
|
||||
return resolve_cache_path_with_migration(
|
||||
CacheType.RECIPE,
|
||||
library_name=library_name,
|
||||
env_override=env_override,
|
||||
)
|
||||
|
||||
def _initialize_schema(self) -> None:
|
||||
with self._db_lock:
|
||||
if self._schema_initialized:
|
||||
return
|
||||
try:
|
||||
with self._connect() as conn:
|
||||
conn.execute("PRAGMA journal_mode=WAL")
|
||||
conn.execute("PRAGMA foreign_keys = ON")
|
||||
conn.executescript(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS recipes (
|
||||
recipe_id TEXT PRIMARY KEY,
|
||||
file_path TEXT,
|
||||
json_path TEXT,
|
||||
title TEXT,
|
||||
folder TEXT,
|
||||
base_model TEXT,
|
||||
fingerprint TEXT,
|
||||
created_date REAL,
|
||||
modified REAL,
|
||||
file_mtime REAL,
|
||||
file_size INTEGER,
|
||||
favorite INTEGER DEFAULT 0,
|
||||
repair_version INTEGER DEFAULT 0,
|
||||
preview_nsfw_level INTEGER DEFAULT 0,
|
||||
loras_json TEXT,
|
||||
checkpoint_json TEXT,
|
||||
gen_params_json TEXT,
|
||||
tags_json TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_recipes_json_path ON recipes(json_path);
|
||||
CREATE INDEX IF NOT EXISTS idx_recipes_fingerprint ON recipes(fingerprint);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS cache_metadata (
|
||||
key TEXT PRIMARY KEY,
|
||||
value TEXT
|
||||
);
|
||||
"""
|
||||
)
|
||||
conn.commit()
|
||||
self._schema_initialized = True
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to initialize persistent recipe cache schema: %s", exc)
|
||||
|
||||
def _connect(self, readonly: bool = False) -> sqlite3.Connection:
|
||||
uri = False
|
||||
path = self._db_path
|
||||
if readonly:
|
||||
if not os.path.exists(path):
|
||||
raise FileNotFoundError(path)
|
||||
path = f"file:{path}?mode=ro"
|
||||
uri = True
|
||||
conn = sqlite3.connect(path, check_same_thread=False, uri=uri, detect_types=sqlite3.PARSE_DECLTYPES)
|
||||
conn.row_factory = sqlite3.Row
|
||||
return conn
|
||||
|
||||
def _prepare_recipe_row(self, recipe: Dict, json_path: str) -> Tuple:
|
||||
"""Convert a recipe dict to a row tuple for SQLite insertion."""
|
||||
loras = recipe.get("loras")
|
||||
loras_json = json.dumps(loras) if loras else None
|
||||
|
||||
checkpoint = recipe.get("checkpoint")
|
||||
checkpoint_json = json.dumps(checkpoint) if checkpoint else None
|
||||
|
||||
gen_params = recipe.get("gen_params")
|
||||
gen_params_json = json.dumps(gen_params) if gen_params else None
|
||||
|
||||
tags = recipe.get("tags")
|
||||
tags_json = json.dumps(tags) if tags else None
|
||||
|
||||
# Get file stats if json_path exists
|
||||
file_mtime = 0.0
|
||||
file_size = 0
|
||||
if json_path and os.path.exists(json_path):
|
||||
try:
|
||||
stat = os.stat(json_path)
|
||||
file_mtime = stat.st_mtime
|
||||
file_size = stat.st_size
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
return (
|
||||
str(recipe.get("id", "")),
|
||||
recipe.get("file_path"),
|
||||
json_path,
|
||||
recipe.get("title"),
|
||||
recipe.get("folder"),
|
||||
recipe.get("base_model"),
|
||||
recipe.get("fingerprint"),
|
||||
float(recipe.get("created_date") or 0.0),
|
||||
float(recipe.get("modified") or 0.0),
|
||||
file_mtime,
|
||||
file_size,
|
||||
1 if recipe.get("favorite") else 0,
|
||||
int(recipe.get("repair_version") or 0),
|
||||
int(recipe.get("preview_nsfw_level") or 0),
|
||||
loras_json,
|
||||
checkpoint_json,
|
||||
gen_params_json,
|
||||
tags_json,
|
||||
)
|
||||
|
||||
def _row_to_recipe(self, row: sqlite3.Row) -> Dict:
|
||||
"""Convert a SQLite row to a recipe dictionary."""
|
||||
loras = []
|
||||
if row["loras_json"]:
|
||||
try:
|
||||
loras = json.loads(row["loras_json"])
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
checkpoint = None
|
||||
if row["checkpoint_json"]:
|
||||
try:
|
||||
checkpoint = json.loads(row["checkpoint_json"])
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
gen_params = {}
|
||||
if row["gen_params_json"]:
|
||||
try:
|
||||
gen_params = json.loads(row["gen_params_json"])
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
tags = []
|
||||
if row["tags_json"]:
|
||||
try:
|
||||
tags = json.loads(row["tags_json"])
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
recipe = {
|
||||
"id": row["recipe_id"],
|
||||
"file_path": row["file_path"] or "",
|
||||
"title": row["title"] or "",
|
||||
"folder": row["folder"] or "",
|
||||
"base_model": row["base_model"] or "",
|
||||
"fingerprint": row["fingerprint"] or "",
|
||||
"created_date": row["created_date"] or 0.0,
|
||||
"modified": row["modified"] or 0.0,
|
||||
"favorite": bool(row["favorite"]),
|
||||
"repair_version": row["repair_version"] or 0,
|
||||
"preview_nsfw_level": row["preview_nsfw_level"] or 0,
|
||||
"loras": loras,
|
||||
"gen_params": gen_params,
|
||||
}
|
||||
|
||||
if tags:
|
||||
recipe["tags"] = tags
|
||||
|
||||
if checkpoint:
|
||||
recipe["checkpoint"] = checkpoint
|
||||
|
||||
return recipe
|
||||
|
||||
|
||||
def get_persistent_recipe_cache() -> PersistentRecipeCache:
|
||||
"""Get the default persistent recipe cache instance for the active library."""
|
||||
from .settings_manager import get_settings_manager
|
||||
|
||||
library_name = get_settings_manager().get_active_library_name()
|
||||
return PersistentRecipeCache.get_default(library_name)
|
||||
@@ -15,7 +15,7 @@ import threading
|
||||
import time
|
||||
from typing import Any, Dict, List, Optional, Set
|
||||
|
||||
from ..utils.settings_paths import get_settings_dir
|
||||
from ..utils.cache_paths import CacheType, resolve_cache_path_with_migration
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -67,17 +67,11 @@ class RecipeFTSIndex:
|
||||
|
||||
def _resolve_default_path(self) -> str:
|
||||
"""Resolve the default database path."""
|
||||
override = os.environ.get("LORA_MANAGER_RECIPE_FTS_DB")
|
||||
if override:
|
||||
return override
|
||||
|
||||
try:
|
||||
settings_dir = get_settings_dir(create=True)
|
||||
except Exception as exc:
|
||||
logger.warning("Falling back to current directory for FTS index: %s", exc)
|
||||
settings_dir = "."
|
||||
|
||||
return os.path.join(settings_dir, self._DEFAULT_FILENAME)
|
||||
env_override = os.environ.get("LORA_MANAGER_RECIPE_FTS_DB")
|
||||
return resolve_cache_path_with_migration(
|
||||
CacheType.RECIPE_FTS,
|
||||
env_override=env_override,
|
||||
)
|
||||
|
||||
def get_database_path(self) -> str:
|
||||
"""Return the resolved database path."""
|
||||
@@ -403,6 +397,78 @@ class RecipeFTSIndex:
|
||||
except Exception:
|
||||
return 0
|
||||
|
||||
def get_indexed_recipe_ids(self) -> Set[str]:
|
||||
"""Return all recipe IDs currently in the index.
|
||||
|
||||
Returns:
|
||||
Set of recipe ID strings.
|
||||
"""
|
||||
if not self._schema_initialized:
|
||||
self.initialize()
|
||||
|
||||
if not self._schema_initialized:
|
||||
return set()
|
||||
|
||||
try:
|
||||
with self._lock:
|
||||
conn = self._connect(readonly=True)
|
||||
try:
|
||||
cursor = conn.execute("SELECT recipe_id FROM recipe_fts")
|
||||
return {row[0] for row in cursor.fetchall() if row[0]}
|
||||
finally:
|
||||
conn.close()
|
||||
except FileNotFoundError:
|
||||
return set()
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to get indexed recipe IDs: %s", exc)
|
||||
return set()
|
||||
|
||||
def validate_index(self, recipe_count: int, recipe_ids: Set[str]) -> bool:
|
||||
"""Check if the FTS index matches the expected recipes.
|
||||
|
||||
This method validates whether the existing FTS index can be reused
|
||||
without a full rebuild. It checks:
|
||||
1. The index has been initialized
|
||||
2. The count matches
|
||||
3. The recipe IDs match
|
||||
|
||||
Args:
|
||||
recipe_count: Expected number of recipes.
|
||||
recipe_ids: Expected set of recipe IDs.
|
||||
|
||||
Returns:
|
||||
True if the index is valid and can be reused, False otherwise.
|
||||
"""
|
||||
if not self._schema_initialized:
|
||||
self.initialize()
|
||||
|
||||
if not self._schema_initialized:
|
||||
return False
|
||||
|
||||
try:
|
||||
indexed_count = self.get_indexed_count()
|
||||
if indexed_count != recipe_count:
|
||||
logger.debug(
|
||||
"FTS index count mismatch: indexed=%d, expected=%d",
|
||||
indexed_count, recipe_count
|
||||
)
|
||||
return False
|
||||
|
||||
indexed_ids = self.get_indexed_recipe_ids()
|
||||
if indexed_ids != recipe_ids:
|
||||
missing = recipe_ids - indexed_ids
|
||||
extra = indexed_ids - recipe_ids
|
||||
if missing:
|
||||
logger.debug("FTS index missing %d recipe IDs", len(missing))
|
||||
if extra:
|
||||
logger.debug("FTS index has %d extra recipe IDs", len(extra))
|
||||
return False
|
||||
|
||||
return True
|
||||
except Exception as exc:
|
||||
logger.debug("FTS index validation failed: %s", exc)
|
||||
return False
|
||||
|
||||
# Internal helpers
|
||||
|
||||
def _connect(self, readonly: bool = False) -> sqlite3.Connection:
|
||||
@@ -509,21 +575,20 @@ class RecipeFTSIndex:
|
||||
if not fields:
|
||||
return term_expr
|
||||
|
||||
# Build field-restricted query with OR between fields
|
||||
# Build field-restricted query where ALL words must match within at least one field
|
||||
field_clauses = []
|
||||
for field in fields:
|
||||
if field in self.FIELD_MAP:
|
||||
cols = self.FIELD_MAP[field]
|
||||
for col in cols:
|
||||
# FTS5 column filter syntax: column:term
|
||||
# Need to handle multiple terms properly
|
||||
for term in prefix_terms:
|
||||
field_clauses.append(f'{col}:{term}')
|
||||
# Create clause where ALL terms must match in this column (implicit AND)
|
||||
col_terms = [f'{col}:{term}' for term in prefix_terms]
|
||||
field_clauses.append('(' + ' '.join(col_terms) + ')')
|
||||
|
||||
if not field_clauses:
|
||||
return term_expr
|
||||
|
||||
# Combine field clauses with OR
|
||||
# Any field matching all terms is acceptable (OR between field clauses)
|
||||
return ' OR '.join(field_clauses)
|
||||
|
||||
def _escape_fts_query(self, text: str) -> str:
|
||||
|
||||
@@ -9,6 +9,7 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple
|
||||
from ..config import config
|
||||
from .recipe_cache import RecipeCache
|
||||
from .recipe_fts_index import RecipeFTSIndex
|
||||
from .persistent_recipe_cache import PersistentRecipeCache, get_persistent_recipe_cache, PersistedRecipeData
|
||||
from .service_registry import ServiceRegistry
|
||||
from .lora_scanner import LoraScanner
|
||||
from .metadata_service import get_default_metadata_provider
|
||||
@@ -78,6 +79,9 @@ class RecipeScanner:
|
||||
# FTS index for fast search
|
||||
self._fts_index: Optional[RecipeFTSIndex] = None
|
||||
self._fts_index_task: Optional[asyncio.Task] = None
|
||||
# Persistent cache for fast startup
|
||||
self._persistent_cache: Optional[PersistentRecipeCache] = None
|
||||
self._json_path_map: Dict[str, str] = {} # recipe_id -> json_path
|
||||
if lora_scanner:
|
||||
self._lora_scanner = lora_scanner
|
||||
if checkpoint_scanner:
|
||||
@@ -109,6 +113,11 @@ class RecipeScanner:
|
||||
self._fts_index.clear()
|
||||
self._fts_index = None
|
||||
|
||||
# Reset persistent cache instance for new library
|
||||
self._persistent_cache = None
|
||||
self._json_path_map = {}
|
||||
PersistentRecipeCache.clear_instances()
|
||||
|
||||
self._cache = None
|
||||
self._initialization_task = None
|
||||
self._is_initializing = False
|
||||
@@ -321,12 +330,17 @@ class RecipeScanner:
|
||||
with open(recipe_json_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(recipe, f, indent=4, ensure_ascii=False)
|
||||
|
||||
# 4. Update EXIF if image exists
|
||||
# 4. Update persistent SQLite cache
|
||||
if self._persistent_cache:
|
||||
self._persistent_cache.update_recipe(recipe, recipe_json_path)
|
||||
self._json_path_map[str(recipe_id)] = recipe_json_path
|
||||
|
||||
# 5. Update EXIF if image exists
|
||||
image_path = recipe.get('file_path')
|
||||
if image_path and os.path.exists(image_path):
|
||||
from ..utils.exif_utils import ExifUtils
|
||||
ExifUtils.append_recipe_metadata(image_path, recipe)
|
||||
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error persisting recipe {recipe_id}: {e}")
|
||||
@@ -408,117 +422,281 @@ class RecipeScanner:
|
||||
logger.error(f"Recipe Scanner: Error initializing cache in background: {e}")
|
||||
|
||||
def _initialize_recipe_cache_sync(self):
|
||||
"""Synchronous version of recipe cache initialization for thread pool execution"""
|
||||
"""Synchronous version of recipe cache initialization for thread pool execution.
|
||||
|
||||
Uses persistent cache for fast startup when available:
|
||||
1. Try to load from persistent SQLite cache
|
||||
2. Reconcile with filesystem (check mtime/size for changes)
|
||||
3. Fall back to full directory scan if cache miss or reconciliation fails
|
||||
4. Persist results for next startup
|
||||
"""
|
||||
try:
|
||||
# Ensure cache exists to avoid None reference errors
|
||||
if self._cache is None:
|
||||
self._cache = RecipeCache(
|
||||
raw_data=[],
|
||||
sorted_by_name=[],
|
||||
sorted_by_date=[],
|
||||
folders=[],
|
||||
folder_tree={},
|
||||
)
|
||||
|
||||
# Create a new event loop for this thread
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
|
||||
# Create a synchronous method to bypass the async lock
|
||||
def sync_initialize_cache():
|
||||
# We need to implement scan_all_recipes logic synchronously here
|
||||
# instead of calling the async method to avoid event loop issues
|
||||
recipes = []
|
||||
recipes_dir = self.recipes_dir
|
||||
|
||||
if not recipes_dir or not os.path.exists(recipes_dir):
|
||||
logger.warning(f"Recipes directory not found: {recipes_dir}")
|
||||
return recipes
|
||||
|
||||
# Get all recipe JSON files in the recipes directory
|
||||
recipe_files = []
|
||||
for root, _, files in os.walk(recipes_dir):
|
||||
recipe_count = sum(1 for f in files if f.lower().endswith('.recipe.json'))
|
||||
if recipe_count > 0:
|
||||
for file in files:
|
||||
if file.lower().endswith('.recipe.json'):
|
||||
recipe_files.append(os.path.join(root, file))
|
||||
|
||||
# Process each recipe file
|
||||
for recipe_path in recipe_files:
|
||||
try:
|
||||
with open(recipe_path, 'r', encoding='utf-8') as f:
|
||||
recipe_data = json.load(f)
|
||||
|
||||
# Validate recipe data
|
||||
if not recipe_data or not isinstance(recipe_data, dict):
|
||||
logger.warning(f"Invalid recipe data in {recipe_path}")
|
||||
continue
|
||||
|
||||
# Ensure required fields exist
|
||||
required_fields = ['id', 'file_path', 'title']
|
||||
if not all(field in recipe_data for field in required_fields):
|
||||
logger.warning(f"Missing required fields in {recipe_path}")
|
||||
continue
|
||||
|
||||
# Ensure the image file exists and prioritize local siblings
|
||||
image_path = recipe_data.get('file_path')
|
||||
if image_path:
|
||||
recipe_dir = os.path.dirname(recipe_path)
|
||||
image_filename = os.path.basename(image_path)
|
||||
local_sibling_path = os.path.normpath(os.path.join(recipe_dir, image_filename))
|
||||
|
||||
# If local sibling exists and stored path is different, prefer local
|
||||
if os.path.exists(local_sibling_path) and os.path.normpath(image_path) != local_sibling_path:
|
||||
recipe_data['file_path'] = local_sibling_path
|
||||
# Persist the repair
|
||||
try:
|
||||
with open(recipe_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(recipe_data, f, indent=4, ensure_ascii=False)
|
||||
logger.info(f"Updated recipe image path to local sibling: {local_sibling_path}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to persist repair for {recipe_path}: {e}")
|
||||
elif not os.path.exists(image_path):
|
||||
logger.warning(f"Recipe image not found and no local sibling: {image_path}")
|
||||
|
||||
# Ensure loras array exists
|
||||
if 'loras' not in recipe_data:
|
||||
recipe_data['loras'] = []
|
||||
|
||||
# Ensure gen_params exists
|
||||
if 'gen_params' not in recipe_data:
|
||||
recipe_data['gen_params'] = {}
|
||||
|
||||
# Add to list without async operations
|
||||
recipes.append(recipe_data)
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading recipe file {recipe_path}: {e}")
|
||||
import traceback
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
|
||||
# Update cache with the collected data
|
||||
self._cache.raw_data = recipes
|
||||
self._update_folder_metadata(self._cache)
|
||||
|
||||
# Create a simplified resort function that doesn't use await
|
||||
if hasattr(self._cache, "resort"):
|
||||
try:
|
||||
# Sort by name
|
||||
self._cache.sorted_by_name = natsorted(
|
||||
self._cache.raw_data,
|
||||
key=lambda x: x.get('title', '').lower()
|
||||
)
|
||||
|
||||
# Sort by date (modified or created)
|
||||
self._cache.sorted_by_date = sorted(
|
||||
self._cache.raw_data,
|
||||
key=lambda x: x.get('modified', x.get('created_date', 0)),
|
||||
reverse=True
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error sorting recipe cache: {e}")
|
||||
|
||||
|
||||
# Initialize persistent cache
|
||||
if self._persistent_cache is None:
|
||||
self._persistent_cache = get_persistent_recipe_cache()
|
||||
|
||||
recipes_dir = self.recipes_dir
|
||||
if not recipes_dir or not os.path.exists(recipes_dir):
|
||||
logger.warning(f"Recipes directory not found: {recipes_dir}")
|
||||
return self._cache
|
||||
|
||||
# Run our sync initialization that avoids lock conflicts
|
||||
return sync_initialize_cache()
|
||||
|
||||
# Try to load from persistent cache first
|
||||
persisted = self._persistent_cache.load_cache()
|
||||
if persisted:
|
||||
recipes, changed, json_paths = self._reconcile_recipe_cache(persisted, recipes_dir)
|
||||
self._json_path_map = json_paths
|
||||
|
||||
if not changed:
|
||||
# Fast path: use cached data directly
|
||||
logger.info("Recipe cache hit: loaded %d recipes from persistent cache", len(recipes))
|
||||
self._cache.raw_data = recipes
|
||||
self._update_folder_metadata(self._cache)
|
||||
self._sort_cache_sync()
|
||||
return self._cache
|
||||
else:
|
||||
# Partial update: some files changed
|
||||
logger.info("Recipe cache partial hit: reconciled %d recipes with filesystem", len(recipes))
|
||||
self._cache.raw_data = recipes
|
||||
self._update_folder_metadata(self._cache)
|
||||
self._sort_cache_sync()
|
||||
# Persist updated cache
|
||||
self._persistent_cache.save_cache(recipes, json_paths)
|
||||
return self._cache
|
||||
|
||||
# Fall back to full directory scan
|
||||
logger.info("Recipe cache miss: performing full directory scan")
|
||||
recipes, json_paths = self._full_directory_scan_sync(recipes_dir)
|
||||
self._json_path_map = json_paths
|
||||
|
||||
# Update cache with the collected data
|
||||
self._cache.raw_data = recipes
|
||||
self._update_folder_metadata(self._cache)
|
||||
self._sort_cache_sync()
|
||||
|
||||
# Persist for next startup
|
||||
self._persistent_cache.save_cache(recipes, json_paths)
|
||||
|
||||
return self._cache
|
||||
except Exception as e:
|
||||
logger.error(f"Error in thread-based recipe cache initialization: {e}")
|
||||
import traceback
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
return self._cache if hasattr(self, '_cache') else None
|
||||
finally:
|
||||
# Clean up the event loop
|
||||
loop.close()
|
||||
|
||||
def _reconcile_recipe_cache(
|
||||
self,
|
||||
persisted: PersistedRecipeData,
|
||||
recipes_dir: str,
|
||||
) -> Tuple[List[Dict], bool, Dict[str, str]]:
|
||||
"""Reconcile persisted cache with current filesystem state.
|
||||
|
||||
Args:
|
||||
persisted: The persisted recipe data from SQLite cache.
|
||||
recipes_dir: Path to the recipes directory.
|
||||
|
||||
Returns:
|
||||
Tuple of (recipes list, changed flag, json_paths dict).
|
||||
"""
|
||||
recipes: List[Dict] = []
|
||||
json_paths: Dict[str, str] = {}
|
||||
changed = False
|
||||
|
||||
# Build set of current recipe files
|
||||
current_files: Dict[str, Tuple[float, int]] = {}
|
||||
for root, _, files in os.walk(recipes_dir):
|
||||
for file in files:
|
||||
if file.lower().endswith('.recipe.json'):
|
||||
file_path = os.path.join(root, file)
|
||||
try:
|
||||
stat = os.stat(file_path)
|
||||
current_files[file_path] = (stat.st_mtime, stat.st_size)
|
||||
except OSError:
|
||||
continue
|
||||
|
||||
# Build recipe_id -> recipe lookup (O(n) instead of O(n²))
|
||||
recipe_by_id: Dict[str, Dict] = {
|
||||
str(r.get('id', '')): r for r in persisted.raw_data if r.get('id')
|
||||
}
|
||||
|
||||
# Build json_path -> recipe lookup from file_stats (O(m))
|
||||
persisted_by_path: Dict[str, Dict] = {}
|
||||
for json_path in persisted.file_stats.keys():
|
||||
basename = os.path.basename(json_path)
|
||||
if basename.lower().endswith('.recipe.json'):
|
||||
recipe_id = basename[:-len('.recipe.json')]
|
||||
if recipe_id in recipe_by_id:
|
||||
persisted_by_path[json_path] = recipe_by_id[recipe_id]
|
||||
|
||||
# Process current files
|
||||
for file_path, (current_mtime, current_size) in current_files.items():
|
||||
cached_stats = persisted.file_stats.get(file_path)
|
||||
|
||||
# Extract recipe_id from current file for fallback lookup
|
||||
basename = os.path.basename(file_path)
|
||||
recipe_id_from_file = basename[:-len('.recipe.json')] if basename.lower().endswith('.recipe.json') else None
|
||||
|
||||
if cached_stats:
|
||||
cached_mtime, cached_size = cached_stats
|
||||
# Check if file is unchanged
|
||||
if abs(current_mtime - cached_mtime) < 1.0 and current_size == cached_size:
|
||||
# Try direct path lookup first
|
||||
cached_recipe = persisted_by_path.get(file_path)
|
||||
# Fallback to recipe_id lookup if path lookup fails
|
||||
if not cached_recipe and recipe_id_from_file:
|
||||
cached_recipe = recipe_by_id.get(recipe_id_from_file)
|
||||
if cached_recipe:
|
||||
recipe_id = str(cached_recipe.get('id', ''))
|
||||
# Track folder from file path
|
||||
cached_recipe['folder'] = cached_recipe.get('folder') or self._calculate_folder(file_path)
|
||||
recipes.append(cached_recipe)
|
||||
json_paths[recipe_id] = file_path
|
||||
continue
|
||||
|
||||
# File is new or changed - need to re-read
|
||||
changed = True
|
||||
recipe_data = self._load_recipe_file_sync(file_path)
|
||||
if recipe_data:
|
||||
recipe_id = str(recipe_data.get('id', ''))
|
||||
recipes.append(recipe_data)
|
||||
json_paths[recipe_id] = file_path
|
||||
|
||||
# Check for deleted files
|
||||
for json_path in persisted.file_stats.keys():
|
||||
if json_path not in current_files:
|
||||
changed = True
|
||||
logger.debug("Recipe file deleted: %s", json_path)
|
||||
|
||||
return recipes, changed, json_paths
|
||||
|
||||
def _full_directory_scan_sync(self, recipes_dir: str) -> Tuple[List[Dict], Dict[str, str]]:
|
||||
"""Perform a full synchronous directory scan for recipes.
|
||||
|
||||
Args:
|
||||
recipes_dir: Path to the recipes directory.
|
||||
|
||||
Returns:
|
||||
Tuple of (recipes list, json_paths dict).
|
||||
"""
|
||||
recipes: List[Dict] = []
|
||||
json_paths: Dict[str, str] = {}
|
||||
|
||||
# Get all recipe JSON files
|
||||
recipe_files = []
|
||||
for root, _, files in os.walk(recipes_dir):
|
||||
for file in files:
|
||||
if file.lower().endswith('.recipe.json'):
|
||||
recipe_files.append(os.path.join(root, file))
|
||||
|
||||
# Process each recipe file
|
||||
for recipe_path in recipe_files:
|
||||
recipe_data = self._load_recipe_file_sync(recipe_path)
|
||||
if recipe_data:
|
||||
recipe_id = str(recipe_data.get('id', ''))
|
||||
recipes.append(recipe_data)
|
||||
json_paths[recipe_id] = recipe_path
|
||||
|
||||
return recipes, json_paths
|
||||
|
||||
def _load_recipe_file_sync(self, recipe_path: str) -> Optional[Dict]:
|
||||
"""Load a single recipe file synchronously.
|
||||
|
||||
Args:
|
||||
recipe_path: Path to the recipe JSON file.
|
||||
|
||||
Returns:
|
||||
Recipe dictionary if valid, None otherwise.
|
||||
"""
|
||||
try:
|
||||
with open(recipe_path, 'r', encoding='utf-8') as f:
|
||||
recipe_data = json.load(f)
|
||||
|
||||
# Validate recipe data
|
||||
if not recipe_data or not isinstance(recipe_data, dict):
|
||||
logger.warning(f"Invalid recipe data in {recipe_path}")
|
||||
return None
|
||||
|
||||
# Ensure required fields exist
|
||||
required_fields = ['id', 'file_path', 'title']
|
||||
if not all(field in recipe_data for field in required_fields):
|
||||
logger.warning(f"Missing required fields in {recipe_path}")
|
||||
return None
|
||||
|
||||
# Ensure the image file exists and prioritize local siblings
|
||||
image_path = recipe_data.get('file_path')
|
||||
path_updated = False
|
||||
if image_path:
|
||||
recipe_dir = os.path.dirname(recipe_path)
|
||||
image_filename = os.path.basename(image_path)
|
||||
local_sibling_path = os.path.normpath(os.path.join(recipe_dir, image_filename))
|
||||
|
||||
# If local sibling exists and stored path is different, prefer local
|
||||
if os.path.exists(local_sibling_path) and os.path.normpath(image_path) != local_sibling_path:
|
||||
recipe_data['file_path'] = local_sibling_path
|
||||
path_updated = True
|
||||
logger.info(f"Updated recipe image path to local sibling: {local_sibling_path}")
|
||||
elif not os.path.exists(image_path):
|
||||
logger.warning(f"Recipe image not found and no local sibling: {image_path}")
|
||||
|
||||
if path_updated:
|
||||
try:
|
||||
with open(recipe_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(recipe_data, f, indent=4, ensure_ascii=False)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to persist repair for {recipe_path}: {e}")
|
||||
|
||||
# Track folder placement relative to recipes directory
|
||||
recipe_data['folder'] = recipe_data.get('folder') or self._calculate_folder(recipe_path)
|
||||
|
||||
# Ensure loras array exists
|
||||
if 'loras' not in recipe_data:
|
||||
recipe_data['loras'] = []
|
||||
|
||||
# Ensure gen_params exists
|
||||
if 'gen_params' not in recipe_data:
|
||||
recipe_data['gen_params'] = {}
|
||||
|
||||
return recipe_data
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading recipe file {recipe_path}: {e}")
|
||||
import traceback
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
return None
|
||||
|
||||
def _sort_cache_sync(self) -> None:
|
||||
"""Sort cache data synchronously."""
|
||||
try:
|
||||
# Sort by name
|
||||
self._cache.sorted_by_name = natsorted(
|
||||
self._cache.raw_data,
|
||||
key=lambda x: x.get('title', '').lower()
|
||||
)
|
||||
|
||||
# Sort by date (modified or created)
|
||||
self._cache.sorted_by_date = sorted(
|
||||
self._cache.raw_data,
|
||||
key=lambda x: (x.get('modified', x.get('created_date', 0)), x.get('file_path', '')),
|
||||
reverse=True
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error sorting recipe cache: {e}")
|
||||
|
||||
async def _wait_for_lora_scanner(self) -> None:
|
||||
"""Ensure the LoRA scanner has initialized before recipe enrichment."""
|
||||
|
||||
@@ -570,7 +748,10 @@ class RecipeScanner:
|
||||
self._post_scan_task = loop.create_task(_run_enrichment(), name="recipe_cache_enrichment")
|
||||
|
||||
def _schedule_fts_index_build(self) -> None:
|
||||
"""Build FTS index in background without blocking."""
|
||||
"""Build FTS index in background without blocking.
|
||||
|
||||
Validates existing index first and reuses it if valid.
|
||||
"""
|
||||
|
||||
if self._fts_index_task and not self._fts_index_task.done():
|
||||
return # Already running
|
||||
@@ -587,7 +768,25 @@ class RecipeScanner:
|
||||
try:
|
||||
self._fts_index = RecipeFTSIndex()
|
||||
|
||||
# Run in thread pool (SQLite is blocking)
|
||||
# Check if existing index is valid
|
||||
recipe_ids = {str(r.get('id', '')) for r in self._cache.raw_data if r.get('id')}
|
||||
recipe_count = len(self._cache.raw_data)
|
||||
|
||||
# Run validation in thread pool
|
||||
is_valid = await loop.run_in_executor(
|
||||
None,
|
||||
self._fts_index.validate_index,
|
||||
recipe_count,
|
||||
recipe_ids
|
||||
)
|
||||
|
||||
if is_valid:
|
||||
logger.info("FTS index validated, reusing existing index with %d recipes", recipe_count)
|
||||
self._fts_index._ready.set()
|
||||
return
|
||||
|
||||
# Only rebuild if validation fails
|
||||
logger.info("FTS index invalid or outdated, rebuilding...")
|
||||
await loop.run_in_executor(
|
||||
None,
|
||||
self._fts_index.build_index,
|
||||
@@ -632,7 +831,12 @@ class RecipeScanner:
|
||||
fields = None
|
||||
|
||||
try:
|
||||
return self._fts_index.search(search, fields)
|
||||
result = self._fts_index.search(search, fields)
|
||||
# Return None if empty to trigger fuzzy fallback
|
||||
# Empty FTS results may indicate query syntax issues or need for fuzzy matching
|
||||
if not result:
|
||||
return None
|
||||
return result
|
||||
except Exception as exc:
|
||||
logger.debug("FTS search failed, falling back to fuzzy search: %s", exc)
|
||||
return None
|
||||
@@ -870,6 +1074,12 @@ class RecipeScanner:
|
||||
# Update FTS index
|
||||
self._update_fts_index_for_recipe(recipe_data, 'add')
|
||||
|
||||
# Persist to SQLite cache
|
||||
if self._persistent_cache:
|
||||
recipe_id = str(recipe_data.get('id', ''))
|
||||
json_path = self._json_path_map.get(recipe_id, '')
|
||||
self._persistent_cache.update_recipe(recipe_data, json_path)
|
||||
|
||||
async def remove_recipe(self, recipe_id: str) -> bool:
|
||||
"""Remove a recipe from the cache by ID."""
|
||||
|
||||
@@ -886,6 +1096,12 @@ class RecipeScanner:
|
||||
|
||||
# Update FTS index
|
||||
self._update_fts_index_for_recipe(recipe_id, 'remove')
|
||||
|
||||
# Remove from SQLite cache
|
||||
if self._persistent_cache:
|
||||
self._persistent_cache.remove_recipe(recipe_id)
|
||||
self._json_path_map.pop(recipe_id, None)
|
||||
|
||||
return True
|
||||
|
||||
async def bulk_remove(self, recipe_ids: Iterable[str]) -> int:
|
||||
@@ -895,9 +1111,13 @@ class RecipeScanner:
|
||||
removed = await cache.bulk_remove(recipe_ids, resort=False)
|
||||
if removed:
|
||||
self._schedule_resort()
|
||||
# Update FTS index for each removed recipe
|
||||
for recipe_id in (str(r.get('id', '')) for r in removed):
|
||||
# Update FTS index and persistent cache for each removed recipe
|
||||
for recipe in removed:
|
||||
recipe_id = str(recipe.get('id', ''))
|
||||
self._update_fts_index_for_recipe(recipe_id, 'remove')
|
||||
if self._persistent_cache:
|
||||
self._persistent_cache.remove_recipe(recipe_id)
|
||||
self._json_path_map.pop(recipe_id, None)
|
||||
return len(removed)
|
||||
|
||||
async def scan_all_recipes(self) -> List[Dict]:
|
||||
@@ -1131,8 +1351,9 @@ class RecipeScanner:
|
||||
|
||||
# Get hash from the first file
|
||||
for file_info in version_info.get('files', []):
|
||||
if file_info.get('hashes', {}).get('SHA256'):
|
||||
return file_info['hashes']['SHA256'], False # Return hash with False for isDeleted flag
|
||||
sha256_hash = (file_info.get('hashes') or {}).get('SHA256')
|
||||
if sha256_hash:
|
||||
return sha256_hash, False # Return hash with False for isDeleted flag
|
||||
|
||||
logger.debug(f"No SHA256 hash found in version info for ID: {model_version_id}")
|
||||
return None, False
|
||||
@@ -1690,11 +1911,11 @@ class RecipeScanner:
|
||||
|
||||
async def update_recipe_metadata(self, recipe_id: str, metadata: dict) -> bool:
|
||||
"""Update recipe metadata (like title and tags) in both file system and cache
|
||||
|
||||
|
||||
Args:
|
||||
recipe_id: The ID of the recipe to update
|
||||
metadata: Dictionary containing metadata fields to update (title, tags, etc.)
|
||||
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise
|
||||
"""
|
||||
@@ -1702,16 +1923,16 @@ class RecipeScanner:
|
||||
recipe_json_path = await self.get_recipe_json_path(recipe_id)
|
||||
if not recipe_json_path or not os.path.exists(recipe_json_path):
|
||||
return False
|
||||
|
||||
|
||||
try:
|
||||
# Load existing recipe data
|
||||
with open(recipe_json_path, 'r', encoding='utf-8') as f:
|
||||
recipe_data = json.load(f)
|
||||
|
||||
|
||||
# Update fields
|
||||
for key, value in metadata.items():
|
||||
recipe_data[key] = value
|
||||
|
||||
|
||||
# Save updated recipe
|
||||
with open(recipe_json_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(recipe_data, f, indent=4, ensure_ascii=False)
|
||||
@@ -1724,6 +1945,11 @@ class RecipeScanner:
|
||||
# Update FTS index
|
||||
self._update_fts_index_for_recipe(recipe_data, 'update')
|
||||
|
||||
# Update persistent SQLite cache
|
||||
if self._persistent_cache:
|
||||
self._persistent_cache.update_recipe(recipe_data, recipe_json_path)
|
||||
self._json_path_map[recipe_id] = recipe_json_path
|
||||
|
||||
# If the recipe has an image, update its EXIF metadata
|
||||
from ..utils.exif_utils import ExifUtils
|
||||
image_path = recipe_data.get('file_path')
|
||||
@@ -1795,6 +2021,11 @@ class RecipeScanner:
|
||||
# Update FTS index
|
||||
self._update_fts_index_for_recipe(recipe_data, 'update')
|
||||
|
||||
# Update persistent SQLite cache
|
||||
if self._persistent_cache:
|
||||
self._persistent_cache.update_recipe(recipe_data, recipe_json_path)
|
||||
self._json_path_map[recipe_id] = recipe_json_path
|
||||
|
||||
updated_lora = dict(lora_entry)
|
||||
if target_lora is not None:
|
||||
preview_url = target_lora.get('preview_url')
|
||||
@@ -1918,26 +2149,31 @@ class RecipeScanner:
|
||||
if not recipes_to_update:
|
||||
return 0, 0
|
||||
|
||||
# Persist changes to disk
|
||||
# Persist changes to disk and SQLite cache
|
||||
async with self._mutation_lock:
|
||||
for recipe in recipes_to_update:
|
||||
recipe_id = recipe.get('id')
|
||||
recipe_id = str(recipe.get('id', ''))
|
||||
if not recipe_id:
|
||||
continue
|
||||
|
||||
|
||||
recipe_path = os.path.join(self.recipes_dir, f"{recipe_id}.recipe.json")
|
||||
try:
|
||||
self._write_recipe_file(recipe_path, recipe)
|
||||
file_updated_count += 1
|
||||
logger.info(f"Updated file_name in recipe {recipe_path}: -> {new_file_name}")
|
||||
|
||||
# Update persistent SQLite cache
|
||||
if self._persistent_cache:
|
||||
self._persistent_cache.update_recipe(recipe, recipe_path)
|
||||
self._json_path_map[recipe_id] = recipe_path
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating recipe file {recipe_path}: {e}")
|
||||
|
||||
|
||||
# We don't necessarily need to resort because LoRA file_name isn't a sort key,
|
||||
# but we might want to schedule a resort if we're paranoid or if searching relies on sorted state.
|
||||
# Given it's a rename of a dependency, search results might change if searching by LoRA name.
|
||||
self._schedule_resort()
|
||||
|
||||
|
||||
return file_updated_count, cache_updated_count
|
||||
|
||||
async def find_recipes_by_fingerprint(self, fingerprint: str) -> list:
|
||||
@@ -1996,3 +2232,26 @@ class RecipeScanner:
|
||||
duplicate_groups = {k: v for k, v in fingerprint_groups.items() if len(v) > 1}
|
||||
|
||||
return duplicate_groups
|
||||
|
||||
async def find_duplicate_recipes_by_source(self) -> dict:
|
||||
"""Find all recipe duplicates based on source_path (Civitai image URLs)
|
||||
|
||||
Returns:
|
||||
Dictionary where keys are source URLs and values are lists of recipe IDs
|
||||
"""
|
||||
cache = await self.get_cached_data()
|
||||
|
||||
url_groups = {}
|
||||
for recipe in cache.raw_data:
|
||||
source_url = recipe.get('source_path', '').strip()
|
||||
if not source_url:
|
||||
continue
|
||||
|
||||
if source_url not in url_groups:
|
||||
url_groups[source_url] = []
|
||||
|
||||
url_groups[source_url].append(recipe.get('id'))
|
||||
|
||||
duplicate_groups = {k: v for k, v in url_groups.items() if len(v) > 1}
|
||||
|
||||
return duplicate_groups
|
||||
|
||||
@@ -28,6 +28,9 @@ CORE_USER_SETTING_KEYS: Tuple[str, ...] = (
|
||||
"folder_paths",
|
||||
)
|
||||
|
||||
# Threshold for aggressive cleanup: if file contains this many default keys, clean it up
|
||||
DEFAULT_KEYS_CLEANUP_THRESHOLD = 10
|
||||
|
||||
|
||||
DEFAULT_SETTINGS: Dict[str, Any] = {
|
||||
"civitai_api_key": "",
|
||||
@@ -35,6 +38,8 @@ DEFAULT_SETTINGS: Dict[str, Any] = {
|
||||
"hash_chunk_size_mb": DEFAULT_HASH_CHUNK_SIZE_MB,
|
||||
"language": "en",
|
||||
"show_only_sfw": False,
|
||||
"onboarding_completed": False,
|
||||
"dismissed_banners": [],
|
||||
"enable_metadata_archive_db": False,
|
||||
"proxy_enabled": False,
|
||||
"proxy_host": "",
|
||||
@@ -61,9 +66,10 @@ DEFAULT_SETTINGS: Dict[str, Any] = {
|
||||
"compact_mode": False,
|
||||
"priority_tags": DEFAULT_PRIORITY_TAG_CONFIG.copy(),
|
||||
"model_name_display": "model_name",
|
||||
"model_card_footer_action": "example_images",
|
||||
"model_card_footer_action": "replace_preview",
|
||||
"update_flag_strategy": "same_base",
|
||||
"auto_organize_exclusions": [],
|
||||
"metadata_refresh_skip_paths": [],
|
||||
}
|
||||
|
||||
|
||||
@@ -93,6 +99,9 @@ class SettingsManager:
|
||||
if self._needs_initial_save:
|
||||
self._save_settings()
|
||||
self._needs_initial_save = False
|
||||
else:
|
||||
# Clean up existing settings file by removing default values
|
||||
self._cleanup_default_values_from_disk()
|
||||
|
||||
def _detect_standalone_mode(self) -> bool:
|
||||
"""Return ``True`` when running in standalone mode."""
|
||||
@@ -224,7 +233,7 @@ class SettingsManager:
|
||||
return merged
|
||||
|
||||
def _ensure_default_settings(self) -> None:
|
||||
"""Ensure all default settings keys exist"""
|
||||
"""Ensure all default settings keys exist in memory (but don't save defaults to disk)"""
|
||||
defaults = self._get_default_settings()
|
||||
updated_existing = False
|
||||
inserted_defaults = False
|
||||
@@ -253,6 +262,17 @@ class SettingsManager:
|
||||
self.settings["auto_organize_exclusions"] = []
|
||||
inserted_defaults = True
|
||||
|
||||
if "metadata_refresh_skip_paths" in self.settings:
|
||||
normalized_skip_paths = self.normalize_metadata_refresh_skip_paths(
|
||||
self.settings.get("metadata_refresh_skip_paths")
|
||||
)
|
||||
if normalized_skip_paths != self.settings.get("metadata_refresh_skip_paths"):
|
||||
self.settings["metadata_refresh_skip_paths"] = normalized_skip_paths
|
||||
updated_existing = True
|
||||
else:
|
||||
self.settings["metadata_refresh_skip_paths"] = []
|
||||
inserted_defaults = True
|
||||
|
||||
for key, value in defaults.items():
|
||||
if key == "priority_tags":
|
||||
continue
|
||||
@@ -263,10 +283,10 @@ class SettingsManager:
|
||||
self.settings[key] = value
|
||||
inserted_defaults = True
|
||||
|
||||
if updated_existing or (
|
||||
inserted_defaults and self._bootstrap_reason in {"invalid", "unreadable"}
|
||||
):
|
||||
# Save only if existing values were normalized/updated
|
||||
if updated_existing:
|
||||
self._save_settings()
|
||||
# Note: inserted_defaults no longer triggers save - defaults stay in memory only
|
||||
|
||||
def _migrate_to_library_registry(self) -> None:
|
||||
"""Ensure settings include the multi-library registry structure."""
|
||||
@@ -709,6 +729,42 @@ class SettingsManager:
|
||||
|
||||
self._startup_messages.append(payload)
|
||||
|
||||
def _cleanup_default_values_from_disk(self) -> None:
|
||||
"""Remove default values from existing settings.json to keep it clean.
|
||||
|
||||
Only performs cleanup if the file contains a significant number of default
|
||||
values (indicating it's "bloated"). Small files (like template-based configs)
|
||||
are preserved as-is to avoid unexpected changes.
|
||||
"""
|
||||
# Only cleanup existing files (not new ones)
|
||||
if self._bootstrap_reason == "missing" or self._original_disk_payload is None:
|
||||
return
|
||||
|
||||
defaults = self._get_default_settings()
|
||||
disk_keys = set(self._original_disk_payload.keys())
|
||||
|
||||
# Count how many keys on disk are set to their default values
|
||||
default_value_keys = set()
|
||||
for key in disk_keys:
|
||||
if key in CORE_USER_SETTING_KEYS:
|
||||
continue # Core keys don't count as "cleanup candidates"
|
||||
disk_value = self._original_disk_payload.get(key)
|
||||
default_value = defaults.get(key)
|
||||
# Compare using JSON serialization for complex objects
|
||||
if json.dumps(disk_value, sort_keys=True, default=str) == json.dumps(default_value, sort_keys=True, default=str):
|
||||
default_value_keys.add(key)
|
||||
|
||||
# Only cleanup if there are "many" default keys (indicating a bloated file)
|
||||
# This preserves small/template-based configs while cleaning up legacy bloated files
|
||||
if len(default_value_keys) >= DEFAULT_KEYS_CLEANUP_THRESHOLD:
|
||||
logger.info(
|
||||
"Cleaning up %d default value(s) from settings.json to keep it minimal",
|
||||
len(default_value_keys)
|
||||
)
|
||||
self._save_settings()
|
||||
# Update original payload to match what we just saved
|
||||
self._original_disk_payload = self._serialize_settings_for_disk()
|
||||
|
||||
def _collect_configuration_warnings(self) -> None:
|
||||
if not self._standalone_mode:
|
||||
return
|
||||
@@ -761,6 +817,7 @@ class SettingsManager:
|
||||
defaults['priority_tags'] = DEFAULT_PRIORITY_TAG_CONFIG.copy()
|
||||
defaults.setdefault('folder_paths', {})
|
||||
defaults['auto_organize_exclusions'] = []
|
||||
defaults['metadata_refresh_skip_paths'] = []
|
||||
|
||||
library_name = defaults.get("active_library") or "default"
|
||||
default_library = self._build_library_payload(
|
||||
@@ -832,6 +889,44 @@ class SettingsManager:
|
||||
self._save_settings()
|
||||
return exclusions
|
||||
|
||||
def normalize_metadata_refresh_skip_paths(self, value: Any) -> List[str]:
|
||||
if value is None:
|
||||
return []
|
||||
|
||||
if isinstance(value, str):
|
||||
candidates: Iterable[str] = (
|
||||
value.replace("\n", ",").replace(";", ",").split(",")
|
||||
)
|
||||
elif isinstance(value, Sequence) and not isinstance(value, (bytes, bytearray, str)):
|
||||
candidates = value
|
||||
else:
|
||||
return []
|
||||
|
||||
paths: List[str] = []
|
||||
for raw in candidates:
|
||||
if isinstance(raw, str):
|
||||
token = raw.replace("\\", "/").strip().strip("/")
|
||||
if token:
|
||||
paths.append(token)
|
||||
|
||||
unique_paths: List[str] = []
|
||||
seen = set()
|
||||
for path in paths:
|
||||
if path not in seen:
|
||||
seen.add(path)
|
||||
unique_paths.append(path)
|
||||
|
||||
return unique_paths
|
||||
|
||||
def get_metadata_refresh_skip_paths(self) -> List[str]:
|
||||
skip_paths = self.normalize_metadata_refresh_skip_paths(
|
||||
self.settings.get("metadata_refresh_skip_paths")
|
||||
)
|
||||
if skip_paths != self.settings.get("metadata_refresh_skip_paths"):
|
||||
self.settings["metadata_refresh_skip_paths"] = skip_paths
|
||||
self._save_settings()
|
||||
return skip_paths
|
||||
|
||||
def get_startup_messages(self) -> List[Dict[str, Any]]:
|
||||
return [message.copy() for message in self._startup_messages]
|
||||
|
||||
@@ -869,6 +964,8 @@ class SettingsManager:
|
||||
"""Set setting value and save"""
|
||||
if key == "auto_organize_exclusions":
|
||||
value = self.normalize_auto_organize_exclusions(value)
|
||||
elif key == "metadata_refresh_skip_paths":
|
||||
value = self.normalize_metadata_refresh_skip_paths(value)
|
||||
self.settings[key] = value
|
||||
portable_switch_pending = False
|
||||
if key == "use_portable_settings" and isinstance(value, bool):
|
||||
@@ -897,6 +994,10 @@ class SettingsManager:
|
||||
self._save_settings()
|
||||
logger.info(f"Deleted setting: {key}")
|
||||
|
||||
def keys(self) -> Iterable[str]:
|
||||
"""Return all setting keys."""
|
||||
return self.settings.keys()
|
||||
|
||||
def _prepare_portable_switch(self, use_portable: bool) -> None:
|
||||
"""Prepare switching the settings storage location."""
|
||||
|
||||
@@ -1099,7 +1200,12 @@ class SettingsManager:
|
||||
self._seed_template = None
|
||||
|
||||
def _serialize_settings_for_disk(self) -> Dict[str, Any]:
|
||||
"""Return the settings payload that should be persisted to disk."""
|
||||
"""Return the settings payload that should be persisted to disk.
|
||||
|
||||
Only saves settings that differ from defaults, keeping the config file
|
||||
clean and focused on user customizations. Default values are still
|
||||
available at runtime via _get_default_settings().
|
||||
"""
|
||||
|
||||
if self._bootstrap_reason == "missing":
|
||||
minimal: Dict[str, Any] = {}
|
||||
@@ -1113,7 +1219,25 @@ class SettingsManager:
|
||||
|
||||
return minimal
|
||||
|
||||
return copy.deepcopy(self.settings)
|
||||
# Only save settings that differ from defaults
|
||||
defaults = self._get_default_settings()
|
||||
minimal = {}
|
||||
|
||||
for key, value in self.settings.items():
|
||||
default_value = defaults.get(key)
|
||||
|
||||
# Core settings are always saved (even if equal to default)
|
||||
if key in CORE_USER_SETTING_KEYS:
|
||||
minimal[key] = copy.deepcopy(value)
|
||||
# Complex objects need deep comparison
|
||||
elif isinstance(value, (dict, list)) and default_value is not None:
|
||||
if json.dumps(value, sort_keys=True, default=str) != json.dumps(default_value, sort_keys=True, default=str):
|
||||
minimal[key] = copy.deepcopy(value)
|
||||
# Simple values use direct comparison
|
||||
elif value != default_value:
|
||||
minimal[key] = copy.deepcopy(value)
|
||||
|
||||
return minimal
|
||||
|
||||
def get_libraries(self) -> Dict[str, Dict[str, Any]]:
|
||||
"""Return a copy of the registered libraries."""
|
||||
|
||||
680
py/services/tag_fts_index.py
Normal file
680
py/services/tag_fts_index.py
Normal file
@@ -0,0 +1,680 @@
|
||||
"""SQLite FTS5-based full-text search index for tags.
|
||||
|
||||
This module provides fast tag search using SQLite's FTS5 extension,
|
||||
enabling sub-100ms search times for 221k+ Danbooru/e621 tags.
|
||||
|
||||
Supports alias search: when a user searches for an alias (e.g., "miku"),
|
||||
the system returns the canonical tag (e.g., "hatsune_miku") and indicates
|
||||
which alias was matched.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sqlite3
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Set
|
||||
|
||||
from ..utils.cache_paths import CacheType, resolve_cache_path_with_migration
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Schema version for tracking migrations
|
||||
SCHEMA_VERSION = 2 # Version 2: Added aliases support
|
||||
|
||||
|
||||
# Category definitions for Danbooru and e621
|
||||
CATEGORY_NAMES = {
|
||||
# Danbooru categories
|
||||
0: "general",
|
||||
1: "artist",
|
||||
3: "copyright",
|
||||
4: "character",
|
||||
5: "meta",
|
||||
# e621 categories
|
||||
7: "general",
|
||||
8: "artist",
|
||||
10: "copyright",
|
||||
11: "character",
|
||||
12: "species",
|
||||
14: "meta",
|
||||
15: "lore",
|
||||
}
|
||||
|
||||
# Map category names to their IDs (for filtering)
|
||||
CATEGORY_NAME_TO_IDS = {
|
||||
"general": [0, 7],
|
||||
"artist": [1, 8],
|
||||
"copyright": [3, 10],
|
||||
"character": [4, 11],
|
||||
"meta": [5, 14],
|
||||
"species": [12],
|
||||
"lore": [15],
|
||||
}
|
||||
|
||||
|
||||
class TagFTSIndex:
|
||||
"""SQLite FTS5-based full-text search index for tags.
|
||||
|
||||
Provides fast prefix-based search across the Danbooru/e621 tag database.
|
||||
Supports category-based filtering and returns enriched results with
|
||||
post counts and category information.
|
||||
"""
|
||||
|
||||
_DEFAULT_FILENAME = "tag_fts.sqlite"
|
||||
_CSV_FILENAME = "danbooru_e621_merged.csv"
|
||||
|
||||
def __init__(self, db_path: Optional[str] = None, csv_path: Optional[str] = None) -> None:
|
||||
"""Initialize the FTS index.
|
||||
|
||||
Args:
|
||||
db_path: Optional path to the SQLite database file.
|
||||
If not provided, uses the default location in settings directory.
|
||||
csv_path: Optional path to the CSV file containing tag data.
|
||||
If not provided, looks in the refs/ directory.
|
||||
"""
|
||||
self._db_path = db_path or self._resolve_default_db_path()
|
||||
self._csv_path = csv_path or self._resolve_default_csv_path()
|
||||
self._lock = threading.Lock()
|
||||
self._ready = threading.Event()
|
||||
self._indexing_in_progress = False
|
||||
self._schema_initialized = False
|
||||
self._warned_not_ready = False
|
||||
|
||||
# Ensure directory exists
|
||||
try:
|
||||
directory = os.path.dirname(self._db_path)
|
||||
if directory:
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
except Exception as exc:
|
||||
logger.warning("Could not create FTS index directory %s: %s", directory, exc)
|
||||
|
||||
def _resolve_default_db_path(self) -> str:
|
||||
"""Resolve the default database path."""
|
||||
env_override = os.environ.get("LORA_MANAGER_TAG_FTS_DB")
|
||||
return resolve_cache_path_with_migration(
|
||||
CacheType.TAG_FTS,
|
||||
env_override=env_override,
|
||||
)
|
||||
|
||||
def _resolve_default_csv_path(self) -> str:
|
||||
"""Resolve the default CSV file path."""
|
||||
# Look for the CSV in the refs/ directory relative to the package
|
||||
package_dir = Path(__file__).parent.parent.parent
|
||||
csv_path = package_dir / "refs" / self._CSV_FILENAME
|
||||
return str(csv_path)
|
||||
|
||||
def get_database_path(self) -> str:
|
||||
"""Return the resolved database path."""
|
||||
return self._db_path
|
||||
|
||||
def get_csv_path(self) -> str:
|
||||
"""Return the resolved CSV path."""
|
||||
return self._csv_path
|
||||
|
||||
def is_ready(self) -> bool:
|
||||
"""Check if the FTS index is ready for queries."""
|
||||
return self._ready.is_set()
|
||||
|
||||
def is_indexing(self) -> bool:
|
||||
"""Check if indexing is currently in progress."""
|
||||
return self._indexing_in_progress
|
||||
|
||||
def initialize(self) -> None:
|
||||
"""Initialize the database schema."""
|
||||
if self._schema_initialized:
|
||||
return
|
||||
|
||||
with self._lock:
|
||||
if self._schema_initialized:
|
||||
return
|
||||
|
||||
try:
|
||||
conn = self._connect()
|
||||
try:
|
||||
conn.execute("PRAGMA journal_mode=WAL")
|
||||
|
||||
# Check if we need to migrate from old schema
|
||||
needs_rebuild = self._check_and_migrate_schema(conn)
|
||||
|
||||
conn.executescript("""
|
||||
-- FTS5 virtual table for full-text search
|
||||
-- searchable_text contains "tag_name alias1 alias2 ..." for alias matching
|
||||
CREATE VIRTUAL TABLE IF NOT EXISTS tag_fts USING fts5(
|
||||
searchable_text,
|
||||
tokenize='unicode61 remove_diacritics 2'
|
||||
);
|
||||
|
||||
-- Tags table with metadata and aliases
|
||||
CREATE TABLE IF NOT EXISTS tags (
|
||||
rowid INTEGER PRIMARY KEY,
|
||||
tag_name TEXT UNIQUE NOT NULL,
|
||||
category INTEGER NOT NULL DEFAULT 0,
|
||||
post_count INTEGER NOT NULL DEFAULT 0,
|
||||
aliases TEXT DEFAULT ''
|
||||
);
|
||||
|
||||
-- Indexes for efficient filtering
|
||||
CREATE INDEX IF NOT EXISTS idx_tags_category ON tags(category);
|
||||
CREATE INDEX IF NOT EXISTS idx_tags_post_count ON tags(post_count DESC);
|
||||
|
||||
-- Index version tracking
|
||||
CREATE TABLE IF NOT EXISTS fts_metadata (
|
||||
key TEXT PRIMARY KEY,
|
||||
value TEXT
|
||||
);
|
||||
""")
|
||||
|
||||
# Set schema version
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO fts_metadata (key, value) VALUES (?, ?)",
|
||||
("schema_version", str(SCHEMA_VERSION))
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
self._schema_initialized = True
|
||||
self._needs_rebuild = needs_rebuild
|
||||
logger.debug("Tag FTS index schema initialized at %s", self._db_path)
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception as exc:
|
||||
logger.error("Failed to initialize tag FTS schema: %s", exc)
|
||||
|
||||
def _check_and_migrate_schema(self, conn: sqlite3.Connection) -> bool:
|
||||
"""Check schema version and migrate if necessary.
|
||||
|
||||
Returns:
|
||||
True if the index needs to be rebuilt, False otherwise.
|
||||
"""
|
||||
try:
|
||||
# Check if fts_metadata table exists
|
||||
cursor = conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='fts_metadata'"
|
||||
)
|
||||
if not cursor.fetchone():
|
||||
return False # Fresh database, no migration needed
|
||||
|
||||
# Check schema version
|
||||
cursor = conn.execute(
|
||||
"SELECT value FROM fts_metadata WHERE key='schema_version'"
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if not row:
|
||||
# Old schema without version, needs rebuild
|
||||
logger.info("Migrating tag FTS index to schema version %d (adding alias support)", SCHEMA_VERSION)
|
||||
self._drop_old_tables(conn)
|
||||
return True
|
||||
|
||||
current_version = int(row[0])
|
||||
if current_version < SCHEMA_VERSION:
|
||||
logger.info("Migrating tag FTS index from version %d to %d", current_version, SCHEMA_VERSION)
|
||||
self._drop_old_tables(conn)
|
||||
return True
|
||||
|
||||
return False
|
||||
except Exception as exc:
|
||||
logger.warning("Error checking schema version: %s", exc)
|
||||
return False
|
||||
|
||||
def _drop_old_tables(self, conn: sqlite3.Connection) -> None:
|
||||
"""Drop old tables for schema migration."""
|
||||
try:
|
||||
conn.executescript("""
|
||||
DROP TABLE IF EXISTS tag_fts;
|
||||
DROP TABLE IF EXISTS tags;
|
||||
""")
|
||||
conn.commit()
|
||||
except Exception as exc:
|
||||
logger.warning("Error dropping old tables: %s", exc)
|
||||
|
||||
def build_index(self) -> None:
|
||||
"""Build the FTS index from the CSV file.
|
||||
|
||||
This method parses the danbooru_e621_merged.csv file and creates
|
||||
the FTS index for fast searching. The CSV format is:
|
||||
tag_name,category,post_count,aliases
|
||||
|
||||
Where aliases is a comma-separated string (e.g., "miku,vocaloid_miku,39").
|
||||
"""
|
||||
if self._indexing_in_progress:
|
||||
logger.warning("Tag FTS indexing already in progress, skipping")
|
||||
return
|
||||
|
||||
if not os.path.exists(self._csv_path):
|
||||
logger.warning("CSV file not found at %s, cannot build tag index", self._csv_path)
|
||||
return
|
||||
|
||||
self._indexing_in_progress = True
|
||||
self._ready.clear()
|
||||
start_time = time.time()
|
||||
|
||||
try:
|
||||
self.initialize()
|
||||
if not self._schema_initialized:
|
||||
logger.error("Cannot build tag FTS index: schema not initialized")
|
||||
return
|
||||
|
||||
with self._lock:
|
||||
conn = self._connect()
|
||||
try:
|
||||
conn.execute("BEGIN")
|
||||
|
||||
# Clear existing data
|
||||
conn.execute("DELETE FROM tag_fts")
|
||||
conn.execute("DELETE FROM tags")
|
||||
|
||||
# Parse CSV and insert in batches
|
||||
batch_size = 500
|
||||
rows = []
|
||||
total_inserted = 0
|
||||
tags_with_aliases = 0
|
||||
|
||||
with open(self._csv_path, "r", encoding="utf-8") as f:
|
||||
reader = csv.reader(f)
|
||||
for row in reader:
|
||||
if len(row) < 3:
|
||||
continue
|
||||
|
||||
tag_name = row[0].strip()
|
||||
if not tag_name:
|
||||
continue
|
||||
|
||||
try:
|
||||
category = int(row[1])
|
||||
except (ValueError, IndexError):
|
||||
category = 0
|
||||
|
||||
try:
|
||||
post_count = int(row[2])
|
||||
except (ValueError, IndexError):
|
||||
post_count = 0
|
||||
|
||||
# Parse aliases from column 4 (if present)
|
||||
aliases = row[3].strip() if len(row) >= 4 else ""
|
||||
if aliases:
|
||||
tags_with_aliases += 1
|
||||
|
||||
rows.append((tag_name, category, post_count, aliases))
|
||||
|
||||
if len(rows) >= batch_size:
|
||||
self._insert_batch(conn, rows)
|
||||
total_inserted += len(rows)
|
||||
rows = []
|
||||
|
||||
# Insert remaining rows
|
||||
if rows:
|
||||
self._insert_batch(conn, rows)
|
||||
total_inserted += len(rows)
|
||||
|
||||
# Update metadata
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO fts_metadata (key, value) VALUES (?, ?)",
|
||||
("last_build_time", str(time.time()))
|
||||
)
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO fts_metadata (key, value) VALUES (?, ?)",
|
||||
("tag_count", str(total_inserted))
|
||||
)
|
||||
conn.execute(
|
||||
"INSERT OR REPLACE INTO fts_metadata (key, value) VALUES (?, ?)",
|
||||
("schema_version", str(SCHEMA_VERSION))
|
||||
)
|
||||
|
||||
conn.commit()
|
||||
elapsed = time.time() - start_time
|
||||
logger.info(
|
||||
"Tag FTS index built: %d tags indexed (%d with aliases) in %.2fs",
|
||||
total_inserted, tags_with_aliases, elapsed
|
||||
)
|
||||
finally:
|
||||
conn.close()
|
||||
|
||||
self._ready.set()
|
||||
|
||||
except Exception as exc:
|
||||
logger.error("Failed to build tag FTS index: %s", exc, exc_info=True)
|
||||
finally:
|
||||
self._indexing_in_progress = False
|
||||
|
||||
def _insert_batch(self, conn: sqlite3.Connection, rows: List[tuple]) -> None:
|
||||
"""Insert a batch of rows into the database.
|
||||
|
||||
Each row is a tuple of (tag_name, category, post_count, aliases).
|
||||
The FTS searchable_text is built as "tag_name alias1 alias2 ..." for alias matching.
|
||||
"""
|
||||
# Insert into tags table (with aliases)
|
||||
conn.executemany(
|
||||
"INSERT OR IGNORE INTO tags (tag_name, category, post_count, aliases) VALUES (?, ?, ?, ?)",
|
||||
rows
|
||||
)
|
||||
|
||||
# Build a map of tag_name -> aliases for FTS insertion
|
||||
aliases_map = {row[0]: row[3] for row in rows}
|
||||
|
||||
# Get rowids and insert into FTS table with explicit rowid
|
||||
# to ensure tags.rowid matches tag_fts.rowid for JOINs
|
||||
tag_names = [row[0] for row in rows]
|
||||
placeholders = ",".join("?" * len(tag_names))
|
||||
cursor = conn.execute(
|
||||
f"SELECT rowid, tag_name FROM tags WHERE tag_name IN ({placeholders})",
|
||||
tag_names
|
||||
)
|
||||
|
||||
# Build FTS rows with (rowid, searchable_text) = (tags.rowid, "tag_name alias1 alias2 ...")
|
||||
fts_rows = []
|
||||
for rowid, tag_name in cursor.fetchall():
|
||||
aliases = aliases_map.get(tag_name, "")
|
||||
if aliases:
|
||||
# Replace commas with spaces to create searchable text
|
||||
# Strip "/" prefix from aliases as it's an FTS5 special character
|
||||
alias_parts = []
|
||||
for alias in aliases.split(","):
|
||||
alias = alias.strip()
|
||||
if alias.startswith("/"):
|
||||
alias = alias[1:] # Remove leading slash
|
||||
if alias:
|
||||
alias_parts.append(alias)
|
||||
searchable_text = f"{tag_name} {' '.join(alias_parts)}" if alias_parts else tag_name
|
||||
else:
|
||||
searchable_text = tag_name
|
||||
fts_rows.append((rowid, searchable_text))
|
||||
|
||||
if fts_rows:
|
||||
conn.executemany("INSERT INTO tag_fts (rowid, searchable_text) VALUES (?, ?)", fts_rows)
|
||||
|
||||
def ensure_ready(self) -> bool:
|
||||
"""Ensure the index is ready, building if necessary.
|
||||
|
||||
Returns:
|
||||
True if the index is ready, False otherwise.
|
||||
"""
|
||||
if self.is_ready():
|
||||
return True
|
||||
|
||||
# Check if index already exists and has data
|
||||
self.initialize()
|
||||
if self._schema_initialized:
|
||||
# Check if schema migration requires rebuild
|
||||
if getattr(self, "_needs_rebuild", False):
|
||||
logger.info("Schema migration requires index rebuild")
|
||||
self._needs_rebuild = False
|
||||
self.build_index()
|
||||
return self.is_ready()
|
||||
|
||||
count = self.get_indexed_count()
|
||||
if count > 0:
|
||||
self._ready.set()
|
||||
logger.debug("Tag FTS index already populated with %d tags", count)
|
||||
return True
|
||||
|
||||
# Build the index
|
||||
self.build_index()
|
||||
return self.is_ready()
|
||||
|
||||
def search(
|
||||
self,
|
||||
query: str,
|
||||
categories: Optional[List[int]] = None,
|
||||
limit: int = 20
|
||||
) -> List[Dict]:
|
||||
"""Search tags using FTS5 with prefix matching.
|
||||
|
||||
Supports alias search: if the query matches an alias rather than
|
||||
the tag_name, the result will include a "matched_alias" field.
|
||||
|
||||
Args:
|
||||
query: The search query string.
|
||||
categories: Optional list of category IDs to filter by.
|
||||
limit: Maximum number of results to return.
|
||||
|
||||
Returns:
|
||||
List of dictionaries with tag_name, category, post_count,
|
||||
and optionally matched_alias.
|
||||
"""
|
||||
# Ensure index is ready (lazy initialization)
|
||||
if not self.ensure_ready():
|
||||
if not self._warned_not_ready:
|
||||
logger.debug("Tag FTS index not ready, returning empty results")
|
||||
self._warned_not_ready = True
|
||||
return []
|
||||
|
||||
if not query or not query.strip():
|
||||
return []
|
||||
|
||||
fts_query = self._build_fts_query(query)
|
||||
if not fts_query:
|
||||
return []
|
||||
|
||||
try:
|
||||
with self._lock:
|
||||
conn = self._connect(readonly=True)
|
||||
try:
|
||||
# Build the SQL query - now also fetch aliases for matched_alias detection
|
||||
# Use subquery for category filter to ensure FTS is evaluated first
|
||||
if categories:
|
||||
placeholders = ",".join("?" * len(categories))
|
||||
sql = f"""
|
||||
SELECT t.tag_name, t.category, t.post_count, t.aliases
|
||||
FROM tags t
|
||||
WHERE t.rowid IN (
|
||||
SELECT rowid FROM tag_fts WHERE searchable_text MATCH ?
|
||||
)
|
||||
AND t.category IN ({placeholders})
|
||||
ORDER BY t.post_count DESC
|
||||
LIMIT ?
|
||||
"""
|
||||
params = [fts_query] + categories + [limit]
|
||||
else:
|
||||
sql = """
|
||||
SELECT t.tag_name, t.category, t.post_count, t.aliases
|
||||
FROM tag_fts f
|
||||
JOIN tags t ON f.rowid = t.rowid
|
||||
WHERE f.searchable_text MATCH ?
|
||||
ORDER BY t.post_count DESC
|
||||
LIMIT ?
|
||||
"""
|
||||
params = [fts_query, limit]
|
||||
|
||||
cursor = conn.execute(sql, params)
|
||||
results = []
|
||||
for row in cursor.fetchall():
|
||||
result = {
|
||||
"tag_name": row[0],
|
||||
"category": row[1],
|
||||
"post_count": row[2],
|
||||
}
|
||||
|
||||
# Check if search matched an alias rather than the tag_name
|
||||
matched_alias = self._find_matched_alias(query, row[0], row[3])
|
||||
if matched_alias:
|
||||
result["matched_alias"] = matched_alias
|
||||
|
||||
results.append(result)
|
||||
return results
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception as exc:
|
||||
logger.debug("Tag FTS search error for query '%s': %s", query, exc)
|
||||
return []
|
||||
|
||||
def _find_matched_alias(self, query: str, tag_name: str, aliases_str: str) -> Optional[str]:
|
||||
"""Find which alias matched the query, if any.
|
||||
|
||||
Args:
|
||||
query: The original search query.
|
||||
tag_name: The canonical tag name.
|
||||
aliases_str: Comma-separated string of aliases.
|
||||
|
||||
Returns:
|
||||
The matched alias string, or None if the query matched the tag_name directly.
|
||||
"""
|
||||
query_lower = query.lower().strip()
|
||||
if not query_lower:
|
||||
return None
|
||||
|
||||
# Strip leading "/" from query if present (FTS index strips these)
|
||||
query_normalized = query_lower.lstrip("/")
|
||||
|
||||
# Check if query matches tag_name prefix (direct match, no alias needed)
|
||||
if tag_name.lower().startswith(query_normalized):
|
||||
return None
|
||||
|
||||
# Check aliases first - if query matches an alias or a word within an alias, return it
|
||||
if aliases_str:
|
||||
for alias in aliases_str.split(","):
|
||||
alias = alias.strip()
|
||||
if not alias:
|
||||
continue
|
||||
# Normalize alias for comparison (strip leading slash)
|
||||
alias_normalized = alias.lower().lstrip("/")
|
||||
|
||||
# Check if alias starts with query
|
||||
if alias_normalized.startswith(query_normalized):
|
||||
return alias # Return original alias (with "/" if present)
|
||||
|
||||
# Check if any word within the alias starts with query
|
||||
# (mirrors FTS5 tokenization which splits on underscores)
|
||||
alias_words = alias_normalized.replace("_", " ").split()
|
||||
for word in alias_words:
|
||||
if word.startswith(query_normalized):
|
||||
return alias
|
||||
|
||||
# If no alias matched, check if query matches a word in tag_name
|
||||
# (handles cases like "long_hair" matching "long" - no alias indicator needed)
|
||||
tag_words = tag_name.lower().replace("_", " ").split()
|
||||
for word in tag_words:
|
||||
if word.startswith(query_normalized):
|
||||
return None
|
||||
|
||||
# Query matched via FTS but not tag_name words or aliases
|
||||
# This shouldn't normally happen, but return None for safety
|
||||
return None
|
||||
|
||||
def get_indexed_count(self) -> int:
|
||||
"""Return the number of tags currently indexed."""
|
||||
if not self._schema_initialized:
|
||||
return 0
|
||||
|
||||
try:
|
||||
with self._lock:
|
||||
conn = self._connect(readonly=True)
|
||||
try:
|
||||
cursor = conn.execute("SELECT COUNT(*) FROM tags")
|
||||
result = cursor.fetchone()
|
||||
return result[0] if result else 0
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception:
|
||||
return 0
|
||||
|
||||
def clear(self) -> bool:
|
||||
"""Clear all data from the FTS index.
|
||||
|
||||
Returns:
|
||||
True if successful, False otherwise.
|
||||
"""
|
||||
try:
|
||||
with self._lock:
|
||||
conn = self._connect()
|
||||
try:
|
||||
conn.execute("DELETE FROM tag_fts")
|
||||
conn.execute("DELETE FROM tags")
|
||||
conn.commit()
|
||||
self._ready.clear()
|
||||
return True
|
||||
finally:
|
||||
conn.close()
|
||||
except Exception as exc:
|
||||
logger.error("Failed to clear tag FTS index: %s", exc)
|
||||
return False
|
||||
|
||||
# Internal helpers
|
||||
|
||||
def _connect(self, readonly: bool = False) -> sqlite3.Connection:
|
||||
"""Create a database connection."""
|
||||
uri = False
|
||||
path = self._db_path
|
||||
if readonly:
|
||||
if not os.path.exists(path):
|
||||
raise FileNotFoundError(path)
|
||||
path = f"file:{path}?mode=ro"
|
||||
uri = True
|
||||
conn = sqlite3.connect(path, check_same_thread=False, uri=uri)
|
||||
conn.row_factory = sqlite3.Row
|
||||
return conn
|
||||
|
||||
def _build_fts_query(self, query: str) -> str:
|
||||
"""Build an FTS5 query string with prefix matching.
|
||||
|
||||
Args:
|
||||
query: The user's search query.
|
||||
|
||||
Returns:
|
||||
FTS5 query string.
|
||||
"""
|
||||
# Split query into words and clean them
|
||||
words = query.lower().split()
|
||||
if not words:
|
||||
return ""
|
||||
|
||||
# Escape and add prefix wildcard to each word
|
||||
prefix_terms = []
|
||||
for word in words:
|
||||
escaped = self._escape_fts_query(word)
|
||||
if escaped:
|
||||
# Add prefix wildcard for substring-like matching
|
||||
prefix_terms.append(f"{escaped}*")
|
||||
|
||||
if not prefix_terms:
|
||||
return ""
|
||||
|
||||
# Combine terms with implicit AND (all words must match)
|
||||
return " ".join(prefix_terms)
|
||||
|
||||
def _escape_fts_query(self, text: str) -> str:
|
||||
"""Escape special FTS5 characters.
|
||||
|
||||
FTS5 special characters: " ( ) * : ^ - /
|
||||
We keep * for prefix matching but escape others.
|
||||
"""
|
||||
if not text:
|
||||
return ""
|
||||
|
||||
# Replace FTS5 special characters with space
|
||||
# Note: "/" is special in FTS5 (column filter syntax), so we strip it
|
||||
special = ['"', "(", ")", "*", ":", "^", "-", "{", "}", "[", "]", "/"]
|
||||
result = text
|
||||
for char in special:
|
||||
result = result.replace(char, " ")
|
||||
|
||||
# Collapse multiple spaces and strip
|
||||
result = re.sub(r"\s+", " ", result).strip()
|
||||
return result
|
||||
|
||||
|
||||
# Singleton instance
|
||||
_tag_fts_index: Optional[TagFTSIndex] = None
|
||||
_tag_fts_lock = threading.Lock()
|
||||
|
||||
|
||||
def get_tag_fts_index() -> TagFTSIndex:
|
||||
"""Get the singleton TagFTSIndex instance."""
|
||||
global _tag_fts_index
|
||||
if _tag_fts_index is None:
|
||||
with _tag_fts_lock:
|
||||
if _tag_fts_index is None:
|
||||
_tag_fts_index = TagFTSIndex()
|
||||
return _tag_fts_index
|
||||
|
||||
|
||||
__all__ = [
|
||||
"TagFTSIndex",
|
||||
"get_tag_fts_index",
|
||||
"CATEGORY_NAMES",
|
||||
"CATEGORY_NAME_TO_IDS",
|
||||
]
|
||||
@@ -3,7 +3,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, Optional, Protocol, Sequence
|
||||
from typing import Any, Dict, List, Optional, Protocol, Sequence
|
||||
|
||||
from ..metadata_sync_service import MetadataSyncService
|
||||
from ...utils.metadata_manager import MetadataManager
|
||||
@@ -43,14 +43,22 @@ class BulkMetadataRefreshUseCase:
|
||||
total_models = len(cache.raw_data)
|
||||
|
||||
enable_metadata_archive_db = self._settings.get("enable_metadata_archive_db", False)
|
||||
skip_paths = self._settings.get("metadata_refresh_skip_paths", [])
|
||||
to_process: Sequence[Dict[str, Any]] = [
|
||||
model
|
||||
for model in cache.raw_data
|
||||
if model.get("sha256")
|
||||
and not model.get("skip_metadata_refresh", False)
|
||||
and not self._is_in_skip_path(model.get("folder", ""), skip_paths)
|
||||
and (not model.get("civitai") or not model["civitai"].get("id"))
|
||||
and (
|
||||
(enable_metadata_archive_db and not model.get("db_checked", False))
|
||||
or (not enable_metadata_archive_db and model.get("from_civitai") is True)
|
||||
and not (
|
||||
# Skip models confirmed not on CivitAI when no need to retry
|
||||
model.get("from_civitai") is False
|
||||
and model.get("civitai_deleted") is True
|
||||
and (
|
||||
not enable_metadata_archive_db
|
||||
or model.get("db_checked", False)
|
||||
)
|
||||
)
|
||||
]
|
||||
|
||||
@@ -115,6 +123,21 @@ class BulkMetadataRefreshUseCase:
|
||||
|
||||
return {"success": True, "message": message, "processed": processed, "updated": success, "total": total_models}
|
||||
|
||||
@staticmethod
|
||||
def _is_in_skip_path(folder: str, skip_paths: List[str]) -> bool:
|
||||
if not skip_paths or not folder:
|
||||
return False
|
||||
normalized = folder.replace("\\", "/").strip("/")
|
||||
if not normalized:
|
||||
return False
|
||||
for sp in skip_paths:
|
||||
nsp = sp.replace("\\", "/").strip("/")
|
||||
if not nsp:
|
||||
continue
|
||||
if normalized == nsp or normalized.startswith(nsp + "/"):
|
||||
return True
|
||||
return False
|
||||
|
||||
async def execute_with_error_handling(
|
||||
self,
|
||||
*,
|
||||
|
||||
@@ -255,6 +255,42 @@ class WebSocketManager:
|
||||
self._download_progress.pop(download_id, None)
|
||||
logger.debug(f"Cleaned up old download progress for {download_id}")
|
||||
|
||||
async def broadcast_cache_health_warning(self, report: 'HealthReport', page_type: str = None):
|
||||
"""
|
||||
Broadcast cache health warning to frontend.
|
||||
|
||||
Args:
|
||||
report: HealthReport instance from CacheHealthMonitor
|
||||
page_type: The page type (loras, checkpoints, embeddings)
|
||||
"""
|
||||
from .cache_health_monitor import CacheHealthStatus
|
||||
|
||||
# Only broadcast if there are issues
|
||||
if report.status == CacheHealthStatus.HEALTHY:
|
||||
return
|
||||
|
||||
payload = {
|
||||
'type': 'cache_health_warning',
|
||||
'status': report.status.value,
|
||||
'message': report.message,
|
||||
'pageType': page_type,
|
||||
'details': {
|
||||
'total': report.total_entries,
|
||||
'valid': report.valid_entries,
|
||||
'invalid': report.invalid_entries,
|
||||
'repaired': report.repaired_entries,
|
||||
'corruption_rate': f"{report.corruption_rate:.1%}",
|
||||
'invalid_paths': report.invalid_paths[:5], # Limit to first 5
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(
|
||||
f"Broadcasting cache health warning: {report.status.value} "
|
||||
f"({report.invalid_entries} invalid entries)"
|
||||
)
|
||||
|
||||
await self.broadcast(payload)
|
||||
|
||||
def get_connected_clients_count(self) -> int:
|
||||
"""Get number of connected clients"""
|
||||
return len(self._websockets)
|
||||
|
||||
421
py/utils/cache_paths.py
Normal file
421
py/utils/cache_paths.py
Normal file
@@ -0,0 +1,421 @@
|
||||
"""Centralized cache path resolution with automatic migration support.
|
||||
|
||||
This module provides a unified interface for resolving cache file paths,
|
||||
with automatic migration from legacy locations to the new organized
|
||||
cache directory structure.
|
||||
|
||||
Target structure:
|
||||
{settings_dir}/
|
||||
└── cache/
|
||||
├── symlink/
|
||||
│ └── symlink_map.json
|
||||
├── model/
|
||||
│ └── {library_name}.sqlite
|
||||
├── recipe/
|
||||
│ └── {library_name}.sqlite
|
||||
└── fts/
|
||||
├── recipe_fts.sqlite
|
||||
└── tag_fts.sqlite
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
from .settings_paths import get_project_root, get_settings_dir
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CacheType(Enum):
|
||||
"""Types of cache files managed by the cache path resolver."""
|
||||
|
||||
MODEL = "model"
|
||||
RECIPE = "recipe"
|
||||
RECIPE_FTS = "recipe_fts"
|
||||
TAG_FTS = "tag_fts"
|
||||
SYMLINK = "symlink"
|
||||
|
||||
|
||||
# Subdirectory structure for each cache type
|
||||
_CACHE_SUBDIRS = {
|
||||
CacheType.MODEL: "model",
|
||||
CacheType.RECIPE: "recipe",
|
||||
CacheType.RECIPE_FTS: "fts",
|
||||
CacheType.TAG_FTS: "fts",
|
||||
CacheType.SYMLINK: "symlink",
|
||||
}
|
||||
|
||||
# Filename patterns for each cache type
|
||||
_CACHE_FILENAMES = {
|
||||
CacheType.MODEL: "{library_name}.sqlite",
|
||||
CacheType.RECIPE: "{library_name}.sqlite",
|
||||
CacheType.RECIPE_FTS: "recipe_fts.sqlite",
|
||||
CacheType.TAG_FTS: "tag_fts.sqlite",
|
||||
CacheType.SYMLINK: "symlink_map.json",
|
||||
}
|
||||
|
||||
|
||||
def get_cache_base_dir(create: bool = True) -> str:
|
||||
"""Return the base cache directory path.
|
||||
|
||||
Args:
|
||||
create: Whether to create the directory if it does not exist.
|
||||
|
||||
Returns:
|
||||
The absolute path to the cache base directory ({settings_dir}/cache/).
|
||||
"""
|
||||
settings_dir = get_settings_dir(create=create)
|
||||
cache_dir = os.path.join(settings_dir, "cache")
|
||||
if create:
|
||||
os.makedirs(cache_dir, exist_ok=True)
|
||||
return cache_dir
|
||||
|
||||
|
||||
def _sanitize_library_name(library_name: Optional[str]) -> str:
|
||||
"""Sanitize a library name for use in filenames.
|
||||
|
||||
Args:
|
||||
library_name: The library name to sanitize.
|
||||
|
||||
Returns:
|
||||
A sanitized version safe for use in filenames.
|
||||
"""
|
||||
name = library_name or "default"
|
||||
return re.sub(r"[^A-Za-z0-9_.-]", "_", name)
|
||||
|
||||
|
||||
def get_cache_file_path(
|
||||
cache_type: CacheType,
|
||||
library_name: Optional[str] = None,
|
||||
create_dir: bool = True,
|
||||
) -> str:
|
||||
"""Get the canonical path for a cache file.
|
||||
|
||||
Args:
|
||||
cache_type: The type of cache file.
|
||||
library_name: The library name (only used for MODEL and RECIPE types).
|
||||
create_dir: Whether to create the parent directory if it does not exist.
|
||||
|
||||
Returns:
|
||||
The absolute path to the cache file in its canonical location.
|
||||
"""
|
||||
cache_base = get_cache_base_dir(create=create_dir)
|
||||
subdir = _CACHE_SUBDIRS[cache_type]
|
||||
cache_dir = os.path.join(cache_base, subdir)
|
||||
|
||||
if create_dir:
|
||||
os.makedirs(cache_dir, exist_ok=True)
|
||||
|
||||
filename_template = _CACHE_FILENAMES[cache_type]
|
||||
safe_name = _sanitize_library_name(library_name)
|
||||
filename = filename_template.format(library_name=safe_name)
|
||||
|
||||
return os.path.join(cache_dir, filename)
|
||||
|
||||
|
||||
def get_legacy_cache_paths(
|
||||
cache_type: CacheType,
|
||||
library_name: Optional[str] = None,
|
||||
) -> List[str]:
|
||||
"""Get a list of legacy cache file paths to check for migration.
|
||||
|
||||
The paths are returned in order of priority (most recent first).
|
||||
|
||||
Args:
|
||||
cache_type: The type of cache file.
|
||||
library_name: The library name (only used for MODEL and RECIPE types).
|
||||
|
||||
Returns:
|
||||
A list of potential legacy paths to check, in order of preference.
|
||||
"""
|
||||
try:
|
||||
settings_dir = get_settings_dir(create=False)
|
||||
except Exception:
|
||||
settings_dir = get_project_root()
|
||||
|
||||
safe_name = _sanitize_library_name(library_name)
|
||||
legacy_paths: List[str] = []
|
||||
|
||||
if cache_type == CacheType.MODEL:
|
||||
# Legacy per-library path: {settings_dir}/model_cache/{library}.sqlite
|
||||
legacy_paths.append(
|
||||
os.path.join(settings_dir, "model_cache", f"{safe_name}.sqlite")
|
||||
)
|
||||
# Legacy root-level single cache (for "default" library only)
|
||||
if safe_name.lower() in ("default", ""):
|
||||
legacy_paths.append(os.path.join(settings_dir, "model_cache.sqlite"))
|
||||
|
||||
elif cache_type == CacheType.RECIPE:
|
||||
# Legacy per-library path: {settings_dir}/recipe_cache/{library}.sqlite
|
||||
legacy_paths.append(
|
||||
os.path.join(settings_dir, "recipe_cache", f"{safe_name}.sqlite")
|
||||
)
|
||||
# Legacy root-level single cache (for "default" library only)
|
||||
if safe_name.lower() in ("default", ""):
|
||||
legacy_paths.append(os.path.join(settings_dir, "recipe_cache.sqlite"))
|
||||
|
||||
elif cache_type == CacheType.RECIPE_FTS:
|
||||
# Legacy root-level path
|
||||
legacy_paths.append(os.path.join(settings_dir, "recipe_fts.sqlite"))
|
||||
|
||||
elif cache_type == CacheType.TAG_FTS:
|
||||
# Legacy root-level path
|
||||
legacy_paths.append(os.path.join(settings_dir, "tag_fts.sqlite"))
|
||||
|
||||
elif cache_type == CacheType.SYMLINK:
|
||||
# Current location in cache/ but without subdirectory
|
||||
legacy_paths.append(
|
||||
os.path.join(settings_dir, "cache", "symlink_map.json")
|
||||
)
|
||||
|
||||
return legacy_paths
|
||||
|
||||
|
||||
def _cleanup_legacy_file_after_migration(
|
||||
legacy_path: str,
|
||||
canonical_path: str,
|
||||
) -> bool:
|
||||
"""Safely remove a legacy file after successful migration.
|
||||
|
||||
Args:
|
||||
legacy_path: The legacy file path to remove.
|
||||
canonical_path: The canonical path where the file was copied to.
|
||||
|
||||
Returns:
|
||||
True if cleanup succeeded, False otherwise.
|
||||
"""
|
||||
try:
|
||||
if not os.path.exists(canonical_path):
|
||||
logger.warning(
|
||||
"Skipping cleanup of %s: canonical file not found at %s",
|
||||
legacy_path,
|
||||
canonical_path,
|
||||
)
|
||||
return False
|
||||
|
||||
legacy_size = os.path.getsize(legacy_path)
|
||||
canonical_size = os.path.getsize(canonical_path)
|
||||
if legacy_size != canonical_size:
|
||||
logger.warning(
|
||||
"Skipping cleanup of %s: file size mismatch (legacy=%d, canonical=%d)",
|
||||
legacy_path,
|
||||
legacy_size,
|
||||
canonical_size,
|
||||
)
|
||||
return False
|
||||
|
||||
os.remove(legacy_path)
|
||||
logger.info("Cleaned up legacy cache file: %s", legacy_path)
|
||||
|
||||
_cleanup_empty_legacy_directories(legacy_path)
|
||||
|
||||
return True
|
||||
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to cleanup legacy cache file %s: %s",
|
||||
legacy_path,
|
||||
exc,
|
||||
)
|
||||
return False
|
||||
|
||||
|
||||
def _cleanup_empty_legacy_directories(legacy_path: str) -> None:
|
||||
"""Remove empty parent directories of a legacy file.
|
||||
|
||||
This function only removes directories if they are empty,
|
||||
using os.rmdir() which fails on non-empty directories.
|
||||
|
||||
Args:
|
||||
legacy_path: The legacy file path whose parent directories should be cleaned.
|
||||
"""
|
||||
try:
|
||||
parent_dir = os.path.dirname(legacy_path)
|
||||
|
||||
legacy_dir_names = ("model_cache", "recipe_cache")
|
||||
|
||||
current = parent_dir
|
||||
while current:
|
||||
base_name = os.path.basename(current)
|
||||
|
||||
if base_name in legacy_dir_names:
|
||||
if os.path.isdir(current) and not os.listdir(current):
|
||||
try:
|
||||
os.rmdir(current)
|
||||
logger.info("Removed empty legacy directory: %s", current)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
parent = os.path.dirname(current)
|
||||
if parent == current:
|
||||
break
|
||||
current = parent
|
||||
|
||||
except Exception as exc:
|
||||
logger.debug("Failed to cleanup empty legacy directories: %s", exc)
|
||||
|
||||
|
||||
def resolve_cache_path_with_migration(
|
||||
cache_type: CacheType,
|
||||
library_name: Optional[str] = None,
|
||||
env_override: Optional[str] = None,
|
||||
) -> str:
|
||||
"""Resolve the cache file path, migrating from legacy locations if needed.
|
||||
|
||||
This function performs lazy migration: on first access, it checks if the
|
||||
file exists at the canonical location. If not, it looks for legacy files
|
||||
and copies them to the new location. After successful migration, the
|
||||
legacy file is automatically removed.
|
||||
|
||||
Args:
|
||||
cache_type: The type of cache file.
|
||||
library_name: The library name (only used for MODEL and RECIPE types).
|
||||
env_override: Optional environment variable value that overrides all
|
||||
path resolution. When set, returns this path directly without
|
||||
any migration.
|
||||
|
||||
Returns:
|
||||
The resolved path to use for the cache file.
|
||||
"""
|
||||
# Environment override bypasses all migration logic
|
||||
if env_override:
|
||||
return env_override
|
||||
|
||||
canonical_path = get_cache_file_path(cache_type, library_name, create_dir=True)
|
||||
|
||||
# If file already exists at canonical location, use it
|
||||
if os.path.exists(canonical_path):
|
||||
return canonical_path
|
||||
|
||||
# Check legacy paths for migration
|
||||
legacy_paths = get_legacy_cache_paths(cache_type, library_name)
|
||||
|
||||
for legacy_path in legacy_paths:
|
||||
if os.path.exists(legacy_path):
|
||||
try:
|
||||
shutil.copy2(legacy_path, canonical_path)
|
||||
logger.info(
|
||||
"Migrated %s cache from %s to %s",
|
||||
cache_type.value,
|
||||
legacy_path,
|
||||
canonical_path,
|
||||
)
|
||||
|
||||
_cleanup_legacy_file_after_migration(legacy_path, canonical_path)
|
||||
|
||||
return canonical_path
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Failed to migrate %s cache from %s: %s",
|
||||
cache_type.value,
|
||||
legacy_path,
|
||||
exc,
|
||||
)
|
||||
|
||||
# No legacy file found; return canonical path (will be created fresh)
|
||||
return canonical_path
|
||||
|
||||
|
||||
def get_legacy_cache_files_for_cleanup() -> List[str]:
|
||||
"""Get a list of legacy cache files that can be removed after migration.
|
||||
|
||||
This function returns files that exist in legacy locations and have
|
||||
corresponding files in the new canonical locations.
|
||||
|
||||
Returns:
|
||||
A list of legacy file paths that are safe to remove.
|
||||
"""
|
||||
files_to_remove: List[str] = []
|
||||
|
||||
try:
|
||||
settings_dir = get_settings_dir(create=False)
|
||||
except Exception:
|
||||
return files_to_remove
|
||||
|
||||
# Check each cache type for migrated legacy files
|
||||
for cache_type in CacheType:
|
||||
# For MODEL and RECIPE, we need to check each library
|
||||
if cache_type in (CacheType.MODEL, CacheType.RECIPE):
|
||||
# Check default library
|
||||
_check_legacy_for_cleanup(cache_type, "default", files_to_remove)
|
||||
# Check for any per-library caches in legacy directories
|
||||
legacy_dir_name = "model_cache" if cache_type == CacheType.MODEL else "recipe_cache"
|
||||
legacy_dir = os.path.join(settings_dir, legacy_dir_name)
|
||||
if os.path.isdir(legacy_dir):
|
||||
try:
|
||||
for filename in os.listdir(legacy_dir):
|
||||
if filename.endswith(".sqlite"):
|
||||
library_name = filename[:-7] # Remove .sqlite
|
||||
_check_legacy_for_cleanup(cache_type, library_name, files_to_remove)
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
_check_legacy_for_cleanup(cache_type, None, files_to_remove)
|
||||
|
||||
return files_to_remove
|
||||
|
||||
|
||||
def _check_legacy_for_cleanup(
|
||||
cache_type: CacheType,
|
||||
library_name: Optional[str],
|
||||
files_to_remove: List[str],
|
||||
) -> None:
|
||||
"""Check if a legacy cache file can be removed after migration.
|
||||
|
||||
Args:
|
||||
cache_type: The type of cache file.
|
||||
library_name: The library name (only used for MODEL and RECIPE types).
|
||||
files_to_remove: List to append removable files to.
|
||||
"""
|
||||
canonical_path = get_cache_file_path(cache_type, library_name, create_dir=False)
|
||||
if not os.path.exists(canonical_path):
|
||||
return
|
||||
|
||||
legacy_paths = get_legacy_cache_paths(cache_type, library_name)
|
||||
for legacy_path in legacy_paths:
|
||||
if os.path.exists(legacy_path) and legacy_path not in files_to_remove:
|
||||
files_to_remove.append(legacy_path)
|
||||
|
||||
|
||||
def cleanup_legacy_cache_files(dry_run: bool = True) -> List[str]:
|
||||
"""Remove legacy cache files that have been migrated.
|
||||
|
||||
Args:
|
||||
dry_run: If True, only return the list of files that would be removed
|
||||
without actually removing them.
|
||||
|
||||
Returns:
|
||||
A list of files that were (or would be) removed.
|
||||
"""
|
||||
files = get_legacy_cache_files_for_cleanup()
|
||||
|
||||
if dry_run or not files:
|
||||
return files
|
||||
|
||||
removed: List[str] = []
|
||||
for file_path in files:
|
||||
try:
|
||||
os.remove(file_path)
|
||||
removed.append(file_path)
|
||||
logger.info("Removed legacy cache file: %s", file_path)
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to remove legacy cache file %s: %s", file_path, exc)
|
||||
|
||||
# Try to remove empty legacy directories
|
||||
try:
|
||||
settings_dir = get_settings_dir(create=False)
|
||||
for legacy_dir_name in ("model_cache", "recipe_cache"):
|
||||
legacy_dir = os.path.join(settings_dir, legacy_dir_name)
|
||||
if os.path.isdir(legacy_dir) and not os.listdir(legacy_dir):
|
||||
os.rmdir(legacy_dir)
|
||||
logger.info("Removed empty legacy directory: %s", legacy_dir)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return removed
|
||||
@@ -45,8 +45,13 @@ SUPPORTED_MEDIA_EXTENSIONS = {
|
||||
"videos": [".mp4", ".webm"],
|
||||
}
|
||||
|
||||
# Valid Lora types
|
||||
VALID_LORA_TYPES = ["lora", "locon", "dora"]
|
||||
# Valid sub-types for each scanner type
|
||||
VALID_LORA_SUB_TYPES = ["lora", "locon", "dora"]
|
||||
VALID_CHECKPOINT_SUB_TYPES = ["checkpoint", "diffusion_model"]
|
||||
VALID_EMBEDDING_SUB_TYPES = ["embedding"]
|
||||
|
||||
# Backward compatibility alias
|
||||
VALID_LORA_TYPES = VALID_LORA_SUB_TYPES
|
||||
|
||||
# Supported Civitai model types for user model queries (case-insensitive)
|
||||
CIVITAI_USER_MODEL_TYPES = [
|
||||
|
||||
@@ -121,101 +121,71 @@ class DownloadManager:
|
||||
async def start_download(self, options: dict):
|
||||
"""Start downloading example images for models."""
|
||||
|
||||
# Step 1: Parse options (fast, non-blocking)
|
||||
data = options or {}
|
||||
auto_mode = data.get("auto_mode", False)
|
||||
optimize = data.get("optimize", True)
|
||||
model_types = data.get("model_types", ["lora", "checkpoint"])
|
||||
delay = float(data.get("delay", 0.2))
|
||||
force = data.get("force", False)
|
||||
|
||||
# Step 2: Validate configuration (fast lookup)
|
||||
settings_manager = get_settings_manager()
|
||||
base_path = settings_manager.get("example_images_path")
|
||||
|
||||
if not base_path:
|
||||
error_msg = "Example images path not configured in settings"
|
||||
if auto_mode:
|
||||
logger.debug(error_msg)
|
||||
return {
|
||||
"success": True,
|
||||
"message": "Example images path not configured, skipping auto download",
|
||||
}
|
||||
raise DownloadConfigurationError(error_msg)
|
||||
|
||||
active_library = settings_manager.get_active_library_name()
|
||||
output_dir = self._resolve_output_dir(active_library)
|
||||
if not output_dir:
|
||||
raise DownloadConfigurationError(
|
||||
"Example images path not configured in settings"
|
||||
)
|
||||
|
||||
# Step 3: Load progress file (I/O operation, done outside lock)
|
||||
processed_models = set()
|
||||
failed_models = set()
|
||||
|
||||
try:
|
||||
progress_file, processed_models, failed_models = await self._load_progress_file(output_dir)
|
||||
logger.debug(
|
||||
"Loaded previous progress, %s models already processed, %s models marked as failed",
|
||||
len(processed_models),
|
||||
len(failed_models),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to load progress file: {e}")
|
||||
# Continue with empty sets
|
||||
|
||||
# Step 4: Quick state check and update (minimal lock time)
|
||||
async with self._state_lock:
|
||||
if self._is_downloading:
|
||||
raise DownloadInProgressError(self._progress.snapshot())
|
||||
|
||||
try:
|
||||
data = options or {}
|
||||
auto_mode = data.get("auto_mode", False)
|
||||
optimize = data.get("optimize", True)
|
||||
model_types = data.get("model_types", ["lora", "checkpoint"])
|
||||
delay = float(data.get("delay", 0.2))
|
||||
force = data.get("force", False)
|
||||
|
||||
settings_manager = get_settings_manager()
|
||||
base_path = settings_manager.get("example_images_path")
|
||||
|
||||
if not base_path:
|
||||
error_msg = "Example images path not configured in settings"
|
||||
if auto_mode:
|
||||
logger.debug(error_msg)
|
||||
return {
|
||||
"success": True,
|
||||
"message": "Example images path not configured, skipping auto download",
|
||||
}
|
||||
raise DownloadConfigurationError(error_msg)
|
||||
|
||||
active_library = get_settings_manager().get_active_library_name()
|
||||
output_dir = self._resolve_output_dir(active_library)
|
||||
if not output_dir:
|
||||
raise DownloadConfigurationError(
|
||||
"Example images path not configured in settings"
|
||||
)
|
||||
|
||||
# Reset progress with loaded data
|
||||
self._progress.reset()
|
||||
self._progress["processed_models"] = processed_models
|
||||
self._progress["failed_models"] = failed_models
|
||||
self._stop_requested = False
|
||||
self._progress["status"] = "running"
|
||||
self._progress["start_time"] = time.time()
|
||||
self._progress["end_time"] = None
|
||||
|
||||
progress_file = os.path.join(output_dir, ".download_progress.json")
|
||||
progress_source = progress_file
|
||||
if uses_library_scoped_folders():
|
||||
legacy_root = (
|
||||
get_settings_manager().get("example_images_path") or ""
|
||||
)
|
||||
legacy_progress = (
|
||||
os.path.join(legacy_root, ".download_progress.json")
|
||||
if legacy_root
|
||||
else ""
|
||||
)
|
||||
if (
|
||||
legacy_progress
|
||||
and os.path.exists(legacy_progress)
|
||||
and not os.path.exists(progress_file)
|
||||
):
|
||||
try:
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
shutil.move(legacy_progress, progress_file)
|
||||
logger.info(
|
||||
"Migrated legacy download progress file '%s' to '%s'",
|
||||
legacy_progress,
|
||||
progress_file,
|
||||
)
|
||||
except OSError as exc:
|
||||
logger.warning(
|
||||
"Failed to migrate download progress file from '%s' to '%s': %s",
|
||||
legacy_progress,
|
||||
progress_file,
|
||||
exc,
|
||||
)
|
||||
progress_source = legacy_progress
|
||||
|
||||
if os.path.exists(progress_source):
|
||||
try:
|
||||
with open(progress_source, "r", encoding="utf-8") as f:
|
||||
saved_progress = json.load(f)
|
||||
self._progress["processed_models"] = set(
|
||||
saved_progress.get("processed_models", [])
|
||||
)
|
||||
self._progress["failed_models"] = set(
|
||||
saved_progress.get("failed_models", [])
|
||||
)
|
||||
logger.debug(
|
||||
"Loaded previous progress, %s models already processed, %s models marked as failed",
|
||||
len(self._progress["processed_models"]),
|
||||
len(self._progress["failed_models"]),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to load progress file: {e}")
|
||||
self._progress["processed_models"] = set()
|
||||
self._progress["failed_models"] = set()
|
||||
else:
|
||||
self._progress["processed_models"] = set()
|
||||
self._progress["failed_models"] = set()
|
||||
|
||||
self._is_downloading = True
|
||||
snapshot = self._progress.snapshot()
|
||||
|
||||
# Create the download task without awaiting it
|
||||
# This ensures the HTTP response is returned immediately
|
||||
# while the actual processing happens in the background
|
||||
self._download_task = asyncio.create_task(
|
||||
self._download_all_example_images(
|
||||
output_dir,
|
||||
@@ -227,7 +197,10 @@ class DownloadManager:
|
||||
)
|
||||
)
|
||||
|
||||
snapshot = self._progress.snapshot()
|
||||
# Add a callback to handle task completion/errors
|
||||
self._download_task.add_done_callback(
|
||||
lambda t: self._handle_download_task_done(t, output_dir)
|
||||
)
|
||||
except ExampleImagesDownloadError:
|
||||
# Re-raise our own exception types without wrapping
|
||||
self._is_downloading = False
|
||||
@@ -241,11 +214,26 @@ class DownloadManager:
|
||||
)
|
||||
raise ExampleImagesDownloadError(str(e)) from e
|
||||
|
||||
await self._broadcast_progress(status="running")
|
||||
# Broadcast progress in the background without blocking the response
|
||||
# This ensures the HTTP response is returned immediately
|
||||
asyncio.create_task(self._broadcast_progress(status="running"))
|
||||
|
||||
return {"success": True, "message": "Download started", "status": snapshot}
|
||||
|
||||
async def get_status(self, request):
|
||||
def _handle_download_task_done(self, task: asyncio.Task, output_dir: str) -> None:
|
||||
"""Handle download task completion, including saving progress on error."""
|
||||
try:
|
||||
# This will re-raise any exception from the task
|
||||
task.result()
|
||||
except Exception as e:
|
||||
logger.error(f"Download task failed with error: {e}", exc_info=True)
|
||||
# Ensure progress is saved even on failure
|
||||
try:
|
||||
self._save_progress(output_dir)
|
||||
except Exception as save_error:
|
||||
logger.error(f"Failed to save progress after task failure: {save_error}")
|
||||
|
||||
async def get_status(self, request) -> dict:
|
||||
"""Get the current status of example images download."""
|
||||
|
||||
return {
|
||||
@@ -254,6 +242,198 @@ class DownloadManager:
|
||||
"status": self._progress.snapshot(),
|
||||
}
|
||||
|
||||
async def _load_progress_file(self, output_dir: str) -> tuple[str, set, set]:
|
||||
"""Load progress file from disk. Returns (progress_file_path, processed_models, failed_models).
|
||||
|
||||
This is a separate async method to allow running in executor to avoid blocking event loop.
|
||||
"""
|
||||
loop = asyncio.get_event_loop()
|
||||
return await loop.run_in_executor(
|
||||
None, self._load_progress_file_sync, output_dir
|
||||
)
|
||||
|
||||
def _load_progress_file_sync(self, output_dir: str) -> tuple[str, set, set]:
|
||||
"""Synchronous implementation of progress file loading."""
|
||||
progress_file = os.path.join(output_dir, ".download_progress.json")
|
||||
progress_source = progress_file
|
||||
|
||||
# Handle legacy migration if needed
|
||||
if uses_library_scoped_folders():
|
||||
legacy_root = get_settings_manager().get("example_images_path") or ""
|
||||
legacy_progress = (
|
||||
os.path.join(legacy_root, ".download_progress.json")
|
||||
if legacy_root
|
||||
else ""
|
||||
)
|
||||
if (
|
||||
legacy_progress
|
||||
and os.path.exists(legacy_progress)
|
||||
and not os.path.exists(progress_file)
|
||||
):
|
||||
try:
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
shutil.move(legacy_progress, progress_file)
|
||||
logger.info(
|
||||
"Migrated legacy download progress file '%s' to '%s'",
|
||||
legacy_progress,
|
||||
progress_file,
|
||||
)
|
||||
except OSError as exc:
|
||||
logger.warning(
|
||||
"Failed to migrate download progress file from '%s' to '%s': %s",
|
||||
legacy_progress,
|
||||
progress_file,
|
||||
exc,
|
||||
)
|
||||
progress_source = legacy_progress
|
||||
|
||||
processed_models = set()
|
||||
failed_models = set()
|
||||
|
||||
if os.path.exists(progress_source):
|
||||
try:
|
||||
with open(progress_source, "r", encoding="utf-8") as f:
|
||||
saved_progress = json.load(f)
|
||||
processed_models = set(saved_progress.get("processed_models", []))
|
||||
failed_models = set(saved_progress.get("failed_models", []))
|
||||
except Exception:
|
||||
# Return empty sets on error
|
||||
pass
|
||||
|
||||
return progress_file, processed_models, failed_models
|
||||
|
||||
def _load_progress_sets_sync(self, progress_file: str) -> tuple[set, set]:
|
||||
"""Load only the processed and failed model sets from progress file.
|
||||
|
||||
This is a lighter version for quick checks without legacy migration.
|
||||
Returns (processed_models, failed_models).
|
||||
"""
|
||||
processed_models = set()
|
||||
failed_models = set()
|
||||
|
||||
if os.path.exists(progress_file):
|
||||
try:
|
||||
with open(progress_file, "r", encoding="utf-8") as f:
|
||||
saved_progress = json.load(f)
|
||||
processed_models = set(saved_progress.get("processed_models", []))
|
||||
failed_models = set(saved_progress.get("failed_models", []))
|
||||
except Exception:
|
||||
# Return empty sets on error
|
||||
pass
|
||||
|
||||
return processed_models, failed_models
|
||||
|
||||
async def check_pending_models(self, model_types: list[str]) -> dict:
|
||||
"""Quickly check how many models need example images downloaded.
|
||||
|
||||
This is a lightweight check that avoids the overhead of starting
|
||||
a full download task when no work is needed.
|
||||
|
||||
Returns:
|
||||
dict with keys:
|
||||
- total_models: Total number of models across specified types
|
||||
- pending_count: Number of models needing example images
|
||||
- processed_count: Number of already processed models
|
||||
- failed_count: Number of models marked as failed
|
||||
- needs_download: True if there are pending models to process
|
||||
"""
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
|
||||
if self._is_downloading:
|
||||
return {
|
||||
"success": True,
|
||||
"is_downloading": True,
|
||||
"total_models": 0,
|
||||
"pending_count": 0,
|
||||
"processed_count": 0,
|
||||
"failed_count": 0,
|
||||
"needs_download": False,
|
||||
"message": "Download already in progress",
|
||||
}
|
||||
|
||||
try:
|
||||
# Get scanners
|
||||
scanners = []
|
||||
if "lora" in model_types:
|
||||
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||
scanners.append(("lora", lora_scanner))
|
||||
|
||||
if "checkpoint" in model_types:
|
||||
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
scanners.append(("checkpoint", checkpoint_scanner))
|
||||
|
||||
if "embedding" in model_types:
|
||||
embedding_scanner = await ServiceRegistry.get_embedding_scanner()
|
||||
scanners.append(("embedding", embedding_scanner))
|
||||
|
||||
# Load progress file to check processed models (async to avoid blocking)
|
||||
settings_manager = get_settings_manager()
|
||||
active_library = settings_manager.get_active_library_name()
|
||||
output_dir = self._resolve_output_dir(active_library)
|
||||
|
||||
processed_models: set[str] = set()
|
||||
failed_models: set[str] = set()
|
||||
|
||||
if output_dir:
|
||||
progress_file = os.path.join(output_dir, ".download_progress.json")
|
||||
loop = asyncio.get_event_loop()
|
||||
processed_models, failed_models = await loop.run_in_executor(
|
||||
None, self._load_progress_sets_sync, progress_file
|
||||
)
|
||||
|
||||
# Collect all models and count in a single pass per scanner
|
||||
total_models = 0
|
||||
all_models_with_hash: list[tuple[str, str]] = [] # (hash, name) pairs
|
||||
|
||||
for scanner_type, scanner in scanners:
|
||||
cache = await scanner.get_cached_data()
|
||||
if cache and cache.raw_data:
|
||||
for model in cache.raw_data:
|
||||
total_models += 1
|
||||
raw_hash = model.get("sha256")
|
||||
if raw_hash:
|
||||
model_hash = raw_hash.lower()
|
||||
all_models_with_hash.append((model_hash, model.get("model_name", "Unknown")))
|
||||
|
||||
models_with_hash = len(all_models_with_hash)
|
||||
|
||||
# Calculate pending count: check which models actually need processing
|
||||
# A model is pending if it has a hash, is not in processed_models,
|
||||
# and its folder doesn't exist or is empty
|
||||
pending_hashes = set()
|
||||
for model_hash, model_name in all_models_with_hash:
|
||||
if model_hash not in processed_models:
|
||||
# Check if model folder exists with files
|
||||
model_dir = ExampleImagePathResolver.get_model_folder(
|
||||
model_hash, active_library
|
||||
)
|
||||
if not _model_directory_has_files(model_dir):
|
||||
pending_hashes.add(model_hash)
|
||||
|
||||
pending_count = len(pending_hashes)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"is_downloading": False,
|
||||
"total_models": total_models,
|
||||
"pending_count": pending_count,
|
||||
"processed_count": len(processed_models),
|
||||
"failed_count": len(failed_models),
|
||||
"needs_download": pending_count > 0,
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking pending models: {e}", exc_info=True)
|
||||
return {
|
||||
"success": False,
|
||||
"error": str(e),
|
||||
"total_models": 0,
|
||||
"pending_count": 0,
|
||||
"processed_count": 0,
|
||||
"failed_count": 0,
|
||||
"needs_download": False,
|
||||
}
|
||||
|
||||
async def pause_download(self, request):
|
||||
"""Pause the example images download."""
|
||||
|
||||
|
||||
@@ -43,8 +43,15 @@ class ExampleImagesProcessor:
|
||||
return media_url
|
||||
|
||||
@staticmethod
|
||||
def _get_file_extension_from_content_or_headers(content, headers, fallback_url=None):
|
||||
"""Determine file extension from content magic bytes or headers"""
|
||||
def _get_file_extension_from_content_or_headers(content, headers, fallback_url=None, media_type_hint=None):
|
||||
"""Determine file extension from content magic bytes or headers
|
||||
|
||||
Args:
|
||||
content: File content bytes
|
||||
headers: HTTP response headers
|
||||
fallback_url: Original URL for extension extraction
|
||||
media_type_hint: Optional media type hint from metadata (e.g., "video" or "image")
|
||||
"""
|
||||
# Check magic bytes for common formats
|
||||
if content:
|
||||
if content.startswith(b'\xFF\xD8\xFF'):
|
||||
@@ -82,6 +89,10 @@ class ExampleImagesProcessor:
|
||||
if ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or ext in SUPPORTED_MEDIA_EXTENSIONS['videos']:
|
||||
return ext
|
||||
|
||||
# Use media type hint from metadata if available
|
||||
if media_type_hint == "video":
|
||||
return '.mp4'
|
||||
|
||||
# Default fallback
|
||||
return '.jpg'
|
||||
|
||||
@@ -136,7 +147,7 @@ class ExampleImagesProcessor:
|
||||
if success:
|
||||
# Determine file extension from content or headers
|
||||
media_ext = ExampleImagesProcessor._get_file_extension_from_content_or_headers(
|
||||
content, headers, original_url
|
||||
content, headers, original_url, image.get("type")
|
||||
)
|
||||
|
||||
# Check if the detected file type is supported
|
||||
@@ -219,7 +230,7 @@ class ExampleImagesProcessor:
|
||||
if success:
|
||||
# Determine file extension from content or headers
|
||||
media_ext = ExampleImagesProcessor._get_file_extension_from_content_or_headers(
|
||||
content, headers, original_url
|
||||
content, headers, original_url, image.get("type")
|
||||
)
|
||||
|
||||
# Check if the detected file type is supported
|
||||
|
||||
@@ -17,7 +17,7 @@ async def extract_lora_metadata(file_path: str) -> Dict:
|
||||
base_model = determine_base_model(metadata.get("ss_base_model_version"))
|
||||
return {"base_model": base_model}
|
||||
except Exception as e:
|
||||
print(f"Error reading metadata from {file_path}: {str(e)}")
|
||||
logger.error(f"Error reading metadata from {file_path}: {str(e)}")
|
||||
return {"base_model": "Unknown"}
|
||||
|
||||
async def extract_checkpoint_metadata(file_path: str) -> dict:
|
||||
|
||||
@@ -223,7 +223,7 @@ class MetadataManager:
|
||||
preview_url=normalize_path(preview_url),
|
||||
tags=[],
|
||||
modelDescription="",
|
||||
model_type="checkpoint",
|
||||
sub_type="checkpoint",
|
||||
from_civitai=True
|
||||
)
|
||||
elif model_class.__name__ == "EmbeddingMetadata":
|
||||
@@ -238,6 +238,7 @@ class MetadataManager:
|
||||
preview_url=normalize_path(preview_url),
|
||||
tags=[],
|
||||
modelDescription="",
|
||||
sub_type="embedding",
|
||||
from_civitai=True
|
||||
)
|
||||
else: # Default to LoraMetadata
|
||||
|
||||
@@ -25,6 +25,7 @@ class BaseModelMetadata:
|
||||
favorite: bool = False # Whether the model is a favorite
|
||||
exclude: bool = False # Whether to exclude this model from the cache
|
||||
db_checked: bool = False # Whether checked in archive DB
|
||||
skip_metadata_refresh: bool = False # Whether to skip this model during bulk metadata refresh
|
||||
metadata_source: Optional[str] = None # Last provider that supplied metadata
|
||||
last_checked_at: float = 0 # Last checked timestamp
|
||||
_unknown_fields: Dict[str, Any] = field(default_factory=dict, repr=False, compare=False) # Store unknown fields
|
||||
@@ -142,27 +143,27 @@ class LoraMetadata(BaseModelMetadata):
|
||||
@classmethod
|
||||
def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'LoraMetadata':
|
||||
"""Create LoraMetadata instance from Civitai version info"""
|
||||
file_name = file_info['name']
|
||||
file_name = file_info.get('name', '')
|
||||
base_model = determine_base_model(version_info.get('baseModel', ''))
|
||||
|
||||
|
||||
# Extract tags and description if available
|
||||
tags = []
|
||||
description = ""
|
||||
if 'model' in version_info:
|
||||
if 'tags' in version_info['model']:
|
||||
tags = version_info['model']['tags']
|
||||
if 'description' in version_info['model']:
|
||||
description = version_info['model']['description']
|
||||
|
||||
model_data = version_info.get('model') or {}
|
||||
if 'tags' in model_data:
|
||||
tags = model_data['tags']
|
||||
if 'description' in model_data:
|
||||
description = model_data['description']
|
||||
|
||||
return cls(
|
||||
file_name=os.path.splitext(file_name)[0],
|
||||
model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
|
||||
model_name=model_data.get('name', os.path.splitext(file_name)[0]),
|
||||
file_path=save_path.replace(os.sep, '/'),
|
||||
size=file_info.get('sizeKB', 0) * 1024,
|
||||
modified=datetime.now().timestamp(),
|
||||
sha256=file_info['hashes'].get('SHA256', '').lower(),
|
||||
sha256=(file_info.get('hashes') or {}).get('SHA256', '').lower(),
|
||||
base_model=base_model,
|
||||
preview_url=None, # Will be updated after preview download
|
||||
preview_url='', # Will be updated after preview download
|
||||
preview_nsfw_level=0, # Will be updated after preview download
|
||||
from_civitai=True,
|
||||
civitai=version_info,
|
||||
@@ -173,37 +174,37 @@ class LoraMetadata(BaseModelMetadata):
|
||||
@dataclass
|
||||
class CheckpointMetadata(BaseModelMetadata):
|
||||
"""Represents the metadata structure for a Checkpoint model"""
|
||||
model_type: str = "checkpoint" # Model type (checkpoint, diffusion_model, etc.)
|
||||
sub_type: str = "checkpoint" # Model sub-type (checkpoint, diffusion_model, etc.)
|
||||
|
||||
@classmethod
|
||||
def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'CheckpointMetadata':
|
||||
"""Create CheckpointMetadata instance from Civitai version info"""
|
||||
file_name = file_info['name']
|
||||
file_name = file_info.get('name', '')
|
||||
base_model = determine_base_model(version_info.get('baseModel', ''))
|
||||
model_type = version_info.get('type', 'checkpoint')
|
||||
|
||||
sub_type = version_info.get('type', 'checkpoint')
|
||||
|
||||
# Extract tags and description if available
|
||||
tags = []
|
||||
description = ""
|
||||
if 'model' in version_info:
|
||||
if 'tags' in version_info['model']:
|
||||
tags = version_info['model']['tags']
|
||||
if 'description' in version_info['model']:
|
||||
description = version_info['model']['description']
|
||||
|
||||
model_data = version_info.get('model') or {}
|
||||
if 'tags' in model_data:
|
||||
tags = model_data['tags']
|
||||
if 'description' in model_data:
|
||||
description = model_data['description']
|
||||
|
||||
return cls(
|
||||
file_name=os.path.splitext(file_name)[0],
|
||||
model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
|
||||
model_name=model_data.get('name', os.path.splitext(file_name)[0]),
|
||||
file_path=save_path.replace(os.sep, '/'),
|
||||
size=file_info.get('sizeKB', 0) * 1024,
|
||||
modified=datetime.now().timestamp(),
|
||||
sha256=file_info['hashes'].get('SHA256', '').lower(),
|
||||
sha256=(file_info.get('hashes') or {}).get('SHA256', '').lower(),
|
||||
base_model=base_model,
|
||||
preview_url=None, # Will be updated after preview download
|
||||
preview_url='', # Will be updated after preview download
|
||||
preview_nsfw_level=0,
|
||||
from_civitai=True,
|
||||
civitai=version_info,
|
||||
model_type=model_type,
|
||||
sub_type=sub_type,
|
||||
tags=tags,
|
||||
modelDescription=description
|
||||
)
|
||||
@@ -211,37 +212,37 @@ class CheckpointMetadata(BaseModelMetadata):
|
||||
@dataclass
|
||||
class EmbeddingMetadata(BaseModelMetadata):
|
||||
"""Represents the metadata structure for an Embedding model"""
|
||||
model_type: str = "embedding" # Model type (embedding, textual_inversion, etc.)
|
||||
sub_type: str = "embedding"
|
||||
|
||||
@classmethod
|
||||
def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'EmbeddingMetadata':
|
||||
"""Create EmbeddingMetadata instance from Civitai version info"""
|
||||
file_name = file_info['name']
|
||||
file_name = file_info.get('name', '')
|
||||
base_model = determine_base_model(version_info.get('baseModel', ''))
|
||||
model_type = version_info.get('type', 'embedding')
|
||||
|
||||
sub_type = version_info.get('type', 'embedding')
|
||||
|
||||
# Extract tags and description if available
|
||||
tags = []
|
||||
description = ""
|
||||
if 'model' in version_info:
|
||||
if 'tags' in version_info['model']:
|
||||
tags = version_info['model']['tags']
|
||||
if 'description' in version_info['model']:
|
||||
description = version_info['model']['description']
|
||||
|
||||
model_data = version_info.get('model') or {}
|
||||
if 'tags' in model_data:
|
||||
tags = model_data['tags']
|
||||
if 'description' in model_data:
|
||||
description = model_data['description']
|
||||
|
||||
return cls(
|
||||
file_name=os.path.splitext(file_name)[0],
|
||||
model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
|
||||
model_name=model_data.get('name', os.path.splitext(file_name)[0]),
|
||||
file_path=save_path.replace(os.sep, '/'),
|
||||
size=file_info.get('sizeKB', 0) * 1024,
|
||||
modified=datetime.now().timestamp(),
|
||||
sha256=file_info['hashes'].get('SHA256', '').lower(),
|
||||
sha256=(file_info.get('hashes') or {}).get('SHA256', '').lower(),
|
||||
base_model=base_model,
|
||||
preview_url=None, # Will be updated after preview download
|
||||
preview_url='', # Will be updated after preview download
|
||||
preview_nsfw_level=0,
|
||||
from_civitai=True,
|
||||
civitai=version_info,
|
||||
model_type=model_type,
|
||||
sub_type=sub_type,
|
||||
tags=tags,
|
||||
modelDescription=description
|
||||
)
|
||||
|
||||
@@ -138,19 +138,15 @@ def calculate_recipe_fingerprint(loras):
|
||||
if not loras:
|
||||
return ""
|
||||
|
||||
# Filter valid entries and extract hash and strength
|
||||
valid_loras = []
|
||||
for lora in loras:
|
||||
# Skip excluded loras
|
||||
if lora.get("exclude", False):
|
||||
continue
|
||||
|
||||
# Get the hash - use modelVersionId as fallback if hash is empty
|
||||
hash_value = lora.get("hash", "").lower()
|
||||
if not hash_value and lora.get("isDeleted", False) and lora.get("modelVersionId"):
|
||||
if not hash_value and lora.get("modelVersionId"):
|
||||
hash_value = str(lora.get("modelVersionId"))
|
||||
|
||||
# Skip entries without a valid hash
|
||||
if not hash_value:
|
||||
continue
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
[project]
|
||||
name = "comfyui-lora-manager"
|
||||
description = "Revolutionize your workflow with the ultimate LoRA companion for ComfyUI!"
|
||||
version = "0.9.13"
|
||||
version = "0.9.16"
|
||||
license = {file = "LICENSE"}
|
||||
dependencies = [
|
||||
"aiohttp",
|
||||
|
||||
@@ -4,9 +4,13 @@ testpaths = tests
|
||||
python_files = test_*.py
|
||||
python_classes = Test*
|
||||
python_functions = test_*
|
||||
# Register async marker for coroutine-style tests
|
||||
# Asyncio configuration
|
||||
asyncio_mode = auto
|
||||
asyncio_default_fixture_loop_scope = function
|
||||
# Register markers
|
||||
markers =
|
||||
asyncio: execute test within asyncio event loop
|
||||
no_settings_dir_isolation: allow tests to use real settings paths
|
||||
integration: integration tests requiring external resources
|
||||
# Skip problematic directories to avoid import conflicts
|
||||
norecursedirs = .git .tox dist build *.egg __pycache__ py
|
||||
norecursedirs = .git .tox dist build *.egg __pycache__ py .hypothesis
|
||||
221787
refs/danbooru_e621_merged.csv
Normal file
221787
refs/danbooru_e621_merged.csv
Normal file
File diff suppressed because one or more lines are too long
@@ -1,3 +1,7 @@
|
||||
-r requirements.txt
|
||||
pytest>=7.4
|
||||
pytest-cov>=4.1
|
||||
pytest-asyncio>=0.21.0
|
||||
hypothesis>=6.0
|
||||
syrupy>=5.0
|
||||
pytest-benchmark>=5.0
|
||||
|
||||
0
scripts/sync_translation_keys.py
Normal file → Executable file
0
scripts/sync_translation_keys.py
Normal file → Executable file
@@ -154,6 +154,7 @@ class StandaloneServer:
|
||||
self.app = web.Application(
|
||||
logger=logger,
|
||||
middlewares=[cache_control],
|
||||
client_max_size=256 * 1024 * 1024,
|
||||
handler_args={
|
||||
"max_field_size": HEADER_SIZE_LIMIT,
|
||||
"max_line_size": HEADER_SIZE_LIMIT,
|
||||
|
||||
@@ -60,6 +60,9 @@ body {
|
||||
--badge-update-bg: oklch(72% 0.2 220);
|
||||
--badge-update-text: oklch(28% 0.03 220);
|
||||
--badge-update-glow: oklch(72% 0.2 220 / 0.28);
|
||||
--badge-skip-refresh-bg: oklch(82% 0.12 45);
|
||||
--badge-skip-refresh-text: oklch(35% 0.02 45);
|
||||
--badge-skip-refresh-glow: oklch(82% 0.12 45 / 0.15);
|
||||
|
||||
/* Spacing Scale */
|
||||
--space-1: calc(8px * 1);
|
||||
@@ -114,6 +117,9 @@ html[data-theme="light"] {
|
||||
--badge-update-bg: oklch(62% 0.18 220);
|
||||
--badge-update-text: oklch(98% 0.02 240);
|
||||
--badge-update-glow: oklch(62% 0.18 220 / 0.4);
|
||||
--badge-skip-refresh-bg: oklch(82% 0.12 45);
|
||||
--badge-skip-refresh-text: oklch(98% 0.02 45);
|
||||
--badge-skip-refresh-glow: oklch(82% 0.12 45 / 0.15);
|
||||
}
|
||||
|
||||
body {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user