mirror of https://github.com/willmiao/ComfyUI-Lora-Manager.git
synced 2026-03-21 21:22:11 -03:00

Compare commits: 116 commits. SHA1s:

9e1a2e3bb7, 40cbb2155c, a8d7070832, ab7266f3a4, 3053b13fcb, f3544b3471, 1610048974, fc6f1bf95b, 67b274c1b2, fb0d6b5641,
d30fbeb286, 46e430ebbb, bc4cd45fcb, bdc86ddf15, ded17c1479, 933e2fc01d, 1cddeee264, 183c000080, adf7b6d4b2, 0566d50346,
4275dc3003, 30956aeefc, 64e1dd3dd6, 0dc4b6f728, 86074c87d7, 6f9245df01, 4540e47055, 4bb8981e78, c49be91aa0, 2b847039d4,
1147725fd7, 26891e12a4, 2f7e44a76f, 9366d3d2d0, 6b606a5cc8, e5339c178a, 1a76f74482, 13f13eb095, 125fdecd61, d05076d258,
00b77581fc, 897787d17c, d5a280cf2b, a0c2d9b5ad, e713bd1ca2, beb8ff1dd1, 6a8f0867d9, 51ad1c9a33, 34872eb612, 8b4e3128ff,
c66cbc800b, 21941521a0, 0d33884052, 415df49377, f5f45002c7, 1edf7126bb, a1a55a1002, 45f5cb46bd, 1b5e608a27, a7df8ae15c,
47ce0d0fe2, b220e288d0, 1fc8b45b68, 62f06302f0, 3e5cb223f3, 4ee5b7481c, e104b78c01, ba1ac58721, a4fbeb6295, 68f8871403,
6fd74952b7, 1ea468cfc4, 14721c265f, 821827a375, 9ba3e2c204, d287883671, ead34818db, a060010b96, 76a92ac847, 74bc490383,
510d476323, 1e7257fd53, 4ff1f51b1c, 74507cef05, c23ab04d90, d50dde6cf6, fcb1fb39be, b0ef74f802, f332aef41d, 1f91a3da8e,
16840c321d, c109e392ad, 5e69671366, 52d23d9b75, 4c4e6d7a7b, 03b6e78705, 24c01141d7, 6dc2811af4, e6425dce32, 95e2ff5f1e,
92ac487128, 3250fa89cb, 7475de366b, affb507b37, 3320b80150, fb2b69b787, 29a05f6533, 9fa3fac973, 904b0d104a, 1d31dae110,
476ecb7423, 4eb67cf6da, a5a9f7ed83, c0b029e228, 9bebcc9a4b, 8c6311355d
.gitignore (vendored)

@@ -5,3 +5,4 @@ output/*
 py/run_test.py
 .vscode/
 cache/
+civitai/

README.md
@@ -34,6 +34,19 @@ Enhance your Civitai browsing experience with our companion browser extension! S
|
||||
|
||||
## Release Notes
|
||||
|
||||
### v0.9.3
|
||||
* **Metadata Archive Database Support** - Added the ability to download and utilize a metadata archive database, enabling access to metadata for models that have been deleted from CivitAI.
|
||||
* **App-Level Proxy Settings** - Introduced support for configuring a global proxy within the application, making it easier to use the manager behind network restrictions.
|
||||
* **Bug Fixes** - Various bug fixes for improved stability and reliability.
|
||||
|
||||
### v0.9.2
|
||||
* **Bulk Auto-Organization Action** - Added a new bulk auto-organization feature. You can now select multiple models and automatically organize them according to your current path template settings for streamlined management.
|
||||
* **Bug Fixes** - Addressed several bugs to improve stability and reliability.
|
||||
|
||||
### v0.9.1
|
||||
* **Enhanced Bulk Operations** - Improved bulk operations with Marquee Selection and a bulk operation context menu, providing a more intuitive, desktop-application-like user experience.
|
||||
* **New Bulk Actions** - Added bulk operations for adding tags and setting base models to multiple models simultaneously.
|
||||
|
||||
### v0.9.0
|
||||
* **UI Overhaul for Enhanced Navigation** - Replaced the top flat folder tags with a new folder sidebar and breadcrumb navigation system for a more intuitive folder browsing and selection experience.
|
||||
* **Dual-Mode Folder Sidebar** - The new folder sidebar offers two display modes: 'List Mode,' which mirrors the classic folder view, and 'Tree Mode,' which presents a hierarchical folder structure for effortless navigation through nested directories.
|
||||
@@ -69,61 +82,6 @@ Enhance your Civitai browsing experience with our companion browser extension! S
|
||||
* **Enhanced Node Usability** - Improved user experience for Lora Loader, Lora Stacker, and WanVideo Lora Select nodes by fixing the maximum height of the text input area. Users can now freely and conveniently adjust the LoRA region within these nodes.
|
||||
* **Compatibility Fixes** - Resolved compatibility issues with ComfyUI and certain custom nodes, including ComfyUI-Custom-Scripts, ensuring smoother integration and operation.
|
||||
|
||||
### v0.8.25
|
||||
* **LoRA List Reordering**
|
||||
- Drag & Drop: Easily rearrange LoRA entries using the drag handle.
|
||||
- Keyboard Shortcuts:
|
||||
- Arrow keys: Navigate between LoRAs
|
||||
- Ctrl/Cmd + Arrow: Move selected LoRA up/down
|
||||
- Ctrl/Cmd + Home/End: Move selected LoRA to top/bottom
|
||||
- Delete/Backspace: Remove selected LoRA
|
||||
- Context Menu: Right-click for quick actions like Move Up, Move Down, Move to Top, Move to Bottom.
|
||||
* **Bulk Operations for Checkpoints & Embeddings**
|
||||
- Bulk Mode: Select multiple checkpoints or embeddings for batch actions.
|
||||
- Bulk Refresh: Update Civitai metadata for selected models.
|
||||
- Bulk Delete: Remove multiple models at once.
|
||||
- Bulk Move (Embeddings): Move selected embeddings to a different folder.
|
||||
* **New Setting: Auto Download Example Images**
|
||||
- Automatically fetch example images for models missing previews (requires download location to be set). Enabled by default.
|
||||
* **General Improvements**
|
||||
- Various user experience enhancements and stability fixes.
|
||||
|
||||
### v0.8.22
|
||||
* **Embeddings Management** - Added Embeddings page for comprehensive embedding model management.
|
||||
* **Advanced Sorting Options** - Introduced flexible sorting controls, allowing sorting by name, added date, or file size in both ascending and descending order.
|
||||
* **Custom Download Path Templates & Base Model Mapping** - Implemented UI settings for configuring download path templates and base model path mappings, allowing customized model organization and storage location when downloading models via LM Civitai Extension.
|
||||
* **LM Civitai Extension Enhancements** - Improved concurrent download performance and stability, with new support for canceling active downloads directly from the extension interface.
|
||||
* **Update Feature** - Added update functionality, allowing users to update LoRA Manager to the latest release version directly from the LoRA Manager UI.
|
||||
* **Bulk Operations: Refresh All** - Added bulk refresh functionality, allowing users to update Civitai metadata across multiple LoRAs.
|
||||
|
||||
### v0.8.20
|
||||
* **LM Civitai Extension** - Released [browser extension through Chrome Web Store](https://chromewebstore.google.com/detail/lm-civitai-extension/capigligggeijgmocnaflanlbghnamgm?utm_source=item-share-cb) that works seamlessly with LoRA Manager to enhance Civitai browsing experience, showing which models are already in your local library, enabling one-click downloads, and providing queue and parallel download support
|
||||
* **Enhanced Lora Loader** - Added support for nunchaku, improving convenience when working with ComfyUI-nunchaku workflows, plus new template workflows for quick onboarding
|
||||
* **WanVideo Integration** - Introduced WanVideo Lora Select (LoraManager) node compatible with ComfyUI-WanVideoWrapper for streamlined lora usage in video workflows, including a template workflow to help you get started quickly
|
||||
|
||||
### v0.8.19
|
||||
* **Analytics Dashboard** - Added new Statistics page providing comprehensive visual analysis of model collection and usage patterns for better library insights
|
||||
* **Target Node Selection** - Enhanced workflow integration with intelligent target choosing when sending LoRAs/recipes to workflows with multiple loader/stacker nodes; a visual selector now appears showing node color, type, ID, and title for precise targeting
|
||||
* **Enhanced NSFW Controls** - Added support for setting NSFW levels on recipes with automatic content blurring based on user preferences
|
||||
* **Customizable Card Display** - New display settings allowing users to choose whether card information and action buttons are always visible or only revealed on hover
|
||||
* **Expanded Compatibility** - Added support for efficiency-nodes-comfyui in Save Recipe and Save Image nodes, plus fixed compatibility with ComfyUI_Custom_Nodes_AlekPet
|
||||
|
||||
### v0.8.18
|
||||
* **Custom Example Images** - Added ability to import your own example images for LoRAs and checkpoints with automatic metadata extraction from embedded information
|
||||
* **Enhanced Example Management** - New action buttons to set specific examples as previews or delete custom examples
|
||||
* **Improved Duplicate Detection** - Enhanced "Find Duplicates" with hash verification feature to eliminate false positives when identifying duplicate models
|
||||
* **Tag Management** - Added tag editing functionality allowing users to customize and manage model tags
|
||||
* **Advanced Selection Controls** - Implemented Ctrl+A shortcut for quickly selecting all filtered LoRAs, automatically entering bulk mode when needed
|
||||
* **Note**: Cache file functionality temporarily disabled pending rework
|
||||
|
||||
### v0.8.17
|
||||
* **Duplicate Model Detection** - Added "Find Duplicates" functionality for LoRAs and checkpoints using model file hash detection, enabling convenient viewing and batch deletion of duplicate models
|
||||
* **Enhanced URL Recipe Imports** - Optimized import recipe via URL functionality using CivitAI API calls instead of web scraping, now supporting all rated images (including NSFW) for recipe imports
|
||||
* **Improved TriggerWord Control** - Enhanced TriggerWord Toggle node with new default_active switch to set the initial state (active/inactive) when trigger words are added
|
||||
* **Centralized Example Management** - Added "Migrate Existing Example Images" feature to consolidate downloaded example images from model folders into central storage with customizable naming patterns
|
||||
* **Intelligent Word Suggestions** - Implemented smart trigger word suggestions by reading class tokens and tag frequency from safetensors files, displaying recommendations when editing trigger words
|
||||
* **Model Version Management** - Added "Re-link to CivitAI" context menu option for connecting models to different CivitAI versions when needed
|
||||
|
||||
[View Update History](./update_logs.md)
|
||||
|
||||
---
|
||||
@@ -181,7 +139,7 @@ Enhance your Civitai browsing experience with our companion browser extension! S

### Option 2: **Portable Standalone Edition** (No ComfyUI required)

-1. Download the [Portable Package](https://github.com/willmiao/ComfyUI-Lora-Manager/releases/download/v0.8.26/lora_manager_portable.7z)
+1. Download the [Portable Package](https://github.com/willmiao/ComfyUI-Lora-Manager/releases/download/v0.9.2/lora_manager_portable.7z)
 2. Copy the provided `settings.json.example` file to create a new file named `settings.json` in `comfyui-lora-manager` folder
 3. Edit `settings.json` to include your correct model folder paths and CivitAI API key
 4. Run run.bat

@@ -5,6 +5,7 @@ from .py.nodes.lora_stacker import LoraStacker
 from .py.nodes.save_image import SaveImage
 from .py.nodes.debug_metadata import DebugMetadata
 from .py.nodes.wanvideo_lora_select import WanVideoLoraSelect
+from .py.nodes.wanvideo_lora_select_from_text import WanVideoLoraSelectFromText
 # Import metadata collector to install hooks on startup
 from .py.metadata_collector import init as init_metadata_collector

@@ -15,7 +16,8 @@ NODE_CLASS_MAPPINGS = {
     LoraStacker.NAME: LoraStacker,
     SaveImage.NAME: SaveImage,
     DebugMetadata.NAME: DebugMetadata,
-    WanVideoLoraSelect.NAME: WanVideoLoraSelect
+    WanVideoLoraSelect.NAME: WanVideoLoraSelect,
+    WanVideoLoraSelectFromText.NAME: WanVideoLoraSelectFromText
 }

 WEB_DIRECTORY = "./web/comfyui"

docs/LM-Extension-Wiki.md (new file)

@@ -0,0 +1,176 @@
|
||||
## Overview
|
||||
|
||||
The **LoRA Manager Civitai Extension** is a browser extension designed to work seamlessly with [LoRA Manager](https://github.com/willmiao/ComfyUI-Lora-Manager) to significantly enhance your browsing experience on [Civitai](https://civitai.com). With this extension, you can:
|
||||
|
||||
✅ Instantly see which models are already present in your local library
|
||||
✅ Download new models with a single click
|
||||
✅ Manage downloads efficiently with queue and parallel download support
|
||||
✅ Keep your downloaded models automatically organized according to your custom settings
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Why Are All Features for Supporters Only?
|
||||
|
||||
I love building tools for the Stable Diffusion and ComfyUI communities, and LoRA Manager is a passion project that I've poured countless hours into. When I created this companion extension, my hope was to offer its core features for free, as a thank-you to all of you.
|
||||
|
||||
Unfortunately, I've reached a point where I need to be realistic. The level of support from the free model has been far lower than what's needed to justify the continuous development and maintenance for both projects. It was a difficult decision, but I've chosen to make the extension's features exclusive to supporters.
|
||||
|
||||
This change is crucial for me to be able to continue dedicating my time to improving the free and open-source LoRA Manager, which I'm committed to keeping available for everyone.
|
||||
|
||||
Your support does more than just unlock a few features—it allows me to keep innovating and ensures the core LoRA Manager project thrives. I'm incredibly grateful for your understanding and any support you can offer. ❤️
|
||||
|
||||
(_For those who previously supported me on Ko-fi with a one-time donation, I'll be sending out license keys individually as a thank-you._)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Installation
|
||||
|
||||
### Supported Browsers & Installation Methods
|
||||
|
||||
| Browser | Installation Method |
|
||||
|--------------------|-------------------------------------------------------------------------------------|
|
||||
| **Google Chrome** | [Chrome Web Store link](https://chromewebstore.google.com/detail/capigligggeijgmocnaflanlbghnamgm?utm_source=item-share-cb) |
|
||||
| **Microsoft Edge** | Install via Chrome Web Store (compatible) |
|
||||
| **Brave Browser** | Install via Chrome Web Store (compatible) |
|
||||
| **Opera** | Install via Chrome Web Store (compatible) |
|
||||
| **Firefox**        | [📦 Install Firefox Extension (reviewed and verified by Mozilla)](https://github.com/willmiao/lm-civitai-extension-firefox/releases/latest/download/extension.xpi) |
|
||||
|
||||
For non-Chrome browsers (e.g., Microsoft Edge), you can typically install extensions from the Chrome Web Store by following these steps: open the extension’s Chrome Web Store page, click 'Get extension', then click 'Allow' when prompted to enable installations from other stores, and finally click 'Add extension' to complete the installation.
|
||||
|
||||
---
|
||||
|
||||
## Privacy & Security
|
||||
|
||||
I understand concerns around browser extensions and privacy, and I want to be fully transparent about how the **LM Civitai Extension** works:
|
||||
|
||||
- **Reviewed and Verified**
|
||||
This extension has been **manually reviewed and approved by the Chrome Web Store**. The Firefox version uses the **exact same code** (only the packaging format differs) and has passed **Mozilla’s Add-on review**.
|
||||
|
||||
- **Minimal Network Access**
|
||||
The only external server this extension connects to is:
|
||||
**`https://willmiao.shop`** — used solely for **license validation**.
|
||||
|
||||
It does **not collect, transmit, or store any personal or usage data**.
|
||||
No browsing history, no user IDs, no analytics, no hidden trackers.
|
||||
|
||||
- **Local-Only Model Detection**
|
||||
Model detection and LoRA Manager communication all happen **locally** within your browser, directly interacting with your local LoRA Manager backend.
|
||||
|
||||
I value your trust and am committed to keeping your local setup private and secure. If you have any questions, feel free to reach out!
|
||||
|
||||
---
|
||||
|
||||
## How to Use
|
||||
|
||||
After installing the extension, you'll automatically receive a **7-day trial** to explore all features.
|
||||
|
||||
When the extension is correctly installed and your license is valid:
|
||||
|
||||
- Open **Civitai**, and you'll see visual indicators added by the extension on model cards, showing:
|
||||
- ✅ Models already present in your local library
|
||||
- ⬇️ A download button for models not in your library
|
||||
|
||||
Clicking the download button adds the corresponding model version to the download queue. Up to **5 models can download simultaneously**.
|
||||
|
||||
### Visual Indicators Appear On:
|
||||
|
||||
- **Home Page** — Featured models
|
||||
- **Models Page**
|
||||
- **Creator Profiles** — If the creator has set their models to be visible
|
||||
- **Recommended Resources** — On individual model pages
|
||||
|
||||
### Version Buttons on Model Pages
|
||||
|
||||
On a specific model page, visual indicators also appear on version buttons, showing which versions are already in your local library.
|
||||
|
||||
When switching to a specific version by clicking a version button:
|
||||
|
||||
- Clicking the download button will open a dropdown:
|
||||
- Download via **LoRA Manager**
|
||||
- Download via **Original Download** (browser download)
|
||||
|
||||
You can check **Remember my choice** to set your preferred default. You can change this setting anytime in the extension's settings.
|
||||
|
||||

|
||||
|
||||
### Resources on Image Pages (2025-08-05)

In-library indicators now appear for resources used on image pages. 'Import image as recipe' is coming soon!
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Model Download Location & LoRA Manager Settings
|
||||
|
||||
To use the **one-click download function**, you must first set:
|
||||
|
||||
- Your **Default LoRAs Root**
|
||||
- Your **Default Checkpoints Root**
|
||||
|
||||
These are set within LoRA Manager's settings.
|
||||
|
||||
When everything is configured, downloaded model files will be placed in:
|
||||
|
||||
`<Default_Models_Root>/<Base_Model_of_the_Model>/<First_Tag_of_the_Model>`
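As a hypothetical illustration (the paths and tags below are made up): with the default LoRAs root set to `D:/models/loras`, a LoRA whose base model is `Flux.1 D` and whose first tag is `style` would be saved under `D:/models/loras/Flux.1 D/style/`.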
|
||||
|
||||
|
||||
### Update: Default Path Customization (2025-07-21)
|
||||
|
||||
A new setting to customize the default download path has been added in the nightly version. You can now personalize where models are saved when downloading via the LM Civitai Extension.
|
||||
|
||||

|
||||
|
||||
The previous YAML path mapping file will be deprecated—settings will now be unified in settings.json to simplify configuration.
|
||||
|
||||
---
|
||||
|
||||
## Backend Port Configuration
|
||||
|
||||
If your **ComfyUI** or **LoRA Manager** backend is running on a port **other than the default 8188**, you must configure the backend port in the extension's settings.
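For reference, the value to enter is whichever port ComfyUI was started with; in a standard ComfyUI setup this is typically chosen at launch (for example, `python main.py --port 8189`), in which case the extension setting should be `8189`.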
|
||||
|
||||
After correctly setting and saving the port, you'll see in the extension's header area:
|
||||
- A **Healthy** status with the tooltip: `Connected to LoRA Manager on port xxxx`
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Connecting to a Remote LoRA Manager
|
||||
|
||||
If your LoRA Manager is running on another computer, you can still connect from your browser using port forwarding.
|
||||
|
||||
> **Why can't you set a remote IP directly?**
|
||||
>
|
||||
> For privacy and security, the extension only requests access to `http://127.0.0.1/*`. Supporting remote IPs would require much broader permissions, which may be rejected by browser stores and could raise user concerns.
|
||||
|
||||
**Solution: Port Forwarding with `socat`**
|
||||
|
||||
On your browser computer, run:
|
||||
|
||||
`socat TCP-LISTEN:8188,bind=127.0.0.1,fork TCP:REMOTE.IP.ADDRESS.HERE:8188`
|
||||
|
||||
- Replace `REMOTE.IP.ADDRESS.HERE` with the IP of the machine running LoRA Manager.
|
||||
- Adjust the port if needed.
|
||||
|
||||
This lets the extension connect to `127.0.0.1:8188` as usual, with traffic forwarded to your remote server.
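As an illustration with made-up values: if LoRA Manager runs on `192.168.1.50`, the command becomes `socat TCP-LISTEN:8188,bind=127.0.0.1,fork TCP:192.168.1.50:8188`. Where SSH access is available, an SSH local forward achieves the same effect, e.g. `ssh -N -L 8188:127.0.0.1:8188 user@192.168.1.50`.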
|
||||
|
||||
_Thanks to user **Temikus** for sharing this solution!_
|
||||
|
||||
---
|
||||
|
||||
## Roadmap
|
||||
|
||||
The extension will evolve alongside **LoRA Manager** improvements. Planned features include:
|
||||
|
||||
- [x] Support for **additional model types** (e.g., embeddings)
|
||||
- [ ] One-click **Recipe Import**
|
||||
- [x] Display of in-library status for all resources in the **Resources Used** section of the image page
|
||||
- [x] One-click **Auto-organize Models**
|
||||
|
||||
**Stay tuned — and thank you for your support!**
|
||||
|
||||
---
|
||||
|
||||
@@ -1,170 +0,0 @@
# i18n System Migration Complete

## Summary

Successfully completed the migration from JavaScript ES6 modules to JSON format for the internationalization system, including complete multilingual translations and code updates.

## Completed Work

### 1. File Structure Reorganization
- **New directory**: `/locales/` - central location for all JSON translation files
- **Removed directory**: `/static/js/i18n/locales/` - old JavaScript files deleted

### 2. Format Conversion
- **Before**: ES6 module format (`export const en = { ... }`)
- **After**: standard JSON format (`{ ... }`)
- **Supported languages**: all 9 languages converted
  - English (en)
  - Simplified Chinese (zh-CN)
  - Traditional Chinese (zh-TW)
  - Japanese (ja)
  - Russian (ru)
  - German (de)
  - French (fr)
  - Spanish (es)
  - Korean (ko)

### 3. Translation Completion
- **Translation entries**: 386 key-value pairs per language
- **Coverage**: all UI elements fully covered
- **Quality assurance**: translation keys are kept consistent across all languages

### 4. JavaScript Code Updates

#### Main modified file: `static/js/i18n/index.js`
```javascript
// Old version: static import
import { en } from './locales/en.js';

// New version: dynamic JSON loading
async loadLocale(locale) {
    const response = await fetch(`/locales/${locale}.json`);
    return await response.json();
}
```

#### Core functionality updates:
- **Constructor**: switched from static imports to configuration-driven setup
- **Locale loading**: asynchronous JSON fetching
- **Initialization**: supports Promise-based asynchronous initialization
- **Error handling**: enhanced fallback to English
- **Backward compatibility**: existing API surface kept unchanged

### 5. Python Server-side Updates

#### Modified file: `py/services/server_i18n.py`
```python
# Old version: parse JavaScript files
def _load_locale_file(self, path, filename, locale_code):
    # complex JS-to-JSON conversion logic

# New version: load JSON directly
def _load_locale_file(self, path, filename, locale_code):
    with open(file_path, 'r', encoding='utf-8') as f:
        translations = json.load(f)
```

#### Path updates:
- **Old path**: `static/js/i18n/locales/*.js`
- **New path**: `locales/*.json`

### 6. Server Route Configuration

#### Modified file: `standalone.py`
```python
# Add a static route to serve the JSON files
app.router.add_static('/locales', locales_path)
```

## Technical Architecture

### Frontend
```
Browser → JavaScript i18n Manager → fetch('/locales/{lang}.json') → JSON Response
```

### Backend
```
Python Server → ServerI18nManager → Direct JSON loading → Template Rendering
```

### File Organization
```
ComfyUI-Lora-Manager/
├── locales/                 # new directory for JSON translation files
│   ├── en.json              # English translations (baseline)
│   ├── zh-CN.json           # Simplified Chinese translations
│   ├── zh-TW.json           # Traditional Chinese translations
│   ├── ja.json              # Japanese translations
│   ├── ru.json              # Russian translations
│   ├── de.json              # German translations
│   ├── fr.json              # French translations
│   ├── es.json              # Spanish translations
│   └── ko.json              # Korean translations
├── static/js/i18n/
│   └── index.js             # updated JavaScript i18n manager
└── py/services/
    └── server_i18n.py       # updated Python server-side i18n
```

## Testing & Validation

### Test script: `test_i18n.py`
```bash
🚀 Testing updated i18n system...
✅ All JSON locale files are valid (9 languages)
✅ Server-side i18n system working correctly
✅ All languages have complete translations (386 keys each)
🎉 All tests passed!
```

### Validated:
1. **JSON file integrity**: all files are correctly formatted with valid syntax
2. **Translation completeness**: translation keys are consistent across languages, with none missing
3. **Server-side functionality**: the Python i18n service loads and translates correctly
4. **Parameter interpolation**: dynamic parameter substitution works correctly

## Benefits & Improvements

### 1. Better Maintainability
- **Simpler format**: JSON is easier to edit and maintain than JavaScript objects
- **Tooling support**: better editor syntax highlighting and validation
- **Version control**: cleaner diffs make changes easier to track

### 2. Performance
- **On-demand loading**: only the currently needed language is loaded, reducing initial load time
- **Cache friendly**: JSON files are cached more effectively by browsers and CDNs
- **Compression**: JSON typically compresses better

### 3. Developer Experience
- **Dynamic switching**: languages can be switched at runtime without reloading the page
- **Easy to extend**: adding a new language only requires adding a JSON file
- **Debug friendly**: translation problems and missing keys are easier to locate

### 4. Deployment Convenience
- **Static assets**: JSON files can be deployed as static resources
- **CDN support**: translation files can be distributed through a CDN
- **Version management**: translations for different versions are easier to manage

## Compatibility Assurance

- **API compatibility**: all existing JavaScript APIs remain unchanged
- **Call sites**: existing code works without modification
- **Error handling**: the enhanced fallback mechanism safeguards the user experience
- **Performance**: the new system performs as well as or better than the old one

## Future Recommendations

1. **Monitoring**: monitor translation load performance and error rates after deployment
2. **Optimization**: consider a translation caching strategy to further improve performance
3. **Extension**: consider adding a translation management UI so non-technical contributors can update translations
4. **Automation**: add a CI/CD step that automatically validates translation completeness

---

**Migration completed**: 2024
**Files affected**: 21 files (9 new JSON files + 2 JS updates + 1 Python update + 1 server configuration)
**Total translation keys**: 386 × 9 languages = 3,474 translation entries
**Test status**: ✅ All passed

locales/de.json
@@ -16,7 +16,9 @@
|
||||
"loading": "Wird geladen...",
|
||||
"unknown": "Unbekannt",
|
||||
"date": "Datum",
|
||||
"version": "Version"
|
||||
"version": "Version",
|
||||
"enabled": "Aktiviert",
|
||||
"disabled": "Deaktiviert"
|
||||
},
|
||||
"language": {
|
||||
"select": "Sprache",
|
||||
@@ -178,7 +180,9 @@
|
||||
"folderSettings": "Ordner-Einstellungen",
|
||||
"downloadPathTemplates": "Download-Pfad-Vorlagen",
|
||||
"exampleImages": "Beispielbilder",
|
||||
"misc": "Verschiedenes"
|
||||
"misc": "Verschiedenes",
|
||||
"metadataArchive": "Metadaten-Archiv-Datenbank",
|
||||
"proxySettings": "Proxy-Einstellungen"
|
||||
},
|
||||
"contentFiltering": {
|
||||
"blurNsfwContent": "NSFW-Inhalte unscharf stellen",
|
||||
@@ -194,7 +198,7 @@
|
||||
"displayDensity": "Anzeige-Dichte",
|
||||
"displayDensityOptions": {
|
||||
"default": "Standard",
|
||||
"medium": "Mittel",
|
||||
"medium": "Mittel",
|
||||
"compact": "Kompakt"
|
||||
},
|
||||
"displayDensityHelp": "Wählen Sie, wie viele Karten pro Zeile angezeigt werden sollen:",
|
||||
@@ -231,17 +235,18 @@
|
||||
"templateOptions": {
|
||||
"flatStructure": "Flache Struktur",
|
||||
"byBaseModel": "Nach Basis-Modell",
|
||||
"byAuthor": "Nach Autor",
|
||||
"byAuthor": "Nach Autor",
|
||||
"byFirstTag": "Nach erstem Tag",
|
||||
"baseModelFirstTag": "Basis-Modell + Erster Tag",
|
||||
"baseModelAuthor": "Basis-Modell + Autor",
|
||||
"authorFirstTag": "Autor + Erster Tag",
|
||||
"baseModelAuthorFirstTag": "Basis-Modell + Autor + Erster Tag",
|
||||
"customTemplate": "Benutzerdefinierte Vorlage"
|
||||
},
|
||||
"customTemplatePlaceholder": "Benutzerdefinierte Vorlage eingeben (z.B. {base_model}/{author}/{first_tag})",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA",
|
||||
"checkpoint": "Checkpoint",
|
||||
"checkpoint": "Checkpoint",
|
||||
"embedding": "Embedding"
|
||||
},
|
||||
"baseModelPathMappings": "Basis-Modell-Pfad-Zuordnungen",
|
||||
@@ -273,6 +278,48 @@
|
||||
"misc": {
|
||||
"includeTriggerWords": "Trigger Words in LoRA-Syntax einschließen",
|
||||
"includeTriggerWordsHelp": "Trainierte Trigger Words beim Kopieren der LoRA-Syntax in die Zwischenablage einschließen"
|
||||
},
|
||||
"metadataArchive": {
|
||||
"enableArchiveDb": "Metadaten-Archiv-Datenbank aktivieren",
|
||||
"enableArchiveDbHelp": "Verwenden Sie eine lokale Datenbank, um auf Metadaten von Modellen zuzugreifen, die von Civitai gelöscht wurden.",
|
||||
"status": "Status",
|
||||
"statusAvailable": "Verfügbar",
|
||||
"statusUnavailable": "Nicht verfügbar",
|
||||
"enabled": "Aktiviert",
|
||||
"management": "Datenbankverwaltung",
|
||||
"managementHelp": "Laden Sie die Metadaten-Archiv-Datenbank herunter oder entfernen Sie sie",
|
||||
"downloadButton": "Datenbank herunterladen",
|
||||
"downloadingButton": "Wird heruntergeladen...",
|
||||
"downloadedButton": "Heruntergeladen",
|
||||
"removeButton": "Datenbank entfernen",
|
||||
"removingButton": "Wird entfernt...",
|
||||
"downloadSuccess": "Metadaten-Archiv-Datenbank erfolgreich heruntergeladen",
|
||||
"downloadError": "Fehler beim Herunterladen der Metadaten-Archiv-Datenbank",
|
||||
"removeSuccess": "Metadaten-Archiv-Datenbank erfolgreich entfernt",
|
||||
"removeError": "Fehler beim Entfernen der Metadaten-Archiv-Datenbank",
|
||||
"removeConfirm": "Sind Sie sicher, dass Sie die Metadaten-Archiv-Datenbank entfernen möchten? Dadurch wird die lokale Datenbankdatei gelöscht und Sie müssen sie erneut herunterladen, um diese Funktion zu nutzen.",
|
||||
"preparing": "Download wird vorbereitet...",
|
||||
"connecting": "Verbindung zum Download-Server wird hergestellt...",
|
||||
"completed": "Abgeschlossen",
|
||||
"downloadComplete": "Download erfolgreich abgeschlossen"
|
||||
},
|
||||
"proxySettings": {
|
||||
"enableProxy": "App-Proxy aktivieren",
|
||||
"enableProxyHelp": "Aktivieren Sie benutzerdefinierte Proxy-Einstellungen für diese Anwendung. Überschreibt die System-Proxy-Einstellungen.",
|
||||
"proxyType": "Proxy-Typ",
|
||||
"proxyTypeHelp": "Wählen Sie den Typ des Proxy-Servers (HTTP, HTTPS, SOCKS4, SOCKS5)",
|
||||
"proxyHost": "Proxy-Host",
|
||||
"proxyHostPlaceholder": "proxy.beispiel.de",
|
||||
"proxyHostHelp": "Der Hostname oder die IP-Adresse Ihres Proxy-Servers",
|
||||
"proxyPort": "Proxy-Port",
|
||||
"proxyPortPlaceholder": "8080",
|
||||
"proxyPortHelp": "Die Portnummer Ihres Proxy-Servers",
|
||||
"proxyUsername": "Benutzername (optional)",
|
||||
"proxyUsernamePlaceholder": "benutzername",
|
||||
"proxyUsernameHelp": "Benutzername für die Proxy-Authentifizierung (falls erforderlich)",
|
||||
"proxyPassword": "Passwort (optional)",
|
||||
"proxyPasswordPlaceholder": "passwort",
|
||||
"proxyPasswordHelp": "Passwort für die Proxy-Authentifizierung (falls erforderlich)"
|
||||
}
|
||||
},
|
||||
"loras": {
|
||||
@@ -318,13 +365,24 @@
|
||||
"bulkOperations": {
|
||||
"selected": "{count} ausgewählt",
|
||||
"selectedSuffix": "ausgewählt",
|
||||
"viewSelected": "Klicken Sie, um ausgewählte Elemente anzuzeigen",
|
||||
"sendToWorkflow": "An Workflow senden",
|
||||
"copyAll": "Alle kopieren",
|
||||
"refreshAll": "Alle aktualisieren",
|
||||
"moveAll": "Alle verschieben",
|
||||
"deleteAll": "Alle löschen",
|
||||
"clear": "Leeren"
|
||||
"viewSelected": "Auswahl anzeigen",
|
||||
"addTags": "Allen Tags hinzufügen",
|
||||
"setBaseModel": "Basis-Modell für alle festlegen",
|
||||
"copyAll": "Alle Syntax kopieren",
|
||||
"refreshAll": "Alle Metadaten aktualisieren",
|
||||
"moveAll": "Alle in Ordner verschieben",
|
||||
"autoOrganize": "Automatisch organisieren",
|
||||
"deleteAll": "Alle Modelle löschen",
|
||||
"clear": "Auswahl löschen",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Automatische Organisation wird initialisiert...",
|
||||
"starting": "Automatische Organisation für {type} wird gestartet...",
|
||||
"processing": "Verarbeitung ({processed}/{total}) – {success} verschoben, {skipped} übersprungen, {failures} fehlgeschlagen",
|
||||
"cleaning": "Leere Verzeichnisse werden bereinigt...",
|
||||
"completed": "Abgeschlossen: {success} verschoben, {skipped} übersprungen, {failures} fehlgeschlagen",
|
||||
"complete": "Automatische Organisation abgeschlossen",
|
||||
"error": "Fehler: {error}"
|
||||
}
|
||||
},
|
||||
"contextMenu": {
|
||||
"refreshMetadata": "Civitai-Daten aktualisieren",
|
||||
@@ -572,6 +630,24 @@
|
||||
"countMessage": "Modelle werden dauerhaft gelöscht.",
|
||||
"action": "Alle löschen"
|
||||
},
|
||||
"bulkAddTags": {
|
||||
"title": "Tags zu mehreren Modellen hinzufügen",
|
||||
"description": "Tags hinzufügen zu",
|
||||
"models": "Modelle",
|
||||
"tagsToAdd": "Hinzugefügte Tags",
|
||||
"placeholder": "Tag eingeben und Enter drücken...",
|
||||
"appendTags": "Tags anhängen",
|
||||
"replaceTags": "Tags ersetzen",
|
||||
"saveChanges": "Änderungen speichern"
|
||||
},
|
||||
"bulkBaseModel": {
|
||||
"title": "Basis-Modell für mehrere Modelle festlegen",
|
||||
"description": "Basis-Modell festlegen für",
|
||||
"models": "Modelle",
|
||||
"selectBaseModel": "Basis-Modell auswählen",
|
||||
"save": "Basis-Modell aktualisieren",
|
||||
"cancel": "Abbrechen"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Lokale Beispielbilder",
|
||||
"message": "Keine lokalen Beispielbilder für dieses Modell gefunden. Ansichtsoptionen:",
|
||||
@@ -622,7 +698,12 @@
|
||||
"editBaseModel": "Basis-Modell bearbeiten",
|
||||
"viewOnCivitai": "Auf Civitai anzeigen",
|
||||
"viewOnCivitaiText": "Auf Civitai anzeigen",
|
||||
"viewCreatorProfile": "Ersteller-Profil anzeigen"
|
||||
"viewCreatorProfile": "Ersteller-Profil anzeigen",
|
||||
"openFileLocation": "Dateispeicherort öffnen"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "Dateispeicherort erfolgreich geöffnet",
|
||||
"failed": "Fehler beim Öffnen des Dateispeicherorts"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "Version",
|
||||
@@ -923,7 +1004,11 @@
|
||||
"downloadPartialWithAccess": "{completed} von {total} LoRAs heruntergeladen. {accessFailures} fehlgeschlagen aufgrund von Zugriffsbeschränkungen. Überprüfen Sie Ihren API-Schlüssel in den Einstellungen oder den Early Access-Status.",
|
||||
"pleaseSelectVersion": "Bitte wählen Sie eine Version aus",
|
||||
"versionExists": "Diese Version existiert bereits in Ihrer Bibliothek",
|
||||
"downloadCompleted": "Download erfolgreich abgeschlossen"
|
||||
"downloadCompleted": "Download erfolgreich abgeschlossen",
|
||||
"autoOrganizeSuccess": "Automatische Organisation für {count} {type} erfolgreich abgeschlossen",
|
||||
"autoOrganizePartialSuccess": "Automatische Organisation abgeschlossen: {success} verschoben, {failures} fehlgeschlagen von insgesamt {total} Modellen",
|
||||
"autoOrganizeFailed": "Automatische Organisation fehlgeschlagen: {error}",
|
||||
"noModelsSelected": "Keine Modelle ausgewählt"
|
||||
},
|
||||
"recipes": {
|
||||
"fetchFailed": "Fehler beim Abrufen der Rezepte: {message}",
|
||||
@@ -972,12 +1057,18 @@
|
||||
"deleteFailed": "Fehler: {error}",
|
||||
"deleteFailedGeneral": "Fehler beim Löschen der Modelle",
|
||||
"selectedAdditional": "{count} zusätzliche {type}(s) ausgewählt",
|
||||
"marqueeSelectionComplete": "{count} {type}(s) mit Rahmenauswahl ausgewählt",
|
||||
"refreshMetadataFailed": "Fehler beim Aktualisieren der Metadaten",
|
||||
"nameCannotBeEmpty": "Modellname darf nicht leer sein",
|
||||
"nameUpdatedSuccessfully": "Modellname erfolgreich aktualisiert",
|
||||
"nameUpdateFailed": "Fehler beim Aktualisieren des Modellnamens",
|
||||
"baseModelUpdated": "Basis-Modell erfolgreich aktualisiert",
|
||||
"baseModelUpdateFailed": "Fehler beim Aktualisieren des Basis-Modells",
|
||||
"baseModelNotSelected": "Bitte ein Basis-Modell auswählen",
|
||||
"bulkBaseModelUpdating": "Basis-Modell wird für {count} Modell(e) aktualisiert...",
|
||||
"bulkBaseModelUpdateSuccess": "Basis-Modell erfolgreich für {count} Modell(e) aktualisiert",
|
||||
"bulkBaseModelUpdatePartial": "{success} Modelle aktualisiert, {failed} fehlgeschlagen",
|
||||
"bulkBaseModelUpdateFailed": "Aktualisierung des Basis-Modells für ausgewählte Modelle fehlgeschlagen",
|
||||
"invalidCharactersRemoved": "Ungültige Zeichen aus Dateiname entfernt",
|
||||
"filenameCannotBeEmpty": "Dateiname darf nicht leer sein",
|
||||
"renameFailed": "Fehler beim Umbenennen der Datei: {message}",
|
||||
@@ -987,7 +1078,14 @@
|
||||
"verificationAlreadyDone": "Diese Gruppe wurde bereits verifiziert",
|
||||
"verificationCompleteMismatch": "Verifikation abgeschlossen. {count} Datei(en) haben unterschiedliche tatsächliche Hashes.",
|
||||
"verificationCompleteSuccess": "Verifikation abgeschlossen. Alle Dateien sind bestätigte Duplikate.",
|
||||
"verificationFailed": "Fehler beim Verifizieren der Hashes: {message}"
|
||||
"verificationFailed": "Fehler beim Verifizieren der Hashes: {message}",
|
||||
"noTagsToAdd": "Keine Tags zum Hinzufügen",
|
||||
"tagsAddedSuccessfully": "Erfolgreich {tagCount} Tag(s) zu {count} {type}(s) hinzugefügt",
|
||||
"tagsReplacedSuccessfully": "Tags für {count} {type}(s) erfolgreich durch {tagCount} Tag(s) ersetzt",
|
||||
"tagsAddFailed": "Fehler beim Hinzufügen von Tags zu {count} Modell(en)",
|
||||
"tagsReplaceFailed": "Fehler beim Ersetzen von Tags für {count} Modell(e)",
|
||||
"bulkTagsAddFailed": "Fehler beim Hinzufügen von Tags zu Modellen",
|
||||
"bulkTagsReplaceFailed": "Fehler beim Ersetzen von Tags für Modelle"
|
||||
},
|
||||
"search": {
|
||||
"atLeastOneOption": "Mindestens eine Suchoption muss ausgewählt werden"
|
||||
@@ -1069,6 +1167,7 @@
|
||||
},
|
||||
"exampleImages": {
|
||||
"pathUpdated": "Beispielbilder-Pfad erfolgreich aktualisiert",
|
||||
"pathUpdateFailed": "Fehler beim Aktualisieren des Beispielbilder-Pfads: {message}",
|
||||
"downloadInProgress": "Download bereits in Bearbeitung",
|
||||
"enterLocationFirst": "Bitte geben Sie zuerst einen Download-Speicherort ein",
|
||||
"downloadStarted": "Beispielbilder-Download gestartet",
|
||||
|
||||
locales/en.json
@@ -16,7 +16,9 @@
|
||||
"loading": "Loading...",
|
||||
"unknown": "Unknown",
|
||||
"date": "Date",
|
||||
"version": "Version"
|
||||
"version": "Version",
|
||||
"enabled": "Enabled",
|
||||
"disabled": "Disabled"
|
||||
},
|
||||
"language": {
|
||||
"select": "Language",
|
||||
@@ -178,7 +180,9 @@
|
||||
"folderSettings": "Folder Settings",
|
||||
"downloadPathTemplates": "Download Path Templates",
|
||||
"exampleImages": "Example Images",
|
||||
"misc": "Misc."
|
||||
"misc": "Misc.",
|
||||
"metadataArchive": "Metadata Archive Database",
|
||||
"proxySettings": "Proxy Settings"
|
||||
},
|
||||
"contentFiltering": {
|
||||
"blurNsfwContent": "Blur NSFW Content",
|
||||
@@ -236,6 +240,7 @@
|
||||
"baseModelFirstTag": "Base Model + First Tag",
|
||||
"baseModelAuthor": "Base Model + Author",
|
||||
"authorFirstTag": "Author + First Tag",
|
||||
"baseModelAuthorFirstTag": "Base Model + Author + First Tag",
|
||||
"customTemplate": "Custom Template"
|
||||
},
|
||||
"customTemplatePlaceholder": "Enter custom template (e.g., {base_model}/{author}/{first_tag})",
|
||||
@@ -273,6 +278,48 @@
|
||||
"misc": {
|
||||
"includeTriggerWords": "Include Trigger Words in LoRA Syntax",
|
||||
"includeTriggerWordsHelp": "Include trained trigger words when copying LoRA syntax to clipboard"
|
||||
},
|
||||
"metadataArchive": {
|
||||
"enableArchiveDb": "Enable Metadata Archive Database",
|
||||
"enableArchiveDbHelp": "Use a local database to access metadata for models that have been deleted from Civitai.",
|
||||
"status": "Status",
|
||||
"statusAvailable": "Available",
|
||||
"statusUnavailable": "Not Available",
|
||||
"enabled": "Enabled",
|
||||
"management": "Database Management",
|
||||
"managementHelp": "Download or remove the metadata archive database",
|
||||
"downloadButton": "Download Database",
|
||||
"downloadingButton": "Downloading...",
|
||||
"downloadedButton": "Downloaded",
|
||||
"removeButton": "Remove Database",
|
||||
"removingButton": "Removing...",
|
||||
"downloadSuccess": "Metadata archive database downloaded successfully",
|
||||
"downloadError": "Failed to download metadata archive database",
|
||||
"removeSuccess": "Metadata archive database removed successfully",
|
||||
"removeError": "Failed to remove metadata archive database",
|
||||
"removeConfirm": "Are you sure you want to remove the metadata archive database? This will delete the local database file and you'll need to download it again to use this feature.",
|
||||
"preparing": "Preparing download...",
|
||||
"connecting": "Connecting to download server...",
|
||||
"completed": "Completed",
|
||||
"downloadComplete": "Download completed successfully"
|
||||
},
|
||||
"proxySettings": {
|
||||
"enableProxy": "Enable App-level Proxy",
|
||||
"enableProxyHelp": "Enable custom proxy settings for this application, overriding system proxy settings",
|
||||
"proxyType": "Proxy Type",
|
||||
"proxyTypeHelp": "Select the type of proxy server (HTTP, HTTPS, SOCKS4, SOCKS5)",
|
||||
"proxyHost": "Proxy Host",
|
||||
"proxyHostPlaceholder": "proxy.example.com",
|
||||
"proxyHostHelp": "The hostname or IP address of your proxy server",
|
||||
"proxyPort": "Proxy Port",
|
||||
"proxyPortPlaceholder": "8080",
|
||||
"proxyPortHelp": "The port number of your proxy server",
|
||||
"proxyUsername": "Username (Optional)",
|
||||
"proxyUsernamePlaceholder": "username",
|
||||
"proxyUsernameHelp": "Username for proxy authentication (if required)",
|
||||
"proxyPassword": "Password (Optional)",
|
||||
"proxyPasswordPlaceholder": "password",
|
||||
"proxyPasswordHelp": "Password for proxy authentication (if required)"
|
||||
}
|
||||
},
|
||||
"loras": {
|
||||
@@ -318,13 +365,24 @@
|
||||
"bulkOperations": {
|
||||
"selected": "{count} selected",
|
||||
"selectedSuffix": "selected",
|
||||
"viewSelected": "Click to view selected items",
|
||||
"sendToWorkflow": "Send to Workflow",
|
||||
"copyAll": "Copy All",
|
||||
"refreshAll": "Refresh All",
|
||||
"moveAll": "Move All",
|
||||
"deleteAll": "Delete All",
|
||||
"clear": "Clear"
|
||||
"viewSelected": "View Selected",
|
||||
"addTags": "Add Tags to All",
|
||||
"setBaseModel": "Set Base Model for All",
|
||||
"copyAll": "Copy All Syntax",
|
||||
"refreshAll": "Refresh All Metadata",
|
||||
"moveAll": "Move All to Folder",
|
||||
"autoOrganize": "Auto-Organize Selected",
|
||||
"deleteAll": "Delete All Models",
|
||||
"clear": "Clear Selection",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Initializing auto-organize...",
|
||||
"starting": "Starting auto-organize for {type}...",
|
||||
"processing": "Processing ({processed}/{total}) - {success} moved, {skipped} skipped, {failures} failed",
|
||||
"cleaning": "Cleaning up empty directories...",
|
||||
"completed": "Completed: {success} moved, {skipped} skipped, {failures} failed",
|
||||
"complete": "Auto-organize complete",
|
||||
"error": "Error: {error}"
|
||||
}
|
||||
},
|
||||
"contextMenu": {
|
||||
"refreshMetadata": "Refresh Civitai Data",
|
||||
@@ -572,6 +630,24 @@
|
||||
"countMessage": "models will be permanently deleted.",
|
||||
"action": "Delete All"
|
||||
},
|
||||
"bulkAddTags": {
|
||||
"title": "Add Tags to Multiple Models",
|
||||
"description": "Add tags to",
|
||||
"models": "models",
|
||||
"tagsToAdd": "Tags to Add",
|
||||
"placeholder": "Enter tag and press Enter...",
|
||||
"appendTags": "Append Tags",
|
||||
"replaceTags": "Replace Tags",
|
||||
"saveChanges": "Save changes"
|
||||
},
|
||||
"bulkBaseModel": {
|
||||
"title": "Set Base Model for Multiple Models",
|
||||
"description": "Set base model for",
|
||||
"models": "models",
|
||||
"selectBaseModel": "Select Base Model",
|
||||
"save": "Update Base Model",
|
||||
"cancel": "Cancel"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Local Example Images",
|
||||
"message": "No local example images found for this model. View options:",
|
||||
@@ -622,7 +698,12 @@
|
||||
"editBaseModel": "Edit base model",
|
||||
"viewOnCivitai": "View on Civitai",
|
||||
"viewOnCivitaiText": "View on Civitai",
|
||||
"viewCreatorProfile": "View Creator Profile"
|
||||
"viewCreatorProfile": "View Creator Profile",
|
||||
"openFileLocation": "Open File Location"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "File location opened successfully",
|
||||
"failed": "Failed to open file location"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "Version",
|
||||
@@ -923,7 +1004,11 @@
|
||||
"downloadPartialWithAccess": "Downloaded {completed} of {total} LoRAs. {accessFailures} failed due to access restrictions. Check your API key in settings or early access status.",
|
||||
"pleaseSelectVersion": "Please select a version",
|
||||
"versionExists": "This version already exists in your library",
|
||||
"downloadCompleted": "Download completed successfully"
|
||||
"downloadCompleted": "Download completed successfully",
|
||||
"autoOrganizeSuccess": "Auto-organize completed successfully for {count} {type}",
|
||||
"autoOrganizePartialSuccess": "Auto-organize completed with {success} moved, {failures} failed out of {total} models",
|
||||
"autoOrganizeFailed": "Auto-organize failed: {error}",
|
||||
"noModelsSelected": "No models selected"
|
||||
},
|
||||
"recipes": {
|
||||
"fetchFailed": "Failed to fetch recipes: {message}",
|
||||
@@ -972,12 +1057,18 @@
|
||||
"deleteFailed": "Error: {error}",
|
||||
"deleteFailedGeneral": "Failed to delete models",
|
||||
"selectedAdditional": "Selected {count} additional {type}(s)",
|
||||
"marqueeSelectionComplete": "Selected {count} {type}(s) with marquee selection",
|
||||
"refreshMetadataFailed": "Failed to refresh metadata",
|
||||
"nameCannotBeEmpty": "Model name cannot be empty",
|
||||
"nameUpdatedSuccessfully": "Model name updated successfully",
|
||||
"nameUpdateFailed": "Failed to update model name",
|
||||
"baseModelUpdated": "Base model updated successfully",
|
||||
"baseModelUpdateFailed": "Failed to update base model",
|
||||
"baseModelNotSelected": "Please select a base model",
|
||||
"bulkBaseModelUpdating": "Updating base model for {count} model(s)...",
|
||||
"bulkBaseModelUpdateSuccess": "Successfully updated base model for {count} model(s)",
|
||||
"bulkBaseModelUpdatePartial": "Updated {success} model(s), failed {failed} model(s)",
|
||||
"bulkBaseModelUpdateFailed": "Failed to update base model for selected models",
|
||||
"invalidCharactersRemoved": "Invalid characters removed from filename",
|
||||
"filenameCannotBeEmpty": "File name cannot be empty",
|
||||
"renameFailed": "Failed to rename file: {message}",
|
||||
@@ -987,7 +1078,14 @@
|
||||
"verificationAlreadyDone": "This group has already been verified",
|
||||
"verificationCompleteMismatch": "Verification complete. {count} file(s) have different actual hashes.",
|
||||
"verificationCompleteSuccess": "Verification complete. All files are confirmed duplicates.",
|
||||
"verificationFailed": "Failed to verify hashes: {message}"
|
||||
"verificationFailed": "Failed to verify hashes: {message}",
|
||||
"noTagsToAdd": "No tags to add",
|
||||
"tagsAddedSuccessfully": "Successfully added {tagCount} tag(s) to {count} {type}(s)",
|
||||
"tagsReplacedSuccessfully": "Successfully replaced tags for {count} {type}(s) with {tagCount} tag(s)",
|
||||
"tagsAddFailed": "Failed to add tags to {count} model(s)",
|
||||
"tagsReplaceFailed": "Failed to replace tags for {count} model(s)",
|
||||
"bulkTagsAddFailed": "Failed to add tags to models",
|
||||
"bulkTagsReplaceFailed": "Failed to replace tags for models"
|
||||
},
|
||||
"search": {
|
||||
"atLeastOneOption": "At least one search option must be selected"
|
||||
@@ -1069,6 +1167,7 @@
|
||||
},
|
||||
"exampleImages": {
|
||||
"pathUpdated": "Example images path updated successfully",
|
||||
"pathUpdateFailed": "Failed to update example images path: {message}",
|
||||
"downloadInProgress": "Download already in progress",
|
||||
"enterLocationFirst": "Please enter a download location first",
|
||||
"downloadStarted": "Example images download started",
|
||||
|
||||
locales/es.json
@@ -16,7 +16,9 @@
|
||||
"loading": "Cargando...",
|
||||
"unknown": "Desconocido",
|
||||
"date": "Fecha",
|
||||
"version": "Versión"
|
||||
"version": "Versión",
|
||||
"enabled": "Habilitado",
|
||||
"disabled": "Deshabilitado"
|
||||
},
|
||||
"language": {
|
||||
"select": "Idioma",
|
||||
@@ -178,7 +180,9 @@
|
||||
"folderSettings": "Configuración de carpetas",
|
||||
"downloadPathTemplates": "Plantillas de rutas de descarga",
|
||||
"exampleImages": "Imágenes de ejemplo",
|
||||
"misc": "Varios"
|
||||
"misc": "Varios",
|
||||
"metadataArchive": "Base de datos de archivo de metadatos",
|
||||
"proxySettings": "Configuración de proxy"
|
||||
},
|
||||
"contentFiltering": {
|
||||
"blurNsfwContent": "Difuminar contenido NSFW",
|
||||
@@ -194,7 +198,7 @@
|
||||
"displayDensity": "Densidad de visualización",
|
||||
"displayDensityOptions": {
|
||||
"default": "Predeterminado",
|
||||
"medium": "Medio",
|
||||
"medium": "Medio",
|
||||
"compact": "Compacto"
|
||||
},
|
||||
"displayDensityHelp": "Elige cuántas tarjetas mostrar por fila:",
|
||||
@@ -231,17 +235,18 @@
|
||||
"templateOptions": {
|
||||
"flatStructure": "Estructura plana",
|
||||
"byBaseModel": "Por modelo base",
|
||||
"byAuthor": "Por autor",
|
||||
"byAuthor": "Por autor",
|
||||
"byFirstTag": "Por primera etiqueta",
|
||||
"baseModelFirstTag": "Modelo base + primera etiqueta",
|
||||
"baseModelAuthor": "Modelo base + autor",
|
||||
"authorFirstTag": "Autor + primera etiqueta",
|
||||
"baseModelAuthorFirstTag": "Modelo base + autor + primera etiqueta",
|
||||
"customTemplate": "Plantilla personalizada"
|
||||
},
|
||||
"customTemplatePlaceholder": "Introduce plantilla personalizada (ej., {base_model}/{author}/{first_tag})",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA",
|
||||
"checkpoint": "Checkpoint",
|
||||
"checkpoint": "Checkpoint",
|
||||
"embedding": "Embedding"
|
||||
},
|
||||
"baseModelPathMappings": "Mapeos de rutas de modelo base",
|
||||
@@ -273,6 +278,48 @@
|
||||
"misc": {
|
||||
"includeTriggerWords": "Incluir palabras clave en la sintaxis de LoRA",
|
||||
"includeTriggerWordsHelp": "Incluir palabras clave entrenadas al copiar la sintaxis de LoRA al portapapeles"
|
||||
},
|
||||
"metadataArchive": {
|
||||
"enableArchiveDb": "Habilitar base de datos de archivo de metadatos",
|
||||
"enableArchiveDbHelp": "Utiliza una base de datos local para acceder a metadatos de modelos que han sido eliminados de Civitai.",
|
||||
"status": "Estado",
|
||||
"statusAvailable": "Disponible",
|
||||
"statusUnavailable": "No disponible",
|
||||
"enabled": "Habilitado",
|
||||
"management": "Gestión de base de datos",
|
||||
"managementHelp": "Descargar o eliminar la base de datos de archivo de metadatos",
|
||||
"downloadButton": "Descargar base de datos",
|
||||
"downloadingButton": "Descargando...",
|
||||
"downloadedButton": "Descargado",
|
||||
"removeButton": "Eliminar base de datos",
|
||||
"removingButton": "Eliminando...",
|
||||
"downloadSuccess": "Base de datos de archivo de metadatos descargada exitosamente",
|
||||
"downloadError": "Error al descargar la base de datos de archivo de metadatos",
|
||||
"removeSuccess": "Base de datos de archivo de metadatos eliminada exitosamente",
|
||||
"removeError": "Error al eliminar la base de datos de archivo de metadatos",
|
||||
"removeConfirm": "¿Estás seguro de que quieres eliminar la base de datos de archivo de metadatos? Esto eliminará el archivo de base de datos local y tendrás que descargarlo de nuevo para usar esta función.",
|
||||
"preparing": "Preparando descarga...",
|
||||
"connecting": "Conectando al servidor de descarga...",
|
||||
"completed": "Completado",
|
||||
"downloadComplete": "Descarga completada exitosamente"
|
||||
},
|
||||
"proxySettings": {
|
||||
"enableProxy": "Habilitar proxy a nivel de aplicación",
|
||||
"enableProxyHelp": "Habilita la configuración de proxy personalizada para esta aplicación, sobrescribiendo la configuración de proxy del sistema",
|
||||
"proxyType": "Tipo de proxy",
|
||||
"proxyTypeHelp": "Selecciona el tipo de servidor proxy (HTTP, HTTPS, SOCKS4, SOCKS5)",
|
||||
"proxyHost": "Host del proxy",
|
||||
"proxyHostPlaceholder": "proxy.ejemplo.com",
|
||||
"proxyHostHelp": "El nombre de host o dirección IP de tu servidor proxy",
|
||||
"proxyPort": "Puerto del proxy",
|
||||
"proxyPortPlaceholder": "8080",
|
||||
"proxyPortHelp": "El número de puerto de tu servidor proxy",
|
||||
"proxyUsername": "Usuario (opcional)",
|
||||
"proxyUsernamePlaceholder": "usuario",
|
||||
"proxyUsernameHelp": "Usuario para autenticación de proxy (si es necesario)",
|
||||
"proxyPassword": "Contraseña (opcional)",
|
||||
"proxyPasswordPlaceholder": "contraseña",
|
||||
"proxyPasswordHelp": "Contraseña para autenticación de proxy (si es necesario)"
|
||||
}
|
||||
},
|
||||
"loras": {
|
||||
@@ -318,13 +365,24 @@
|
||||
"bulkOperations": {
|
||||
"selected": "{count} seleccionados",
|
||||
"selectedSuffix": "seleccionados",
|
||||
"viewSelected": "Clic para ver elementos seleccionados",
|
||||
"sendToWorkflow": "Enviar al flujo de trabajo",
|
||||
"copyAll": "Copiar todo",
|
||||
"refreshAll": "Actualizar todo",
|
||||
"moveAll": "Mover todo",
|
||||
"deleteAll": "Eliminar todo",
|
||||
"clear": "Limpiar"
|
||||
"viewSelected": "Ver seleccionados",
|
||||
"addTags": "Añadir etiquetas a todos",
|
||||
"setBaseModel": "Establecer modelo base para todos",
|
||||
"copyAll": "Copiar toda la sintaxis",
|
||||
"refreshAll": "Actualizar todos los metadatos",
|
||||
"moveAll": "Mover todos a carpeta",
|
||||
"autoOrganize": "Auto-organizar seleccionados",
|
||||
"deleteAll": "Eliminar todos los modelos",
|
||||
"clear": "Limpiar selección",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Inicializando auto-organización...",
|
||||
"starting": "Iniciando auto-organización para {type}...",
|
||||
"processing": "Procesando ({processed}/{total}) - {success} movidos, {skipped} omitidos, {failures} fallidos",
|
||||
"cleaning": "Limpiando directorios vacíos...",
|
||||
"completed": "Completado: {success} movidos, {skipped} omitidos, {failures} fallidos",
|
||||
"complete": "Auto-organización completada",
|
||||
"error": "Error: {error}"
|
||||
}
|
||||
},
|
||||
"contextMenu": {
|
||||
"refreshMetadata": "Actualizar datos de Civitai",
|
||||
@@ -572,6 +630,24 @@
|
||||
"countMessage": "modelos serán eliminados permanentemente.",
|
||||
"action": "Eliminar todo"
|
||||
},
|
||||
"bulkAddTags": {
|
||||
"title": "Añadir etiquetas a múltiples modelos",
|
||||
"description": "Añadir etiquetas a",
|
||||
"models": "modelos",
|
||||
"tagsToAdd": "Etiquetas a añadir",
|
||||
"placeholder": "Introduce una etiqueta y presiona Enter...",
|
||||
"appendTags": "Añadir etiquetas",
|
||||
"replaceTags": "Reemplazar etiquetas",
|
||||
"saveChanges": "Guardar cambios"
|
||||
},
|
||||
"bulkBaseModel": {
|
||||
"title": "Establecer modelo base para múltiples modelos",
|
||||
"description": "Establecer modelo base para",
|
||||
"models": "modelos",
|
||||
"selectBaseModel": "Seleccionar modelo base",
|
||||
"save": "Actualizar modelo base",
|
||||
"cancel": "Cancelar"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Imágenes de ejemplo locales",
|
||||
"message": "No se encontraron imágenes de ejemplo locales para este modelo. Opciones de visualización:",
|
||||
@@ -622,7 +698,12 @@
|
||||
"editBaseModel": "Editar modelo base",
|
||||
"viewOnCivitai": "Ver en Civitai",
|
||||
"viewOnCivitaiText": "Ver en Civitai",
|
||||
"viewCreatorProfile": "Ver perfil del creador"
|
||||
"viewCreatorProfile": "Ver perfil del creador",
|
||||
"openFileLocation": "Abrir ubicación del archivo"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "Ubicación del archivo abierta exitosamente",
|
||||
"failed": "Error al abrir la ubicación del archivo"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "Versión",
|
||||
@@ -923,7 +1004,11 @@
|
||||
"downloadPartialWithAccess": "Descargados {completed} de {total} LoRAs. {accessFailures} fallaron debido a restricciones de acceso. Revisa tu clave API en configuración o estado de acceso temprano.",
|
||||
"pleaseSelectVersion": "Por favor selecciona una versión",
|
||||
"versionExists": "Esta versión ya existe en tu biblioteca",
|
||||
"downloadCompleted": "Descarga completada exitosamente"
|
||||
"downloadCompleted": "Descarga completada exitosamente",
|
||||
"autoOrganizeSuccess": "Auto-organización completada exitosamente para {count} {type}",
|
||||
"autoOrganizePartialSuccess": "Auto-organización completada con {success} movidos, {failures} fallidos de un total de {total} modelos",
|
||||
"autoOrganizeFailed": "Auto-organización fallida: {error}",
|
||||
"noModelsSelected": "No hay modelos seleccionados"
|
||||
},
|
||||
"recipes": {
|
||||
"fetchFailed": "Error al obtener recetas: {message}",
|
||||
@@ -972,12 +1057,18 @@
|
||||
"deleteFailed": "Error: {error}",
|
||||
"deleteFailedGeneral": "Error al eliminar modelos",
|
||||
"selectedAdditional": "Seleccionados {count} {type}(s) adicionales",
|
||||
"marqueeSelectionComplete": "Seleccionados {count} {type}(s) con selección de marco",
|
||||
"refreshMetadataFailed": "Error al actualizar metadatos",
|
||||
"nameCannotBeEmpty": "El nombre del modelo no puede estar vacío",
|
||||
"nameUpdatedSuccessfully": "Nombre del modelo actualizado exitosamente",
|
||||
"nameUpdateFailed": "Error al actualizar nombre del modelo",
|
||||
"baseModelUpdated": "Modelo base actualizado exitosamente",
|
||||
"baseModelUpdateFailed": "Error al actualizar modelo base",
|
||||
"baseModelNotSelected": "Por favor selecciona un modelo base",
|
||||
"bulkBaseModelUpdating": "Actualizando modelo base para {count} modelo(s)...",
|
||||
"bulkBaseModelUpdateSuccess": "Modelo base actualizado exitosamente para {count} modelo(s)",
|
||||
"bulkBaseModelUpdatePartial": "Actualizados {success} modelo(s), fallaron {failed} modelo(s)",
|
||||
"bulkBaseModelUpdateFailed": "Error al actualizar el modelo base para los modelos seleccionados",
|
||||
"invalidCharactersRemoved": "Caracteres inválidos eliminados del nombre de archivo",
|
||||
"filenameCannotBeEmpty": "El nombre de archivo no puede estar vacío",
|
||||
"renameFailed": "Error al renombrar archivo: {message}",
|
||||
@@ -987,7 +1078,14 @@
|
||||
"verificationAlreadyDone": "Este grupo ya ha sido verificado",
|
||||
"verificationCompleteMismatch": "Verificación completa. {count} archivo(s) tienen hashes reales diferentes.",
|
||||
"verificationCompleteSuccess": "Verificación completa. Todos los archivos son confirmados duplicados.",
|
||||
"verificationFailed": "Error al verificar hashes: {message}"
|
||||
"verificationFailed": "Error al verificar hashes: {message}",
|
||||
"noTagsToAdd": "No hay etiquetas para añadir",
|
||||
"tagsAddedSuccessfully": "Se añadieron exitosamente {tagCount} etiqueta(s) a {count} {type}(s)",
|
||||
"tagsReplacedSuccessfully": "Se reemplazaron exitosamente las etiquetas de {count} {type}(s) con {tagCount} etiqueta(s)",
|
||||
"tagsAddFailed": "Error al añadir etiquetas a {count} modelo(s)",
|
||||
"tagsReplaceFailed": "Error al reemplazar etiquetas para {count} modelo(s)",
|
||||
"bulkTagsAddFailed": "Error al añadir etiquetas a los modelos",
|
||||
"bulkTagsReplaceFailed": "Error al reemplazar etiquetas para los modelos"
|
||||
},
|
||||
"search": {
|
||||
"atLeastOneOption": "Al menos una opción de búsqueda debe estar seleccionada"
|
||||
@@ -1069,6 +1167,7 @@
|
||||
},
|
||||
"exampleImages": {
|
||||
"pathUpdated": "Ruta de imágenes de ejemplo actualizada exitosamente",
|
||||
"pathUpdateFailed": "Error al actualizar la ruta de imágenes de ejemplo: {message}",
|
||||
"downloadInProgress": "Descarga ya en progreso",
|
||||
"enterLocationFirst": "Por favor introduce primero una ubicación de descarga",
|
||||
"downloadStarted": "Descarga de imágenes de ejemplo iniciada",
|
||||
|
||||
129
locales/fr.json
@@ -16,7 +16,9 @@
|
||||
"loading": "Chargement...",
|
||||
"unknown": "Inconnu",
|
||||
"date": "Date",
|
||||
"version": "Version"
|
||||
"version": "Version",
|
||||
"enabled": "Activé",
|
||||
"disabled": "Désactivé"
|
||||
},
|
||||
"language": {
|
||||
"select": "Langue",
|
||||
@@ -178,7 +180,9 @@
|
||||
"folderSettings": "Paramètres des dossiers",
|
||||
"downloadPathTemplates": "Modèles de chemin de téléchargement",
|
||||
"exampleImages": "Images d'exemple",
|
||||
"misc": "Divers"
|
||||
"misc": "Divers",
|
||||
"metadataArchive": "Base de données d'archive des métadonnées",
|
||||
"proxySettings": "Paramètres du proxy"
|
||||
},
|
||||
"contentFiltering": {
|
||||
"blurNsfwContent": "Flouter le contenu NSFW",
|
||||
@@ -194,7 +198,7 @@
|
||||
"displayDensity": "Densité d'affichage",
|
||||
"displayDensityOptions": {
|
||||
"default": "Par défaut",
|
||||
"medium": "Moyen",
|
||||
"medium": "Moyen",
|
||||
"compact": "Compact"
|
||||
},
|
||||
"displayDensityHelp": "Choisissez combien de cartes afficher par ligne :",
|
||||
@@ -231,17 +235,18 @@
|
||||
"templateOptions": {
|
||||
"flatStructure": "Structure plate",
|
||||
"byBaseModel": "Par modèle de base",
|
||||
"byAuthor": "Par auteur",
|
||||
"byAuthor": "Par auteur",
|
||||
"byFirstTag": "Par premier tag",
|
||||
"baseModelFirstTag": "Modèle de base + Premier tag",
|
||||
"baseModelAuthor": "Modèle de base + Auteur",
|
||||
"authorFirstTag": "Auteur + Premier tag",
|
||||
"baseModelAuthorFirstTag": "Modèle de base + Auteur + Premier tag",
|
||||
"customTemplate": "Modèle personnalisé"
|
||||
},
|
||||
"customTemplatePlaceholder": "Entrez un modèle personnalisé (ex: {base_model}/{author}/{first_tag})",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA",
|
||||
"checkpoint": "Checkpoint",
|
||||
"checkpoint": "Checkpoint",
|
||||
"embedding": "Embedding"
|
||||
},
|
||||
"baseModelPathMappings": "Mappages de chemin de modèle de base",
|
||||
@@ -273,6 +278,48 @@
|
||||
"misc": {
|
||||
"includeTriggerWords": "Inclure les mots-clés dans la syntaxe LoRA",
|
||||
"includeTriggerWordsHelp": "Inclure les mots-clés d'entraînement lors de la copie de la syntaxe LoRA dans le presse-papiers"
|
||||
},
|
||||
"metadataArchive": {
|
||||
"enableArchiveDb": "Activer la base de données d'archive des métadonnées",
|
||||
"enableArchiveDbHelp": "Utiliser une base de données locale pour accéder aux métadonnées des modèles supprimés de Civitai.",
|
||||
"status": "Statut",
|
||||
"statusAvailable": "Disponible",
|
||||
"statusUnavailable": "Non disponible",
|
||||
"enabled": "Activé",
|
||||
"management": "Gestion de la base de données",
|
||||
"managementHelp": "Télécharger ou supprimer la base de données d'archive des métadonnées",
|
||||
"downloadButton": "Télécharger la base de données",
|
||||
"downloadingButton": "Téléchargement...",
|
||||
"downloadedButton": "Téléchargé",
|
||||
"removeButton": "Supprimer la base de données",
|
||||
"removingButton": "Suppression...",
|
||||
"downloadSuccess": "Base de données d'archive des métadonnées téléchargée avec succès",
|
||||
"downloadError": "Échec du téléchargement de la base de données d'archive des métadonnées",
|
||||
"removeSuccess": "Base de données d'archive des métadonnées supprimée avec succès",
|
||||
"removeError": "Échec de la suppression de la base de données d'archive des métadonnées",
|
||||
"removeConfirm": "Êtes-vous sûr de vouloir supprimer la base de données d'archive des métadonnées ? Cela supprimera le fichier local et vous devrez la télécharger à nouveau pour utiliser cette fonctionnalité.",
|
||||
"preparing": "Préparation du téléchargement...",
|
||||
"connecting": "Connexion au serveur de téléchargement...",
|
||||
"completed": "Terminé",
|
||||
"downloadComplete": "Téléchargement terminé avec succès"
|
||||
},
|
||||
"proxySettings": {
|
||||
"enableProxy": "Activer le proxy au niveau de l'application",
|
||||
"enableProxyHelp": "Activer les paramètres de proxy personnalisés pour cette application, remplaçant les paramètres de proxy système",
|
||||
"proxyType": "Type de proxy",
|
||||
"proxyTypeHelp": "Sélectionnez le type de serveur proxy (HTTP, HTTPS, SOCKS4, SOCKS5)",
|
||||
"proxyHost": "Hôte du proxy",
|
||||
"proxyHostPlaceholder": "proxy.exemple.com",
|
||||
"proxyHostHelp": "Le nom d'hôte ou l'adresse IP de votre serveur proxy",
|
||||
"proxyPort": "Port du proxy",
|
||||
"proxyPortPlaceholder": "8080",
|
||||
"proxyPortHelp": "Le numéro de port de votre serveur proxy",
|
||||
"proxyUsername": "Nom d'utilisateur (optionnel)",
|
||||
"proxyUsernamePlaceholder": "nom_utilisateur",
|
||||
"proxyUsernameHelp": "Nom d'utilisateur pour l'authentification proxy (si nécessaire)",
|
||||
"proxyPassword": "Mot de passe (optionnel)",
|
||||
"proxyPasswordPlaceholder": "mot_de_passe",
|
||||
"proxyPasswordHelp": "Mot de passe pour l'authentification proxy (si nécessaire)"
|
||||
}
|
||||
},
|
||||
"loras": {
|
||||
@@ -318,13 +365,24 @@
|
||||
"bulkOperations": {
|
||||
"selected": "{count} sélectionné(s)",
|
||||
"selectedSuffix": "sélectionné(s)",
|
||||
"viewSelected": "Cliquez pour voir les éléments sélectionnés",
|
||||
"sendToWorkflow": "Envoyer vers le workflow",
|
||||
"copyAll": "Tout copier",
|
||||
"refreshAll": "Tout actualiser",
|
||||
"moveAll": "Tout déplacer",
|
||||
"deleteAll": "Tout supprimer",
|
||||
"clear": "Effacer"
|
||||
"viewSelected": "Voir la sélection",
|
||||
"addTags": "Ajouter des tags à tous",
|
||||
"setBaseModel": "Définir le modèle de base pour tous",
|
||||
"copyAll": "Copier toute la syntaxe",
|
||||
"refreshAll": "Actualiser toutes les métadonnées",
|
||||
"moveAll": "Déplacer tout vers un dossier",
|
||||
"autoOrganize": "Auto-organiser la sélection",
|
||||
"deleteAll": "Supprimer tous les modèles",
|
||||
"clear": "Effacer la sélection",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Initialisation de l'auto-organisation...",
|
||||
"starting": "Démarrage de l'auto-organisation pour {type}...",
|
||||
"processing": "Traitement ({processed}/{total}) - {success} déplacés, {skipped} ignorés, {failures} échecs",
|
||||
"cleaning": "Nettoyage des répertoires vides...",
|
||||
"completed": "Terminé : {success} déplacés, {skipped} ignorés, {failures} échecs",
|
||||
"complete": "Auto-organisation terminée",
|
||||
"error": "Erreur : {error}"
|
||||
}
|
||||
},
|
||||
"contextMenu": {
|
||||
"refreshMetadata": "Actualiser les données Civitai",
|
||||
@@ -572,6 +630,24 @@
|
||||
"countMessage": "modèles seront définitivement supprimés.",
|
||||
"action": "Tout supprimer"
|
||||
},
|
||||
"bulkAddTags": {
|
||||
"title": "Ajouter des tags à plusieurs modèles",
|
||||
"description": "Ajouter des tags à",
|
||||
"models": "modèles",
|
||||
"tagsToAdd": "Tags à ajouter",
|
||||
"placeholder": "Entrez un tag et appuyez sur Entrée...",
|
||||
"appendTags": "Ajouter les tags",
|
||||
"replaceTags": "Remplacer les tags",
|
||||
"saveChanges": "Enregistrer les modifications"
|
||||
},
|
||||
"bulkBaseModel": {
|
||||
"title": "Définir le modèle de base pour plusieurs modèles",
|
||||
"description": "Définir le modèle de base pour",
|
||||
"models": "modèles",
|
||||
"selectBaseModel": "Sélectionner le modèle de base",
|
||||
"save": "Mettre à jour le modèle de base",
|
||||
"cancel": "Annuler"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Images d'exemple locales",
|
||||
"message": "Aucune image d'exemple locale trouvée pour ce modèle. Options d'affichage :",
|
||||
@@ -622,7 +698,12 @@
|
||||
"editBaseModel": "Modifier le modèle de base",
|
||||
"viewOnCivitai": "Voir sur Civitai",
|
||||
"viewOnCivitaiText": "Voir sur Civitai",
|
||||
"viewCreatorProfile": "Voir le profil du créateur"
|
||||
"viewCreatorProfile": "Voir le profil du créateur",
|
||||
"openFileLocation": "Ouvrir l'emplacement du fichier"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "Emplacement du fichier ouvert avec succès",
|
||||
"failed": "Échec de l'ouverture de l'emplacement du fichier"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "Version",
|
||||
@@ -923,7 +1004,11 @@
|
||||
"downloadPartialWithAccess": "{completed} sur {total} LoRAs téléchargés. {accessFailures} ont échoué en raison de restrictions d'accès. Vérifiez votre clé API dans les paramètres ou le statut d'accès anticipé.",
|
||||
"pleaseSelectVersion": "Veuillez sélectionner une version",
|
||||
"versionExists": "Cette version existe déjà dans votre bibliothèque",
|
||||
"downloadCompleted": "Téléchargement terminé avec succès"
|
||||
"downloadCompleted": "Téléchargement terminé avec succès",
|
||||
"autoOrganizeSuccess": "Auto-organisation terminée avec succès pour {count} {type}",
|
||||
"autoOrganizePartialSuccess": "Auto-organisation terminée avec {success} déplacés, {failures} échecs sur {total} modèles",
|
||||
"autoOrganizeFailed": "Échec de l'auto-organisation : {error}",
|
||||
"noModelsSelected": "Aucun modèle sélectionné"
|
||||
},
|
||||
"recipes": {
|
||||
"fetchFailed": "Échec de la récupération des recipes : {message}",
|
||||
@@ -972,12 +1057,18 @@
|
||||
"deleteFailed": "Erreur : {error}",
|
||||
"deleteFailedGeneral": "Échec de la suppression des modèles",
|
||||
"selectedAdditional": "{count} {type}(s) supplémentaire(s) sélectionné(s)",
|
||||
"marqueeSelectionComplete": "{count} {type}(s) sélectionné(s) avec la sélection par glisser-déposer",
|
||||
"refreshMetadataFailed": "Échec de l'actualisation des métadonnées",
|
||||
"nameCannotBeEmpty": "Le nom du modèle ne peut pas être vide",
|
||||
"nameUpdatedSuccessfully": "Nom du modèle mis à jour avec succès",
|
||||
"nameUpdateFailed": "Échec de la mise à jour du nom du modèle",
|
||||
"baseModelUpdated": "Modèle de base mis à jour avec succès",
|
||||
"baseModelUpdateFailed": "Échec de la mise à jour du modèle de base",
|
||||
"baseModelNotSelected": "Veuillez sélectionner un modèle de base",
|
||||
"bulkBaseModelUpdating": "Mise à jour du modèle de base pour {count} modèle(s)...",
|
||||
"bulkBaseModelUpdateSuccess": "Modèle de base mis à jour avec succès pour {count} modèle(s)",
|
||||
"bulkBaseModelUpdatePartial": "{success} modèle(s) mis à jour, {failed} modèle(s) en échec",
|
||||
"bulkBaseModelUpdateFailed": "Échec de la mise à jour du modèle de base pour les modèles sélectionnés",
|
||||
"invalidCharactersRemoved": "Caractères invalides supprimés du nom de fichier",
|
||||
"filenameCannotBeEmpty": "Le nom de fichier ne peut pas être vide",
|
||||
"renameFailed": "Échec du renommage du fichier : {message}",
|
||||
@@ -987,7 +1078,14 @@
|
||||
"verificationAlreadyDone": "Ce groupe a déjà été vérifié",
|
||||
"verificationCompleteMismatch": "Vérification terminée. {count} fichier(s) ont des hash différents.",
|
||||
"verificationCompleteSuccess": "Vérification terminée. Tous les fichiers sont confirmés comme doublons.",
|
||||
"verificationFailed": "Échec de la vérification des hash : {message}"
|
||||
"verificationFailed": "Échec de la vérification des hash : {message}",
|
||||
"noTagsToAdd": "Aucun tag à ajouter",
|
||||
"tagsAddedSuccessfully": "{tagCount} tag(s) ajouté(s) avec succès à {count} {type}(s)",
|
||||
"tagsReplacedSuccessfully": "Tags remplacés avec succès pour {count} {type}(s) avec {tagCount} tag(s)",
|
||||
"tagsAddFailed": "Échec de l'ajout des tags à {count} modèle(s)",
|
||||
"tagsReplaceFailed": "Échec du remplacement des tags pour {count} modèle(s)",
|
||||
"bulkTagsAddFailed": "Échec de l'ajout des tags aux modèles",
|
||||
"bulkTagsReplaceFailed": "Échec du remplacement des tags pour les modèles"
|
||||
},
|
||||
"search": {
|
||||
"atLeastOneOption": "Au moins une option de recherche doit être sélectionnée"
|
||||
@@ -1069,6 +1167,7 @@
|
||||
},
|
||||
"exampleImages": {
|
||||
"pathUpdated": "Chemin des images d'exemple mis à jour avec succès",
|
||||
"pathUpdateFailed": "Échec de la mise à jour du chemin des images d'exemple : {message}",
|
||||
"downloadInProgress": "Téléchargement déjà en cours",
|
||||
"enterLocationFirst": "Veuillez d'abord entrer un emplacement de téléchargement",
|
||||
"downloadStarted": "Téléchargement des images d'exemple démarré",
|
||||
|
||||
123
locales/ja.json
@@ -16,7 +16,9 @@
|
||||
"loading": "読み込み中...",
|
||||
"unknown": "不明",
|
||||
"date": "日付",
|
||||
"version": "バージョン"
|
||||
"version": "バージョン",
|
||||
"enabled": "有効",
|
||||
"disabled": "無効"
|
||||
},
|
||||
"language": {
|
||||
"select": "言語",
|
||||
@@ -178,7 +180,9 @@
|
||||
"folderSettings": "フォルダ設定",
|
||||
"downloadPathTemplates": "ダウンロードパステンプレート",
|
||||
"exampleImages": "例画像",
|
||||
"misc": "その他"
|
||||
"misc": "その他",
|
||||
"metadataArchive": "メタデータアーカイブデータベース",
|
||||
"proxySettings": "プロキシ設定"
|
||||
},
|
||||
"contentFiltering": {
|
||||
"blurNsfwContent": "NSFWコンテンツをぼかす",
|
||||
@@ -236,6 +240,7 @@
|
||||
"baseModelFirstTag": "ベースモデル + 最初のタグ",
|
||||
"baseModelAuthor": "ベースモデル + 作成者",
|
||||
"authorFirstTag": "作成者 + 最初のタグ",
|
||||
"baseModelAuthorFirstTag": "ベースモデル + 作成者 + 最初のタグ",
|
||||
"customTemplate": "カスタムテンプレート"
|
||||
},
|
||||
"customTemplatePlaceholder": "カスタムテンプレートを入力(例:{base_model}/{author}/{first_tag})",
|
||||
@@ -273,6 +278,48 @@
|
||||
"misc": {
|
||||
"includeTriggerWords": "LoRA構文にトリガーワードを含める",
|
||||
"includeTriggerWordsHelp": "LoRA構文をクリップボードにコピーする際、学習済みトリガーワードを含めます"
|
||||
},
|
||||
"metadataArchive": {
|
||||
"enableArchiveDb": "メタデータアーカイブデータベースを有効化",
|
||||
"enableArchiveDbHelp": "Civitaiから削除されたモデルのメタデータにアクセスするためにローカルデータベースを使用します。",
|
||||
"status": "ステータス",
|
||||
"statusAvailable": "利用可能",
|
||||
"statusUnavailable": "利用不可",
|
||||
"enabled": "有効",
|
||||
"management": "データベース管理",
|
||||
"managementHelp": "メタデータアーカイブデータベースのダウンロードまたは削除",
|
||||
"downloadButton": "データベースをダウンロード",
|
||||
"downloadingButton": "ダウンロード中...",
|
||||
"downloadedButton": "ダウンロード済み",
|
||||
"removeButton": "データベースを削除",
|
||||
"removingButton": "削除中...",
|
||||
"downloadSuccess": "メタデータアーカイブデータベースのダウンロードが完了しました",
|
||||
"downloadError": "メタデータアーカイブデータベースのダウンロードに失敗しました",
|
||||
"removeSuccess": "メタデータアーカイブデータベースが削除されました",
|
||||
"removeError": "メタデータアーカイブデータベースの削除に失敗しました",
|
||||
"removeConfirm": "本当にメタデータアーカイブデータベースを削除しますか?ローカルのデータベースファイルが削除され、この機能を再度利用するには再ダウンロードが必要です。",
|
||||
"preparing": "ダウンロードを準備中...",
|
||||
"connecting": "ダウンロードサーバーに接続中...",
|
||||
"completed": "完了",
|
||||
"downloadComplete": "ダウンロードが正常に完了しました"
|
||||
},
|
||||
"proxySettings": {
|
||||
"enableProxy": "アプリレベルのプロキシを有効化",
|
||||
"enableProxyHelp": "このアプリケーション専用のカスタムプロキシ設定を有効にします(システムのプロキシ設定を上書きします)",
|
||||
"proxyType": "プロキシタイプ",
|
||||
"proxyTypeHelp": "プロキシサーバーの種類を選択(HTTP、HTTPS、SOCKS4、SOCKS5)",
|
||||
"proxyHost": "プロキシホスト",
|
||||
"proxyHostPlaceholder": "proxy.example.com",
|
||||
"proxyHostHelp": "プロキシサーバーのホスト名またはIPアドレス",
|
||||
"proxyPort": "プロキシポート",
|
||||
"proxyPortPlaceholder": "8080",
|
||||
"proxyPortHelp": "プロキシサーバーのポート番号",
|
||||
"proxyUsername": "ユーザー名(任意)",
|
||||
"proxyUsernamePlaceholder": "ユーザー名",
|
||||
"proxyUsernameHelp": "プロキシ認証用のユーザー名(必要な場合)",
|
||||
"proxyPassword": "パスワード(任意)",
|
||||
"proxyPasswordPlaceholder": "パスワード",
|
||||
"proxyPasswordHelp": "プロキシ認証用のパスワード(必要な場合)"
|
||||
}
|
||||
},
|
||||
"loras": {
|
||||
@@ -318,13 +365,24 @@
|
||||
"bulkOperations": {
|
||||
"selected": "{count} 選択中",
|
||||
"selectedSuffix": "選択中",
|
||||
"viewSelected": "選択したアイテムを表示するにはクリック",
|
||||
"sendToWorkflow": "ワークフローに送信",
|
||||
"copyAll": "すべてコピー",
|
||||
"refreshAll": "すべて更新",
|
||||
"moveAll": "すべて移動",
|
||||
"deleteAll": "すべて削除",
|
||||
"clear": "クリア"
|
||||
"viewSelected": "選択中を表示",
|
||||
"addTags": "すべてにタグを追加",
|
||||
"setBaseModel": "すべてにベースモデルを設定",
|
||||
"copyAll": "すべての構文をコピー",
|
||||
"refreshAll": "すべてのメタデータを更新",
|
||||
"moveAll": "すべてをフォルダに移動",
|
||||
"autoOrganize": "自動整理を実行",
|
||||
"deleteAll": "すべてのモデルを削除",
|
||||
"clear": "選択をクリア",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "自動整理を初期化中...",
|
||||
"starting": "{type}の自動整理を開始中...",
|
||||
"processing": "処理中({processed}/{total})- {success} 移動、{skipped} スキップ、{failures} 失敗",
|
||||
"cleaning": "空のディレクトリをクリーンアップ中...",
|
||||
"completed": "完了:{success} 移動、{skipped} スキップ、{failures} 失敗",
|
||||
"complete": "自動整理が完了しました",
|
||||
"error": "エラー:{error}"
|
||||
}
|
||||
},
|
||||
"contextMenu": {
|
||||
"refreshMetadata": "Civitaiデータを更新",
|
||||
@@ -572,6 +630,24 @@
|
||||
"countMessage": "モデルが完全に削除されます。",
|
||||
"action": "すべて削除"
|
||||
},
|
||||
"bulkAddTags": {
|
||||
"title": "複数モデルにタグを追加",
|
||||
"description": "タグを追加するモデル:",
|
||||
"models": "モデル",
|
||||
"tagsToAdd": "追加するタグ",
|
||||
"placeholder": "タグを入力してEnterを押してください...",
|
||||
"appendTags": "タグを追加",
|
||||
"replaceTags": "タグを置換",
|
||||
"saveChanges": "変更を保存"
|
||||
},
|
||||
"bulkBaseModel": {
|
||||
"title": "複数モデルのベースモデルを設定",
|
||||
"description": "ベースモデルを設定するモデル:",
|
||||
"models": "モデル",
|
||||
"selectBaseModel": "ベースモデルを選択",
|
||||
"save": "ベースモデルを更新",
|
||||
"cancel": "キャンセル"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "ローカル例画像",
|
||||
"message": "このモデルのローカル例画像が見つかりませんでした。表示オプション:",
|
||||
@@ -622,7 +698,12 @@
|
||||
"editBaseModel": "ベースモデルを編集",
|
||||
"viewOnCivitai": "Civitaiで表示",
|
||||
"viewOnCivitaiText": "Civitaiで表示",
|
||||
"viewCreatorProfile": "作成者プロフィールを表示"
|
||||
"viewCreatorProfile": "作成者プロフィールを表示",
|
||||
"openFileLocation": "ファイルの場所を開く"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "ファイルの場所を正常に開きました",
|
||||
"failed": "ファイルの場所を開くのに失敗しました"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "バージョン",
|
||||
@@ -923,7 +1004,11 @@
|
||||
"downloadPartialWithAccess": "{total} LoRAのうち {completed} がダウンロードされました。{accessFailures} はアクセス制限により失敗しました。設定でAPIキーまたはアーリーアクセス状況を確認してください。",
|
||||
"pleaseSelectVersion": "バージョンを選択してください",
|
||||
"versionExists": "このバージョンは既にライブラリに存在します",
|
||||
"downloadCompleted": "ダウンロードが正常に完了しました"
|
||||
"downloadCompleted": "ダウンロードが正常に完了しました",
|
||||
"autoOrganizeSuccess": "{count} {type} の自動整理が正常に完了しました",
|
||||
"autoOrganizePartialSuccess": "自動整理が完了しました:{total} モデル中 {success} 移動、{failures} 失敗",
|
||||
"autoOrganizeFailed": "自動整理に失敗しました:{error}",
|
||||
"noModelsSelected": "モデルが選択されていません"
|
||||
},
|
||||
"recipes": {
|
||||
"fetchFailed": "レシピの取得に失敗しました:{message}",
|
||||
@@ -972,12 +1057,18 @@
|
||||
"deleteFailed": "エラー:{error}",
|
||||
"deleteFailedGeneral": "モデルの削除に失敗しました",
|
||||
"selectedAdditional": "{count} 追加{type}が選択されました",
|
||||
"marqueeSelectionComplete": "マーキー選択で {count} の{type}が選択されました",
|
||||
"refreshMetadataFailed": "メタデータの更新に失敗しました",
|
||||
"nameCannotBeEmpty": "モデル名を空にすることはできません",
|
||||
"nameUpdatedSuccessfully": "モデル名が正常に更新されました",
|
||||
"nameUpdateFailed": "モデル名の更新に失敗しました",
|
||||
"baseModelUpdated": "ベースモデルが正常に更新されました",
|
||||
"baseModelUpdateFailed": "ベースモデルの更新に失敗しました",
|
||||
"baseModelNotSelected": "ベースモデルを選択してください",
|
||||
"bulkBaseModelUpdating": "{count} モデルのベースモデルを更新中...",
|
||||
"bulkBaseModelUpdateSuccess": "{count} モデルのベースモデルが正常に更新されました",
|
||||
"bulkBaseModelUpdatePartial": "{success} モデルを更新、{failed} モデルは失敗しました",
|
||||
"bulkBaseModelUpdateFailed": "選択したモデルのベースモデルの更新に失敗しました",
|
||||
"invalidCharactersRemoved": "ファイル名から無効な文字が削除されました",
|
||||
"filenameCannotBeEmpty": "ファイル名を空にすることはできません",
|
||||
"renameFailed": "ファイル名の変更に失敗しました:{message}",
|
||||
@@ -987,7 +1078,14 @@
|
||||
"verificationAlreadyDone": "このグループは既に検証済みです",
|
||||
"verificationCompleteMismatch": "検証完了。{count} ファイルの実際のハッシュが異なります。",
|
||||
"verificationCompleteSuccess": "検証完了。すべてのファイルが重複であることが確認されました。",
|
||||
"verificationFailed": "ハッシュの検証に失敗しました:{message}"
|
||||
"verificationFailed": "ハッシュの検証に失敗しました:{message}",
|
||||
"noTagsToAdd": "追加するタグがありません",
|
||||
"tagsAddedSuccessfully": "{count} {type} に {tagCount} 個のタグを追加しました",
|
||||
"tagsReplacedSuccessfully": "{count} {type} のタグを {tagCount} 個に置換しました",
|
||||
"tagsAddFailed": "{count} モデルへのタグ追加に失敗しました",
|
||||
"tagsReplaceFailed": "{count} モデルのタグ置換に失敗しました",
|
||||
"bulkTagsAddFailed": "モデルへのタグ追加に失敗しました",
|
||||
"bulkTagsReplaceFailed": "モデルのタグ置換に失敗しました"
|
||||
},
|
||||
"search": {
|
||||
"atLeastOneOption": "少なくとも1つの検索オプションを選択する必要があります"
|
||||
@@ -1069,6 +1167,7 @@
|
||||
},
|
||||
"exampleImages": {
|
||||
"pathUpdated": "例画像パスが正常に更新されました",
|
||||
"pathUpdateFailed": "例画像パスの更新に失敗しました:{message}",
|
||||
"downloadInProgress": "ダウンロードは既に進行中です",
|
||||
"enterLocationFirst": "最初にダウンロード場所を入力してください",
|
||||
"downloadStarted": "例画像のダウンロードが開始されました",
|
||||
|
||||
129
locales/ko.json
@@ -16,7 +16,9 @@
|
||||
"loading": "로딩 중...",
|
||||
"unknown": "알 수 없음",
|
||||
"date": "날짜",
|
||||
"version": "버전"
|
||||
"version": "버전",
|
||||
"enabled": "활성화됨",
|
||||
"disabled": "비활성화됨"
|
||||
},
|
||||
"language": {
|
||||
"select": "언어",
|
||||
@@ -178,7 +180,9 @@
|
||||
"folderSettings": "폴더 설정",
|
||||
"downloadPathTemplates": "다운로드 경로 템플릿",
|
||||
"exampleImages": "예시 이미지",
|
||||
"misc": "기타"
|
||||
"misc": "기타",
|
||||
"metadataArchive": "메타데이터 아카이브 데이터베이스",
|
||||
"proxySettings": "프록시 설정"
|
||||
},
|
||||
"contentFiltering": {
|
||||
"blurNsfwContent": "NSFW 콘텐츠 블러 처리",
|
||||
@@ -194,7 +198,7 @@
|
||||
"displayDensity": "표시 밀도",
|
||||
"displayDensityOptions": {
|
||||
"default": "기본",
|
||||
"medium": "중간",
|
||||
"medium": "중간",
|
||||
"compact": "조밀"
|
||||
},
|
||||
"displayDensityHelp": "한 줄에 표시할 카드 수를 선택하세요:",
|
||||
@@ -231,17 +235,18 @@
|
||||
"templateOptions": {
|
||||
"flatStructure": "플랫 구조",
|
||||
"byBaseModel": "베이스 모델별",
|
||||
"byAuthor": "제작자별",
|
||||
"byAuthor": "제작자별",
|
||||
"byFirstTag": "첫 번째 태그별",
|
||||
"baseModelFirstTag": "베이스 모델 + 첫 번째 태그",
|
||||
"baseModelAuthor": "베이스 모델 + 제작자",
|
||||
"authorFirstTag": "제작자 + 첫 번째 태그",
|
||||
"baseModelAuthorFirstTag": "베이스 모델 + 제작자 + 첫 번째 태그",
|
||||
"customTemplate": "사용자 정의 템플릿"
|
||||
},
|
||||
"customTemplatePlaceholder": "사용자 정의 템플릿 입력 (예: {base_model}/{author}/{first_tag})",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA",
|
||||
"checkpoint": "Checkpoint",
|
||||
"checkpoint": "Checkpoint",
|
||||
"embedding": "Embedding"
|
||||
},
|
||||
"baseModelPathMappings": "베이스 모델 경로 매핑",
|
||||
@@ -273,6 +278,48 @@
|
||||
"misc": {
|
||||
"includeTriggerWords": "LoRA 문법에 트리거 단어 포함",
|
||||
"includeTriggerWordsHelp": "LoRA 문법을 클립보드에 복사할 때 학습된 트리거 단어를 포함합니다"
|
||||
},
|
||||
"metadataArchive": {
|
||||
"enableArchiveDb": "메타데이터 아카이브 데이터베이스 활성화",
|
||||
"enableArchiveDbHelp": "Civitai에서 삭제된 모델의 메타데이터에 접근하기 위해 로컬 데이터베이스를 사용합니다.",
|
||||
"status": "상태",
|
||||
"statusAvailable": "사용 가능",
|
||||
"statusUnavailable": "사용 불가",
|
||||
"enabled": "활성화됨",
|
||||
"management": "데이터베이스 관리",
|
||||
"managementHelp": "메타데이터 아카이브 데이터베이스를 다운로드하거나 제거합니다",
|
||||
"downloadButton": "데이터베이스 다운로드",
|
||||
"downloadingButton": "다운로드 중...",
|
||||
"downloadedButton": "다운로드 완료",
|
||||
"removeButton": "데이터베이스 제거",
|
||||
"removingButton": "제거 중...",
|
||||
"downloadSuccess": "메타데이터 아카이브 데이터베이스가 성공적으로 다운로드되었습니다",
|
||||
"downloadError": "메타데이터 아카이브 데이터베이스 다운로드 실패",
|
||||
"removeSuccess": "메타데이터 아카이브 데이터베이스가 성공적으로 제거되었습니다",
|
||||
"removeError": "메타데이터 아카이브 데이터베이스 제거 실패",
|
||||
"removeConfirm": "메타데이터 아카이브 데이터베이스를 제거하시겠습니까? 이 작업은 로컬 데이터베이스 파일을 삭제하며, 이 기능을 사용하려면 다시 다운로드해야 합니다.",
|
||||
"preparing": "다운로드 준비 중...",
|
||||
"connecting": "다운로드 서버에 연결 중...",
|
||||
"completed": "완료됨",
|
||||
"downloadComplete": "다운로드가 성공적으로 완료되었습니다"
|
||||
},
|
||||
"proxySettings": {
|
||||
"enableProxy": "앱 수준 프록시 활성화",
|
||||
"enableProxyHelp": "이 애플리케이션에 대한 사용자 지정 프록시 설정을 활성화하여 시스템 프록시 설정을 무시합니다",
|
||||
"proxyType": "프록시 유형",
|
||||
"proxyTypeHelp": "프록시 서버 유형을 선택하세요 (HTTP, HTTPS, SOCKS4, SOCKS5)",
|
||||
"proxyHost": "프록시 호스트",
|
||||
"proxyHostPlaceholder": "proxy.example.com",
|
||||
"proxyHostHelp": "프록시 서버의 호스트명 또는 IP 주소",
|
||||
"proxyPort": "프록시 포트",
|
||||
"proxyPortPlaceholder": "8080",
|
||||
"proxyPortHelp": "프록시 서버의 포트 번호",
|
||||
"proxyUsername": "사용자 이름 (선택사항)",
|
||||
"proxyUsernamePlaceholder": "username",
|
||||
"proxyUsernameHelp": "프록시 인증에 필요한 사용자 이름 (필요한 경우)",
|
||||
"proxyPassword": "비밀번호 (선택사항)",
|
||||
"proxyPasswordPlaceholder": "password",
|
||||
"proxyPasswordHelp": "프록시 인증에 필요한 비밀번호 (필요한 경우)"
|
||||
}
|
||||
},
|
||||
"loras": {
|
||||
@@ -318,13 +365,24 @@
|
||||
"bulkOperations": {
|
||||
"selected": "{count}개 선택됨",
|
||||
"selectedSuffix": "개 선택됨",
|
||||
"viewSelected": "선택된 항목 보기",
|
||||
"sendToWorkflow": "워크플로로 전송",
|
||||
"copyAll": "모두 복사",
|
||||
"refreshAll": "모두 새로고침",
|
||||
"moveAll": "모두 이동",
|
||||
"deleteAll": "모두 삭제",
|
||||
"clear": "지우기"
|
||||
"viewSelected": "선택 항목 보기",
|
||||
"addTags": "모두에 태그 추가",
|
||||
"setBaseModel": "모두에 베이스 모델 설정",
|
||||
"copyAll": "모든 문법 복사",
|
||||
"refreshAll": "모든 메타데이터 새로고침",
|
||||
"moveAll": "모두 폴더로 이동",
|
||||
"autoOrganize": "자동 정리 선택",
|
||||
"deleteAll": "모든 모델 삭제",
|
||||
"clear": "선택 지우기",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "자동 정리 초기화 중...",
|
||||
"starting": "{type}에 대한 자동 정리 시작...",
|
||||
"processing": "처리 중 ({processed}/{total}) - {success}개 이동, {skipped}개 건너뜀, {failures}개 실패",
|
||||
"cleaning": "빈 디렉토리 정리 중...",
|
||||
"completed": "완료: {success}개 이동, {skipped}개 건너뜀, {failures}개 실패",
|
||||
"complete": "자동 정리 완료",
|
||||
"error": "오류: {error}"
|
||||
}
|
||||
},
|
||||
"contextMenu": {
|
||||
"refreshMetadata": "Civitai 데이터 새로고침",
|
||||
@@ -572,6 +630,24 @@
|
||||
"countMessage": "개의 모델이 영구적으로 삭제됩니다.",
|
||||
"action": "모두 삭제"
|
||||
},
|
||||
"bulkAddTags": {
|
||||
"title": "여러 모델에 태그 추가",
|
||||
"description": "다음에 태그를 추가합니다:",
|
||||
"models": "모델",
|
||||
"tagsToAdd": "추가할 태그",
|
||||
"placeholder": "태그를 입력하고 Enter를 누르세요...",
|
||||
"appendTags": "태그 추가",
|
||||
"replaceTags": "태그 교체",
|
||||
"saveChanges": "변경사항 저장"
|
||||
},
|
||||
"bulkBaseModel": {
|
||||
"title": "여러 모델의 베이스 모델 설정",
|
||||
"description": "다음 모델의 베이스 모델을 설정합니다:",
|
||||
"models": "모델",
|
||||
"selectBaseModel": "베이스 모델 선택",
|
||||
"save": "베이스 모델 업데이트",
|
||||
"cancel": "취소"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "로컬 예시 이미지",
|
||||
"message": "이 모델의 로컬 예시 이미지를 찾을 수 없습니다. 보기 옵션:",
|
||||
@@ -622,7 +698,12 @@
|
||||
"editBaseModel": "베이스 모델 편집",
|
||||
"viewOnCivitai": "Civitai에서 보기",
|
||||
"viewOnCivitaiText": "Civitai에서 보기",
|
||||
"viewCreatorProfile": "제작자 프로필 보기"
|
||||
"viewCreatorProfile": "제작자 프로필 보기",
|
||||
"openFileLocation": "파일 위치 열기"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "파일 위치가 성공적으로 열렸습니다",
|
||||
"failed": "파일 위치 열기에 실패했습니다"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "버전",
|
||||
@@ -923,7 +1004,11 @@
|
||||
"downloadPartialWithAccess": "{total}개 중 {completed}개 LoRA가 다운로드되었습니다. {accessFailures}개는 액세스 제한으로 실패했습니다. 설정에서 API 키 또는 얼리 액세스 상태를 확인하세요.",
|
||||
"pleaseSelectVersion": "버전을 선택해주세요",
|
||||
"versionExists": "이 버전은 이미 라이브러리에 있습니다",
|
||||
"downloadCompleted": "다운로드가 성공적으로 완료되었습니다"
|
||||
"downloadCompleted": "다운로드가 성공적으로 완료되었습니다",
|
||||
"autoOrganizeSuccess": "{count}개의 {type}에 대해 자동 정리가 성공적으로 완료되었습니다",
|
||||
"autoOrganizePartialSuccess": "자동 정리 완료: 전체 {total}개 중 {success}개 이동, {failures}개 실패",
|
||||
"autoOrganizeFailed": "자동 정리 실패: {error}",
|
||||
"noModelsSelected": "선택된 모델이 없습니다"
|
||||
},
|
||||
"recipes": {
|
||||
"fetchFailed": "레시피 가져오기 실패: {message}",
|
||||
@@ -972,12 +1057,18 @@
|
||||
"deleteFailed": "오류: {error}",
|
||||
"deleteFailedGeneral": "모델 삭제에 실패했습니다",
|
||||
"selectedAdditional": "추가로 {count}개의 {type}이(가) 선택되었습니다",
|
||||
"marqueeSelectionComplete": "마키 선택으로 {count}개의 {type}이(가) 선택되었습니다",
|
||||
"refreshMetadataFailed": "메타데이터 새로고침에 실패했습니다",
|
||||
"nameCannotBeEmpty": "모델 이름은 비어있을 수 없습니다",
|
||||
"nameUpdatedSuccessfully": "모델 이름이 성공적으로 업데이트되었습니다",
|
||||
"nameUpdateFailed": "모델 이름 업데이트에 실패했습니다",
|
||||
"baseModelUpdated": "베이스 모델이 성공적으로 업데이트되었습니다",
|
||||
"baseModelUpdateFailed": "베이스 모델 업데이트에 실패했습니다",
|
||||
"baseModelNotSelected": "베이스 모델을 선택해주세요",
|
||||
"bulkBaseModelUpdating": "{count}개의 모델에 베이스 모델을 업데이트 중...",
|
||||
"bulkBaseModelUpdateSuccess": "{count}개의 모델에 베이스 모델이 성공적으로 업데이트되었습니다",
|
||||
"bulkBaseModelUpdatePartial": "{success}개의 모델이 업데이트되었고, {failed}개의 모델이 실패했습니다",
|
||||
"bulkBaseModelUpdateFailed": "선택한 모델의 베이스 모델 업데이트에 실패했습니다",
|
||||
"invalidCharactersRemoved": "파일명에서 잘못된 문자가 제거되었습니다",
|
||||
"filenameCannotBeEmpty": "파일 이름은 비어있을 수 없습니다",
|
||||
"renameFailed": "파일 이름 변경 실패: {message}",
|
||||
@@ -987,7 +1078,14 @@
|
||||
"verificationAlreadyDone": "이 그룹은 이미 검증되었습니다",
|
||||
"verificationCompleteMismatch": "검증 완료. {count}개 파일의 실제 해시가 다릅니다.",
|
||||
"verificationCompleteSuccess": "검증 완료. 모든 파일이 중복임을 확인했습니다.",
|
||||
"verificationFailed": "해시 검증 실패: {message}"
|
||||
"verificationFailed": "해시 검증 실패: {message}",
|
||||
"noTagsToAdd": "추가할 태그가 없습니다",
|
||||
"tagsAddedSuccessfully": "{count}개의 {type}에 {tagCount}개의 태그가 성공적으로 추가되었습니다",
|
||||
"tagsReplacedSuccessfully": "{count}개의 {type}의 태그가 {tagCount}개의 태그로 성공적으로 교체되었습니다",
|
||||
"tagsAddFailed": "{count}개의 모델에 태그 추가에 실패했습니다",
|
||||
"tagsReplaceFailed": "{count}개의 모델의 태그 교체에 실패했습니다",
|
||||
"bulkTagsAddFailed": "모델에 태그 추가에 실패했습니다",
|
||||
"bulkTagsReplaceFailed": "모델의 태그 교체에 실패했습니다"
|
||||
},
|
||||
"search": {
|
||||
"atLeastOneOption": "최소 하나의 검색 옵션을 선택해야 합니다"
|
||||
@@ -1069,6 +1167,7 @@
|
||||
},
|
||||
"exampleImages": {
|
||||
"pathUpdated": "예시 이미지 경로가 성공적으로 업데이트되었습니다",
|
||||
"pathUpdateFailed": "예시 이미지 경로 업데이트 실패: {message}",
|
||||
"downloadInProgress": "이미 다운로드가 진행 중입니다",
|
||||
"enterLocationFirst": "먼저 다운로드 위치를 입력해주세요",
|
||||
"downloadStarted": "예시 이미지 다운로드가 시작되었습니다",
|
||||
|
||||
129
locales/ru.json
@@ -16,7 +16,9 @@
|
||||
"loading": "Загрузка...",
|
||||
"unknown": "Неизвестно",
|
||||
"date": "Дата",
|
||||
"version": "Версия"
|
||||
"version": "Версия",
|
||||
"enabled": "Включено",
|
||||
"disabled": "Отключено"
|
||||
},
|
||||
"language": {
|
||||
"select": "Язык",
|
||||
@@ -178,7 +180,9 @@
|
||||
"folderSettings": "Настройки папок",
|
||||
"downloadPathTemplates": "Шаблоны путей загрузки",
|
||||
"exampleImages": "Примеры изображений",
|
||||
"misc": "Разное"
|
||||
"misc": "Разное",
|
||||
"metadataArchive": "Архив метаданных",
|
||||
"proxySettings": "Настройки прокси"
|
||||
},
|
||||
"contentFiltering": {
|
||||
"blurNsfwContent": "Размывать NSFW контент",
|
||||
@@ -194,7 +198,7 @@
|
||||
"displayDensity": "Плотность отображения",
|
||||
"displayDensityOptions": {
|
||||
"default": "По умолчанию",
|
||||
"medium": "Средняя",
|
||||
"medium": "Средняя",
|
||||
"compact": "Компактная"
|
||||
},
|
||||
"displayDensityHelp": "Выберите количество карточек для отображения в ряду:",
|
||||
@@ -231,17 +235,18 @@
|
||||
"templateOptions": {
|
||||
"flatStructure": "Плоская структура",
|
||||
"byBaseModel": "По базовой модели",
|
||||
"byAuthor": "По автору",
|
||||
"byAuthor": "По автору",
|
||||
"byFirstTag": "По первому тегу",
|
||||
"baseModelFirstTag": "Базовая модель + Первый тег",
|
||||
"baseModelAuthor": "Базовая модель + Автор",
|
||||
"authorFirstTag": "Автор + Первый тег",
|
||||
"baseModelAuthorFirstTag": "Базовая модель + Автор + Первый тег",
|
||||
"customTemplate": "Пользовательский шаблон"
|
||||
},
|
||||
"customTemplatePlaceholder": "Введите пользовательский шаблон (например, {base_model}/{author}/{first_tag})",
|
||||
"modelTypes": {
|
||||
"lora": "LoRA",
|
||||
"checkpoint": "Checkpoint",
|
||||
"checkpoint": "Checkpoint",
|
||||
"embedding": "Embedding"
|
||||
},
|
||||
"baseModelPathMappings": "Сопоставления путей базовых моделей",
|
||||
@@ -273,6 +278,48 @@
|
||||
"misc": {
|
||||
"includeTriggerWords": "Включать триггерные слова в синтаксис LoRA",
|
||||
"includeTriggerWordsHelp": "Включать обученные триггерные слова при копировании синтаксиса LoRA в буфер обмена"
|
||||
},
|
||||
"metadataArchive": {
|
||||
"enableArchiveDb": "Включить архив метаданных",
|
||||
"enableArchiveDbHelp": "Использовать локальную базу данных для доступа к метаданным моделей, удалённых с Civitai.",
|
||||
"status": "Статус",
|
||||
"statusAvailable": "Доступно",
|
||||
"statusUnavailable": "Недоступно",
|
||||
"enabled": "Включено",
|
||||
"management": "Управление базой данных",
|
||||
"managementHelp": "Скачать или удалить базу данных архива метаданных",
|
||||
"downloadButton": "Скачать базу данных",
|
||||
"downloadingButton": "Скачивание...",
|
||||
"downloadedButton": "Скачано",
|
||||
"removeButton": "Удалить базу данных",
|
||||
"removingButton": "Удаление...",
|
||||
"downloadSuccess": "База данных архива метаданных успешно загружена",
|
||||
"downloadError": "Не удалось загрузить базу данных архива метаданных",
|
||||
"removeSuccess": "База данных архива метаданных успешно удалена",
|
||||
"removeError": "Не удалось удалить базу данных архива метаданных",
|
||||
"removeConfirm": "Вы уверены, что хотите удалить базу данных архива метаданных? Это удалит локальный файл базы данных, и для использования этой функции потребуется повторная загрузка.",
|
||||
"preparing": "Подготовка к загрузке...",
|
||||
"connecting": "Подключение к серверу загрузки...",
|
||||
"completed": "Завершено",
|
||||
"downloadComplete": "Загрузка успешно завершена"
|
||||
},
|
||||
"proxySettings": {
|
||||
"enableProxy": "Включить прокси на уровне приложения",
|
||||
"enableProxyHelp": "Включить пользовательские настройки прокси для этого приложения, переопределяя системные настройки прокси",
|
||||
"proxyType": "Тип прокси",
|
||||
"proxyTypeHelp": "Выберите тип прокси-сервера (HTTP, HTTPS, SOCKS4, SOCKS5)",
|
||||
"proxyHost": "Хост прокси",
|
||||
"proxyHostPlaceholder": "proxy.example.com",
|
||||
"proxyHostHelp": "Имя хоста или IP-адрес вашего прокси-сервера",
|
||||
"proxyPort": "Порт прокси",
|
||||
"proxyPortPlaceholder": "8080",
|
||||
"proxyPortHelp": "Номер порта вашего прокси-сервера",
|
||||
"proxyUsername": "Имя пользователя (необязательно)",
|
||||
"proxyUsernamePlaceholder": "имя пользователя",
|
||||
"proxyUsernameHelp": "Имя пользователя для аутентификации на прокси (если требуется)",
|
||||
"proxyPassword": "Пароль (необязательно)",
|
||||
"proxyPasswordPlaceholder": "пароль",
|
||||
"proxyPasswordHelp": "Пароль для аутентификации на прокси (если требуется)"
|
||||
}
|
||||
},
|
||||
"loras": {
|
||||
@@ -318,13 +365,24 @@
|
||||
"bulkOperations": {
|
||||
"selected": "Выбрано {count}",
|
||||
"selectedSuffix": "выбрано",
|
||||
"viewSelected": "Нажмите для просмотра выбранных элементов",
|
||||
"sendToWorkflow": "Отправить в Workflow",
|
||||
"copyAll": "Копировать все",
|
||||
"refreshAll": "Обновить все",
|
||||
"moveAll": "Переместить все",
|
||||
"deleteAll": "Удалить все",
|
||||
"clear": "Очистить"
|
||||
"viewSelected": "Просмотреть выбранные",
|
||||
"addTags": "Добавить теги ко всем",
|
||||
"setBaseModel": "Установить базовую модель для всех",
|
||||
"copyAll": "Копировать весь синтаксис",
|
||||
"refreshAll": "Обновить все метаданные",
|
||||
"moveAll": "Переместить все в папку",
|
||||
"autoOrganize": "Автоматически организовать выбранные",
|
||||
"deleteAll": "Удалить все модели",
|
||||
"clear": "Очистить выбор",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "Инициализация автоматической организации...",
|
||||
"starting": "Запуск автоматической организации для {type}...",
|
||||
"processing": "Обработка ({processed}/{total}) — {success} перемещено, {skipped} пропущено, {failures} не удалось",
|
||||
"cleaning": "Очистка пустых директорий...",
|
||||
"completed": "Завершено: {success} перемещено, {skipped} пропущено, {failures} не удалось",
|
||||
"complete": "Автоматическая организация завершена",
|
||||
"error": "Ошибка: {error}"
|
||||
}
|
||||
},
|
||||
"contextMenu": {
|
||||
"refreshMetadata": "Обновить данные Civitai",
|
||||
@@ -572,6 +630,24 @@
|
||||
"countMessage": "моделей будут удалены навсегда.",
|
||||
"action": "Удалить все"
|
||||
},
|
||||
"bulkAddTags": {
|
||||
"title": "Добавить теги к нескольким моделям",
|
||||
"description": "Добавить теги к",
|
||||
"models": "моделям",
|
||||
"tagsToAdd": "Теги для добавления",
|
||||
"placeholder": "Введите тег и нажмите Enter...",
|
||||
"appendTags": "Добавить теги",
|
||||
"replaceTags": "Заменить теги",
|
||||
"saveChanges": "Сохранить изменения"
|
||||
},
|
||||
"bulkBaseModel": {
|
||||
"title": "Установить базовую модель для нескольких моделей",
|
||||
"description": "Установить базовую модель для",
|
||||
"models": "моделей",
|
||||
"selectBaseModel": "Выбрать базовую модель",
|
||||
"save": "Обновить базовую модель",
|
||||
"cancel": "Отмена"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "Локальные примеры изображений",
|
||||
"message": "Локальные примеры изображений для этой модели не найдены. Варианты просмотра:",
|
||||
@@ -622,7 +698,12 @@
|
||||
"editBaseModel": "Редактировать базовую модель",
|
||||
"viewOnCivitai": "Посмотреть на Civitai",
|
||||
"viewOnCivitaiText": "Посмотреть на Civitai",
|
||||
"viewCreatorProfile": "Посмотреть профиль создателя"
|
||||
"viewCreatorProfile": "Посмотреть профиль создателя",
|
||||
"openFileLocation": "Открыть расположение файла"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "Расположение файла успешно открыто",
|
||||
"failed": "Не удалось открыть расположение файла"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "Версия",
|
||||
@@ -923,7 +1004,11 @@
|
||||
"downloadPartialWithAccess": "Загружено {completed} из {total} LoRAs. {accessFailures} не удалось из-за ограничений доступа. Проверьте ваш API ключ в настройках или статус раннего доступа.",
|
||||
"pleaseSelectVersion": "Пожалуйста, выберите версию",
|
||||
"versionExists": "Эта версия уже существует в вашей библиотеке",
|
||||
"downloadCompleted": "Загрузка успешно завершена"
|
||||
"downloadCompleted": "Загрузка успешно завершена",
|
||||
"autoOrganizeSuccess": "Автоматическая организация успешно завершена для {count} {type}",
|
||||
"autoOrganizePartialSuccess": "Автоматическая организация завершена: перемещено {success}, не удалось {failures} из {total} моделей",
|
||||
"autoOrganizeFailed": "Ошибка автоматической организации: {error}",
|
||||
"noModelsSelected": "Модели не выбраны"
|
||||
},
|
||||
"recipes": {
|
||||
"fetchFailed": "Не удалось получить рецепты: {message}",
|
||||
@@ -972,12 +1057,18 @@
|
||||
"deleteFailed": "Ошибка: {error}",
|
||||
"deleteFailedGeneral": "Не удалось удалить модели",
|
||||
"selectedAdditional": "Выбрано дополнительно {count} {type}(ей)",
|
||||
"marqueeSelectionComplete": "Выбрано {count} {type} с помощью выделения рамкой",
|
||||
"refreshMetadataFailed": "Не удалось обновить метаданные",
|
||||
"nameCannotBeEmpty": "Название модели не может быть пустым",
|
||||
"nameUpdatedSuccessfully": "Название модели успешно обновлено",
|
||||
"nameUpdateFailed": "Не удалось обновить название модели",
|
||||
"baseModelUpdated": "Базовая модель успешно обновлена",
|
||||
"baseModelUpdateFailed": "Не удалось обновить базовую модель",
|
||||
"baseModelNotSelected": "Пожалуйста, выберите базовую модель",
|
||||
"bulkBaseModelUpdating": "Обновление базовой модели для {count} моделей...",
|
||||
"bulkBaseModelUpdateSuccess": "Базовая модель успешно обновлена для {count} моделей",
|
||||
"bulkBaseModelUpdatePartial": "Обновлено {success} моделей, не удалось обновить {failed} моделей",
|
||||
"bulkBaseModelUpdateFailed": "Не удалось обновить базовую модель для выбранных моделей",
|
||||
"invalidCharactersRemoved": "Недопустимые символы удалены из имени файла",
|
||||
"filenameCannotBeEmpty": "Имя файла не может быть пустым",
|
||||
"renameFailed": "Не удалось переименовать файл: {message}",
|
||||
@@ -987,7 +1078,14 @@
|
||||
"verificationAlreadyDone": "Эта группа уже была проверена",
|
||||
"verificationCompleteMismatch": "Проверка завершена. {count} файл(ов) имеют разные фактические хеши.",
|
||||
"verificationCompleteSuccess": "Проверка завершена. Все файлы подтверждены как дубликаты.",
|
||||
"verificationFailed": "Не удалось проверить хеши: {message}"
|
||||
"verificationFailed": "Не удалось проверить хеши: {message}",
|
||||
"noTagsToAdd": "Нет тегов для добавления",
|
||||
"tagsAddedSuccessfully": "Успешно добавлено {tagCount} тег(ов) к {count} {type}(ам)",
|
||||
"tagsReplacedSuccessfully": "Успешно заменены теги для {count} {type}(ов) на {tagCount} тег(ов)",
|
||||
"tagsAddFailed": "Не удалось добавить теги к {count} модель(ям)",
|
||||
"tagsReplaceFailed": "Не удалось заменить теги для {count} модель(ей)",
|
||||
"bulkTagsAddFailed": "Не удалось добавить теги к моделям",
|
||||
"bulkTagsReplaceFailed": "Не удалось заменить теги для моделей"
|
||||
},
|
||||
"search": {
|
||||
"atLeastOneOption": "Должен быть выбран хотя бы один вариант поиска"
|
||||
@@ -1069,6 +1167,7 @@
|
||||
},
|
||||
"exampleImages": {
|
||||
"pathUpdated": "Путь к примерам изображений успешно обновлен",
|
||||
"pathUpdateFailed": "Не удалось обновить путь к примерам изображений: {message}",
|
||||
"downloadInProgress": "Загрузка уже в процессе",
|
||||
"enterLocationFirst": "Пожалуйста, сначала введите место загрузки",
|
||||
"downloadStarted": "Загрузка примеров изображений начата",
|
||||
|
||||
@@ -16,20 +16,22 @@
|
||||
"loading": "加载中...",
|
||||
"unknown": "未知",
|
||||
"date": "日期",
|
||||
"version": "版本"
|
||||
"version": "版本",
|
||||
"enabled": "已启用",
|
||||
"disabled": "已禁用"
|
||||
},
|
||||
"language": {
|
||||
"select": "语言",
|
||||
"select_help": "选择你喜欢的界面语言",
|
||||
"select": "Language",
|
||||
"select_help": "Choose your preferred language for the interface",
|
||||
"english": "English",
|
||||
"chinese_simplified": "中文(简体)",
|
||||
"chinese_traditional": "中文(繁体)",
|
||||
"russian": "俄语",
|
||||
"german": "德语",
|
||||
"japanese": "日语",
|
||||
"korean": "韩语",
|
||||
"french": "法语",
|
||||
"spanish": "西班牙语"
|
||||
"russian": "Русский",
|
||||
"german": "Deutsch",
|
||||
"japanese": "日本語",
|
||||
"korean": "한국어",
|
||||
"french": "Français",
|
||||
"spanish": "Español"
|
||||
},
|
||||
"fileSize": {
|
||||
"zero": "0 字节",
|
||||
@@ -178,7 +180,9 @@
|
||||
"folderSettings": "文件夹设置",
|
||||
"downloadPathTemplates": "下载路径模板",
|
||||
"exampleImages": "示例图片",
|
||||
"misc": "其他"
|
||||
"misc": "其他",
|
||||
"metadataArchive": "元数据归档数据库",
|
||||
"proxySettings": "代理设置"
|
||||
},
|
||||
"contentFiltering": {
|
||||
"blurNsfwContent": "模糊 NSFW 内容",
|
||||
@@ -236,6 +240,7 @@
|
||||
"baseModelFirstTag": "基础模型 + 首标签",
|
||||
"baseModelAuthor": "基础模型 + 作者",
|
||||
"authorFirstTag": "作者 + 首标签",
|
||||
"baseModelAuthorFirstTag": "基础模型 + 作者 + 首标签",
|
||||
"customTemplate": "自定义模板"
|
||||
},
|
||||
"customTemplatePlaceholder": "输入自定义模板(如:{base_model}/{author}/{first_tag})",
|
||||
@@ -273,6 +278,48 @@
|
||||
"misc": {
|
||||
"includeTriggerWords": "复制 LoRA 语法时包含触发词",
|
||||
"includeTriggerWordsHelp": "复制 LoRA 语法到剪贴板时包含训练触发词"
|
||||
},
|
||||
"metadataArchive": {
|
||||
"enableArchiveDb": "启用元数据归档数据库",
|
||||
"enableArchiveDbHelp": "使用本地数据库访问已从 Civitai 删除的模型元数据。",
|
||||
"status": "状态",
|
||||
"statusAvailable": "可用",
|
||||
"statusUnavailable": "不可用",
|
||||
"enabled": "已启用",
|
||||
"management": "数据库管理",
|
||||
"managementHelp": "下载或移除元数据归档数据库",
|
||||
"downloadButton": "下载数据库",
|
||||
"downloadingButton": "正在下载...",
|
||||
"downloadedButton": "已下载",
|
||||
"removeButton": "移除数据库",
|
||||
"removingButton": "正在移除...",
|
||||
"downloadSuccess": "元数据归档数据库下载成功",
|
||||
"downloadError": "元数据归档数据库下载失败",
|
||||
"removeSuccess": "元数据归档数据库移除成功",
|
||||
"removeError": "元数据归档数据库移除失败",
|
||||
"removeConfirm": "你确定要移除元数据归档数据库吗?这将删除本地数据库文件,如需使用此功能需重新下载。",
|
||||
"preparing": "正在准备下载...",
|
||||
"connecting": "正在连接下载服务器...",
|
||||
"completed": "已完成",
|
||||
"downloadComplete": "下载成功完成"
|
||||
},
|
||||
"proxySettings": {
|
||||
"enableProxy": "启用应用级代理",
|
||||
"enableProxyHelp": "为此应用启用自定义代理设置,覆盖系统代理设置",
|
||||
"proxyType": "代理类型",
|
||||
"proxyTypeHelp": "选择代理服务器类型 (HTTP, HTTPS, SOCKS4, SOCKS5)",
|
||||
"proxyHost": "代理主机",
|
||||
"proxyHostPlaceholder": "proxy.example.com",
|
||||
"proxyHostHelp": "代理服务器的主机名或IP地址",
|
||||
"proxyPort": "代理端口",
|
||||
"proxyPortPlaceholder": "8080",
|
||||
"proxyPortHelp": "代理服务器的端口号",
|
||||
"proxyUsername": "用户名 (可选)",
|
||||
"proxyUsernamePlaceholder": "用户名",
|
||||
"proxyUsernameHelp": "代理认证的用户名 (如果需要)",
|
||||
"proxyPassword": "密码 (可选)",
|
||||
"proxyPasswordPlaceholder": "密码",
|
||||
"proxyPasswordHelp": "代理认证的密码 (如果需要)"
|
||||
}
|
||||
},
|
||||
"loras": {
|
||||
@@ -318,13 +365,24 @@
|
||||
"bulkOperations": {
|
||||
"selected": "已选中 {count} 项",
|
||||
"selectedSuffix": "已选中",
|
||||
"viewSelected": "点击查看已选项目",
|
||||
"sendToWorkflow": "发送到工作流",
|
||||
"copyAll": "全部复制",
|
||||
"refreshAll": "全部刷新",
|
||||
"moveAll": "全部移动",
|
||||
"deleteAll": "全部删除",
|
||||
"clear": "清除"
|
||||
"viewSelected": "查看已选中",
|
||||
"addTags": "为所有添加标签",
|
||||
"setBaseModel": "为所有设置基础模型",
|
||||
"copyAll": "复制全部语法",
|
||||
"refreshAll": "刷新全部元数据",
|
||||
"moveAll": "全部移动到文件夹",
|
||||
"autoOrganize": "自动整理所选模型",
|
||||
"deleteAll": "删除所有模型",
|
||||
"clear": "清除选择",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "正在初始化自动整理...",
|
||||
"starting": "正在为 {type} 启动自动整理...",
|
||||
"processing": "处理中({processed}/{total})- 已移动 {success} 个,跳过 {skipped} 个,失败 {failures} 个",
|
||||
"cleaning": "正在清理空文件夹...",
|
||||
"completed": "完成:已移动 {success} 个,跳过 {skipped} 个,失败 {failures} 个",
|
||||
"complete": "自动整理已完成",
|
||||
"error": "错误:{error}"
|
||||
}
|
||||
},
|
||||
"contextMenu": {
|
||||
"refreshMetadata": "刷新 Civitai 数据",
|
||||
@@ -572,6 +630,24 @@
|
||||
"countMessage": "模型将被永久删除。",
|
||||
"action": "全部删除"
|
||||
},
|
||||
"bulkAddTags": {
|
||||
"title": "批量添加标签",
|
||||
"description": "为多个模型添加标签",
|
||||
"models": "个模型",
|
||||
"tagsToAdd": "要添加的标签",
|
||||
"placeholder": "输入标签并按回车...",
|
||||
"appendTags": "追加标签",
|
||||
"replaceTags": "替换标签",
|
||||
"saveChanges": "保存更改"
|
||||
},
|
||||
"bulkBaseModel": {
|
||||
"title": "批量设置基础模型",
|
||||
"description": "为多个模型设置基础模型",
|
||||
"models": "个模型",
|
||||
"selectBaseModel": "选择基础模型",
|
||||
"save": "更新基础模型",
|
||||
"cancel": "取消"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "本地示例图片",
|
||||
"message": "未找到此模型的本地示例图片。可选操作:",
|
||||
@@ -622,7 +698,12 @@
|
||||
"editBaseModel": "编辑基础模型",
|
||||
"viewOnCivitai": "在 Civitai 查看",
|
||||
"viewOnCivitaiText": "在 Civitai 查看",
|
||||
"viewCreatorProfile": "查看创作者主页"
|
||||
"viewCreatorProfile": "查看创作者主页",
|
||||
"openFileLocation": "打开文件位置"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "文件位置已成功打开",
|
||||
"failed": "打开文件位置失败"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "版本",
|
||||
@@ -923,7 +1004,11 @@
|
||||
"downloadPartialWithAccess": "已下载 {completed}/{total} 个 LoRA。{accessFailures} 个因访问限制失败。请检查设置中的 API 密钥或早期访问状态。",
|
||||
"pleaseSelectVersion": "请选择版本",
|
||||
"versionExists": "该版本已存在于你的库中",
|
||||
"downloadCompleted": "下载成功完成"
|
||||
"downloadCompleted": "下载成功完成",
|
||||
"autoOrganizeSuccess": "自动整理已成功完成,共 {count} 个 {type}",
|
||||
"autoOrganizePartialSuccess": "自动整理完成:已移动 {success} 个,{failures} 个失败,共 {total} 个模型",
|
||||
"autoOrganizeFailed": "自动整理失败:{error}",
|
||||
"noModelsSelected": "未选中模型"
|
||||
},
|
||||
"recipes": {
|
||||
"fetchFailed": "获取配方失败:{message}",
|
||||
@@ -972,12 +1057,18 @@
|
||||
"deleteFailed": "错误:{error}",
|
||||
"deleteFailedGeneral": "删除模型失败",
|
||||
"selectedAdditional": "已选中 {count} 个额外 {type}",
|
||||
"marqueeSelectionComplete": "框选已选中 {count} 个 {type}",
|
||||
"refreshMetadataFailed": "刷新元数据失败",
|
||||
"nameCannotBeEmpty": "模型名称不能为空",
|
||||
"nameUpdatedSuccessfully": "模型名称更新成功",
|
||||
"nameUpdateFailed": "模型名称更新失败",
|
||||
"baseModelUpdated": "基础模型更新成功",
|
||||
"baseModelUpdateFailed": "基础模型更新失败",
|
||||
"baseModelNotSelected": "请选择基础模型",
|
||||
"bulkBaseModelUpdating": "正在为 {count} 个模型更新基础模型...",
|
||||
"bulkBaseModelUpdateSuccess": "成功为 {count} 个模型更新基础模型",
|
||||
"bulkBaseModelUpdatePartial": "更新了 {success} 个模型,{failed} 个失败",
|
||||
"bulkBaseModelUpdateFailed": "为选中模型更新基础模型失败",
|
||||
"invalidCharactersRemoved": "文件名中的无效字符已移除",
|
||||
"filenameCannotBeEmpty": "文件名不能为空",
|
||||
"renameFailed": "重命名文件失败:{message}",
|
||||
@@ -987,7 +1078,14 @@
|
||||
"verificationAlreadyDone": "此组已验证过",
|
||||
"verificationCompleteMismatch": "验证完成。{count} 个文件实际哈希不同。",
|
||||
"verificationCompleteSuccess": "验证完成。所有文件均为重复项。",
|
||||
"verificationFailed": "验证哈希失败:{message}"
|
||||
"verificationFailed": "验证哈希失败:{message}",
|
||||
"noTagsToAdd": "没有可添加的标签",
|
||||
"tagsAddedSuccessfully": "已成功为 {count} 个 {type} 添加 {tagCount} 个标签",
|
||||
"tagsReplacedSuccessfully": "已成功为 {count} 个 {type} 替换为 {tagCount} 个标签",
|
||||
"tagsAddFailed": "为 {count} 个模型添加标签失败",
|
||||
"tagsReplaceFailed": "为 {count} 个模型替换标签失败",
|
||||
"bulkTagsAddFailed": "批量添加标签失败",
|
||||
"bulkTagsReplaceFailed": "批量替换标签失败"
|
||||
},
|
||||
"search": {
|
||||
"atLeastOneOption": "至少选择一个搜索选项"
|
||||
@@ -1069,6 +1167,7 @@
|
||||
},
|
||||
"exampleImages": {
|
||||
"pathUpdated": "示例图片路径更新成功",
|
||||
"pathUpdateFailed": "更新示例图片路径失败:{message}",
|
||||
"downloadInProgress": "下载已在进行中",
|
||||
"enterLocationFirst": "请先输入下载位置",
|
||||
"downloadStarted": "示例图片下载已开始",
|
||||
|
||||
@@ -16,7 +16,9 @@
|
||||
"loading": "載入中...",
|
||||
"unknown": "未知",
|
||||
"date": "日期",
|
||||
"version": "版本"
|
||||
"version": "版本",
|
||||
"enabled": "已啟用",
|
||||
"disabled": "已停用"
|
||||
},
|
||||
"language": {
|
||||
"select": "語言",
|
||||
@@ -178,7 +180,9 @@
|
||||
"folderSettings": "資料夾設定",
|
||||
"downloadPathTemplates": "下載路徑範本",
|
||||
"exampleImages": "範例圖片",
|
||||
"misc": "其他"
|
||||
"misc": "其他",
|
||||
"metadataArchive": "中繼資料封存資料庫",
|
||||
"proxySettings": "代理設定"
|
||||
},
|
||||
"contentFiltering": {
|
||||
"blurNsfwContent": "模糊 NSFW 內容",
|
||||
@@ -236,6 +240,7 @@
|
||||
"baseModelFirstTag": "基礎模型 + 第一標籤",
|
||||
"baseModelAuthor": "基礎模型 + 作者",
|
||||
"authorFirstTag": "作者 + 第一標籤",
|
||||
"baseModelAuthorFirstTag": "基礎模型 + 作者 + 第一標籤",
|
||||
"customTemplate": "自訂範本"
|
||||
},
|
||||
"customTemplatePlaceholder": "輸入自訂範本(例如:{base_model}/{author}/{first_tag})",
|
||||
@@ -273,6 +278,48 @@
|
||||
"misc": {
|
||||
"includeTriggerWords": "在 LoRA 語法中包含觸發詞",
|
||||
"includeTriggerWordsHelp": "複製 LoRA 語法到剪貼簿時包含訓練觸發詞"
|
||||
},
|
||||
"metadataArchive": {
|
||||
"enableArchiveDb": "啟用中繼資料封存資料庫",
|
||||
"enableArchiveDbHelp": "使用本機資料庫以存取已從 Civitai 刪除模型的中繼資料。",
|
||||
"status": "狀態",
|
||||
"statusAvailable": "可用",
|
||||
"statusUnavailable": "不可用",
|
||||
"enabled": "已啟用",
|
||||
"management": "資料庫管理",
|
||||
"managementHelp": "下載或移除中繼資料封存資料庫",
|
||||
"downloadButton": "下載資料庫",
|
||||
"downloadingButton": "下載中...",
|
||||
"downloadedButton": "已下載",
|
||||
"removeButton": "移除資料庫",
|
||||
"removingButton": "移除中...",
|
||||
"downloadSuccess": "中繼資料封存資料庫下載成功",
|
||||
"downloadError": "下載中繼資料封存資料庫失敗",
|
||||
"removeSuccess": "中繼資料封存資料庫移除成功",
|
||||
"removeError": "移除中繼資料封存資料庫失敗",
|
||||
"removeConfirm": "您確定要移除中繼資料封存資料庫嗎?這將刪除本機資料庫檔案,若要再次使用此功能需重新下載。",
|
||||
"preparing": "準備下載中...",
|
||||
"connecting": "正在連接下載伺服器...",
|
||||
"completed": "已完成",
|
||||
"downloadComplete": "下載成功完成"
|
||||
},
|
||||
"proxySettings": {
|
||||
"enableProxy": "啟用應用程式代理",
|
||||
"enableProxyHelp": "啟用此應用程式的自訂代理設定,將覆蓋系統代理設定",
|
||||
"proxyType": "代理類型",
|
||||
"proxyTypeHelp": "選擇代理伺服器類型(HTTP、HTTPS、SOCKS4、SOCKS5)",
|
||||
"proxyHost": "代理主機",
|
||||
"proxyHostPlaceholder": "proxy.example.com",
|
||||
"proxyHostHelp": "您的代理伺服器主機名稱或 IP 位址",
|
||||
"proxyPort": "代理埠號",
|
||||
"proxyPortPlaceholder": "8080",
|
||||
"proxyPortHelp": "您的代理伺服器埠號",
|
||||
"proxyUsername": "使用者名稱(選填)",
|
||||
"proxyUsernamePlaceholder": "username",
|
||||
"proxyUsernameHelp": "代理驗證所需的使用者名稱(如有需要)",
|
||||
"proxyPassword": "密碼(選填)",
|
||||
"proxyPasswordPlaceholder": "password",
|
||||
"proxyPasswordHelp": "代理驗證所需的密碼(如有需要)"
|
||||
}
|
||||
},
|
||||
"loras": {
|
||||
@@ -318,13 +365,24 @@
|
||||
"bulkOperations": {
|
||||
"selected": "已選擇 {count} 項",
|
||||
"selectedSuffix": "已選擇",
|
||||
"viewSelected": "點擊檢視已選項目",
|
||||
"sendToWorkflow": "傳送到工作流",
|
||||
"copyAll": "全部複製",
|
||||
"refreshAll": "全部刷新",
|
||||
"moveAll": "全部移動",
|
||||
"deleteAll": "全部刪除",
|
||||
"clear": "清除"
|
||||
"viewSelected": "檢視已選取",
|
||||
"addTags": "新增標籤到全部",
|
||||
"setBaseModel": "設定全部基礎模型",
|
||||
"copyAll": "複製全部語法",
|
||||
"refreshAll": "刷新全部 metadata",
|
||||
"moveAll": "全部移動到資料夾",
|
||||
"autoOrganize": "自動整理所選模型",
|
||||
"deleteAll": "刪除全部模型",
|
||||
"clear": "清除選取",
|
||||
"autoOrganizeProgress": {
|
||||
"initializing": "正在初始化自動整理...",
|
||||
"starting": "正在開始自動整理 {type}...",
|
||||
"processing": "處理中({processed}/{total})- 已移動 {success},已略過 {skipped},失敗 {failures}",
|
||||
"cleaning": "正在清理空資料夾...",
|
||||
"completed": "完成:已移動 {success},已略過 {skipped},失敗 {failures}",
|
||||
"complete": "自動整理完成",
|
||||
"error": "錯誤:{error}"
|
||||
}
|
||||
},
|
||||
"contextMenu": {
|
||||
"refreshMetadata": "刷新 Civitai 資料",
|
||||
@@ -572,6 +630,24 @@
|
||||
"countMessage": "模型將被永久刪除。",
|
||||
"action": "全部刪除"
|
||||
},
|
||||
"bulkAddTags": {
|
||||
"title": "新增標籤到多個模型",
|
||||
"description": "新增標籤到",
|
||||
"models": "個模型",
|
||||
"tagsToAdd": "要新增的標籤",
|
||||
"placeholder": "輸入標籤並按 Enter...",
|
||||
"appendTags": "附加標籤",
|
||||
"replaceTags": "取代標籤",
|
||||
"saveChanges": "儲存變更"
|
||||
},
|
||||
"bulkBaseModel": {
|
||||
"title": "設定多個模型的基礎模型",
|
||||
"description": "設定基礎模型給",
|
||||
"models": "個模型",
|
||||
"selectBaseModel": "選擇基礎模型",
|
||||
"save": "更新基礎模型",
|
||||
"cancel": "取消"
|
||||
},
|
||||
"exampleAccess": {
|
||||
"title": "本機範例圖片",
|
||||
"message": "此模型未找到本機範例圖片。可選擇:",
|
||||
@@ -622,7 +698,12 @@
|
||||
"editBaseModel": "編輯基礎模型",
|
||||
"viewOnCivitai": "在 Civitai 查看",
|
||||
"viewOnCivitaiText": "在 Civitai 查看",
|
||||
"viewCreatorProfile": "查看創作者個人檔案"
|
||||
"viewCreatorProfile": "查看創作者個人檔案",
|
||||
"openFileLocation": "開啟檔案位置"
|
||||
},
|
||||
"openFileLocation": {
|
||||
"success": "檔案位置已成功開啟",
|
||||
"failed": "開啟檔案位置失敗"
|
||||
},
|
||||
"metadata": {
|
||||
"version": "版本",
|
||||
@@ -923,7 +1004,11 @@
|
||||
"downloadPartialWithAccess": "已下載 {completed} 個 LoRA,共 {total} 個。{accessFailures} 個因訪問限制而失敗。請檢查您的 API 密鑰或提前訪問狀態。",
|
||||
"pleaseSelectVersion": "請選擇一個版本",
|
||||
"versionExists": "此版本已存在於您的庫中",
|
||||
"downloadCompleted": "下載成功完成"
|
||||
"downloadCompleted": "下載成功完成",
|
||||
"autoOrganizeSuccess": "自動整理已成功完成,共 {count} 個 {type} 已整理",
|
||||
"autoOrganizePartialSuccess": "自動整理完成:已移動 {success} 個,{failures} 個失敗,共 {total} 個模型",
|
||||
"autoOrganizeFailed": "自動整理失敗:{error}",
|
||||
"noModelsSelected": "未選擇任何模型"
|
||||
},
|
||||
"recipes": {
|
||||
"fetchFailed": "取得配方失敗:{message}",
|
||||
@@ -972,12 +1057,18 @@
|
||||
"deleteFailed": "錯誤:{error}",
|
||||
"deleteFailedGeneral": "刪除模型失敗",
|
||||
"selectedAdditional": "已選擇 {count} 個額外 {type}",
|
||||
"marqueeSelectionComplete": "框選已選擇 {count} 個 {type}",
|
||||
"refreshMetadataFailed": "刷新 metadata 失敗",
|
||||
"nameCannotBeEmpty": "模型名稱不可為空",
|
||||
"nameUpdatedSuccessfully": "模型名稱已成功更新",
|
||||
"nameUpdateFailed": "更新模型名稱失敗",
|
||||
"baseModelUpdated": "基礎模型已成功更新",
|
||||
"baseModelUpdateFailed": "更新基礎模型失敗",
|
||||
"baseModelNotSelected": "請選擇基礎模型",
|
||||
"bulkBaseModelUpdating": "正在為 {count} 個模型更新基礎模型...",
|
||||
"bulkBaseModelUpdateSuccess": "已成功為 {count} 個模型更新基礎模型",
|
||||
"bulkBaseModelUpdatePartial": "已更新 {success} 個模型,{failed} 個模型失敗",
|
||||
"bulkBaseModelUpdateFailed": "更新所選模型的基礎模型失敗",
|
||||
"invalidCharactersRemoved": "已移除檔名中的無效字元",
|
||||
"filenameCannotBeEmpty": "檔案名稱不可為空",
|
||||
"renameFailed": "重新命名檔案失敗:{message}",
|
||||
@@ -987,7 +1078,14 @@
|
||||
"verificationAlreadyDone": "此群組已驗證過",
|
||||
"verificationCompleteMismatch": "驗證完成。{count} 個檔案的實際雜湊不同。",
|
||||
"verificationCompleteSuccess": "驗證完成。所有檔案均確認為重複項。",
|
||||
"verificationFailed": "驗證雜湊失敗:{message}"
|
||||
"verificationFailed": "驗證雜湊失敗:{message}",
|
||||
"noTagsToAdd": "沒有可新增的標籤",
|
||||
"tagsAddedSuccessfully": "已成功將 {tagCount} 個標籤新增到 {count} 個 {type}",
|
||||
"tagsReplacedSuccessfully": "已成功以 {tagCount} 個標籤取代 {count} 個 {type} 的標籤",
|
||||
"tagsAddFailed": "新增標籤到 {count} 個模型失敗",
|
||||
"tagsReplaceFailed": "取代 {count} 個模型的標籤失敗",
|
||||
"bulkTagsAddFailed": "批量新增標籤到模型失敗",
|
||||
"bulkTagsReplaceFailed": "批量取代模型標籤失敗"
|
||||
},
|
||||
"search": {
|
||||
"atLeastOneOption": "至少需選擇一個搜尋選項"
|
||||
@@ -1069,6 +1167,7 @@
|
||||
},
|
||||
"exampleImages": {
|
||||
"pathUpdated": "範例圖片路徑已更新",
|
||||
"pathUpdateFailed": "更新範例圖片路徑失敗:{message}",
|
||||
"downloadInProgress": "下載已在進行中",
|
||||
"enterLocationFirst": "請先輸入下載位置",
|
||||
"downloadStarted": "範例圖片下載已開始",
|
||||
|
||||
@@ -190,6 +190,9 @@ class LoraManager:
|
||||
|
||||
# Register DownloadManager with ServiceRegistry
|
||||
await ServiceRegistry.get_download_manager()
|
||||
|
||||
from .services.metadata_service import initialize_metadata_providers
|
||||
await initialize_metadata_providers()
|
||||
|
||||
# Initialize WebSocket manager
|
||||
await ServiceRegistry.get_websocket_manager()
|
||||
@@ -218,7 +221,7 @@ class LoraManager:
|
||||
name='post_init_tasks'
|
||||
)
|
||||
|
||||
logger.info("LoRA Manager: All services initialized and background tasks scheduled")
|
||||
logger.debug("LoRA Manager: All services initialized and background tasks scheduled")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"LoRA Manager: Error initializing services: {e}", exc_info=True)
|
||||
@@ -237,6 +240,7 @@ class LoraManager:
|
||||
# Run post-initialization tasks
|
||||
post_tasks = [
|
||||
asyncio.create_task(cls._cleanup_backup_files(), name='cleanup_bak_files'),
|
||||
asyncio.create_task(cls._cleanup_example_images_folders(), name='cleanup_example_images'),
|
||||
# Add more post-initialization tasks here as needed
|
||||
# asyncio.create_task(cls._another_post_task(), name='another_task'),
|
||||
]
|
||||
@@ -346,17 +350,124 @@ class LoraManager:
|
||||
|
||||
return deleted_count, size_freed
|
||||
|
||||
@classmethod
|
||||
async def _cleanup_example_images_folders(cls):
|
||||
"""Clean up invalid or empty folders in example images directory"""
|
||||
try:
|
||||
example_images_path = settings.get('example_images_path')
|
||||
if not example_images_path or not os.path.exists(example_images_path):
|
||||
logger.debug("Example images path not configured or doesn't exist, skipping cleanup")
|
||||
return
|
||||
|
||||
logger.debug(f"Starting cleanup of example images folders in: {example_images_path}")
|
||||
|
||||
# Get all scanner instances to check hash validity
|
||||
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
embedding_scanner = await ServiceRegistry.get_embedding_scanner()
|
||||
|
||||
total_folders_checked = 0
|
||||
empty_folders_removed = 0
|
||||
orphaned_folders_removed = 0
|
||||
|
||||
# Scan the example images directory
|
||||
try:
|
||||
with os.scandir(example_images_path) as it:
|
||||
for entry in it:
|
||||
if not entry.is_dir(follow_symlinks=False):
|
||||
continue
|
||||
|
||||
folder_name = entry.name
|
||||
folder_path = entry.path
|
||||
total_folders_checked += 1
|
||||
|
||||
try:
|
||||
# Check if folder is empty
|
||||
is_empty = cls._is_folder_empty(folder_path)
|
||||
if is_empty:
|
||||
logger.debug(f"Removing empty example images folder: {folder_name}")
|
||||
await cls._remove_folder_safely(folder_path)
|
||||
empty_folders_removed += 1
|
||||
continue
|
||||
|
||||
# Check if folder name is a valid SHA256 hash (64 hex characters)
|
||||
if len(folder_name) != 64 or not all(c in '0123456789abcdefABCDEF' for c in folder_name):
|
||||
# Skip non-hash folders to avoid deleting other content
|
||||
logger.debug(f"Skipping non-hash folder: {folder_name}")
|
||||
continue
|
||||
|
||||
# Check if hash exists in any of the scanners
|
||||
hash_exists = (
|
||||
lora_scanner.has_hash(folder_name) or
|
||||
checkpoint_scanner.has_hash(folder_name) or
|
||||
embedding_scanner.has_hash(folder_name)
|
||||
)
|
||||
|
||||
if not hash_exists:
|
||||
logger.debug(f"Removing example images folder for deleted model: {folder_name}")
|
||||
await cls._remove_folder_safely(folder_path)
|
||||
orphaned_folders_removed += 1
|
||||
continue
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing example images folder {folder_name}: {e}")
|
||||
|
||||
# Yield control periodically
|
||||
await asyncio.sleep(0.01)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning example images directory: {e}")
|
||||
return
|
||||
|
||||
# Log final cleanup report
|
||||
total_removed = empty_folders_removed + orphaned_folders_removed
|
||||
if total_removed > 0:
|
||||
logger.info(f"Example images cleanup completed: checked {total_folders_checked} folders, "
|
||||
f"removed {empty_folders_removed} empty folders and {orphaned_folders_removed} "
|
||||
f"folders for deleted models (total: {total_removed} removed)")
|
||||
else:
|
||||
logger.debug(f"Example images cleanup completed: checked {total_folders_checked} folders, "
|
||||
f"no cleanup needed")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error during example images cleanup: {e}", exc_info=True)
|
||||
|
||||
@classmethod
|
||||
def _is_folder_empty(cls, folder_path: str) -> bool:
|
||||
"""Check if a folder is empty
|
||||
|
||||
Args:
|
||||
folder_path: Path to the folder to check
|
||||
|
||||
Returns:
|
||||
bool: True if folder is empty, False otherwise
|
||||
"""
|
||||
try:
|
||||
with os.scandir(folder_path) as it:
|
||||
return not any(it)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error checking if folder is empty {folder_path}: {e}")
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
async def _remove_folder_safely(cls, folder_path: str):
|
||||
"""Safely remove a folder and all its contents
|
||||
|
||||
Args:
|
||||
folder_path: Path to the folder to remove
|
||||
"""
|
||||
try:
|
||||
import shutil
|
||||
loop = asyncio.get_event_loop()
|
||||
await loop.run_in_executor(None, shutil.rmtree, folder_path)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to remove folder {folder_path}: {e}")
|
||||
|
||||
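# Note: on Python 3.9+ an equivalent, slightly more concise way to offload the blocking
# delete would be `await asyncio.to_thread(shutil.rmtree, folder_path)`; the
# run_in_executor form above also works on older interpreters.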
@classmethod
|
||||
async def _cleanup(cls, app):
|
||||
"""Cleanup resources using ServiceRegistry"""
|
||||
try:
|
||||
logger.info("LoRA Manager: Cleaning up services")
|
||||
|
||||
# Close CivitaiClient gracefully
|
||||
civitai_client = await ServiceRegistry.get_service("civitai_client")
|
||||
if civitai_client:
|
||||
await civitai_client.close()
|
||||
logger.info("Closed CivitaiClient connection")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error during cleanup: {e}", exc_info=True)
|
||||
|
||||
@@ -295,7 +295,7 @@ class MetadataProcessor:
|
||||
"seed": None,
|
||||
"steps": None,
|
||||
"cfg_scale": None,
|
||||
"guidance": None, # Add guidance parameter
|
||||
# "guidance": None, # Add guidance parameter
|
||||
"sampler": None,
|
||||
"scheduler": None,
|
||||
"checkpoint": None,
|
||||
|
||||
@@ -671,6 +671,7 @@ NODE_EXTRACTORS = {
|
||||
"AdvancedCLIPTextEncode": CLIPTextEncodeExtractor, # From https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb
|
||||
"smZ_CLIPTextEncode": CLIPTextEncodeExtractor, # From https://github.com/shiimizu/ComfyUI_smZNodes
|
||||
"CR_ApplyControlNetStack": CR_ApplyControlNetStackExtractor, # Add CR_ApplyControlNetStack
|
||||
"PCTextEncode": CLIPTextEncodeExtractor, # From https://github.com/asagi4/comfyui-prompt-control
|
||||
# Latent
|
||||
"EmptyLatentImage": ImageSizeExtractor,
|
||||
# Flux
|
||||
|
||||
py/middleware/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
|
||||
"""Server middleware modules"""
|
||||
py/middleware/cache_middleware.py (new file, 53 lines)
@@ -0,0 +1,53 @@
|
||||
"""Cache control middleware for ComfyUI server"""
|
||||
|
||||
from aiohttp import web
|
||||
from typing import Callable, Awaitable
|
||||
|
||||
# Time in seconds
|
||||
ONE_HOUR: int = 3600
|
||||
ONE_DAY: int = 86400
|
||||
IMG_EXTENSIONS = (
|
||||
".jpg",
|
||||
".jpeg",
|
||||
".png",
|
||||
".ppm",
|
||||
".bmp",
|
||||
".pgm",
|
||||
".tif",
|
||||
".tiff",
|
||||
".webp",
|
||||
".mp4"
|
||||
)
|
||||
|
||||
|
||||
@web.middleware
|
||||
async def cache_control(
|
||||
request: web.Request, handler: Callable[[web.Request], Awaitable[web.Response]]
|
||||
) -> web.Response:
|
||||
"""Cache control middleware that sets appropriate cache headers based on file type and response status"""
|
||||
response: web.Response = await handler(request)
|
||||
|
||||
if (
|
||||
request.path.endswith(".js")
|
||||
or request.path.endswith(".css")
|
||||
or request.path.endswith("index.json")
|
||||
):
|
||||
response.headers.setdefault("Cache-Control", "no-cache")
|
||||
return response
|
||||
|
||||
# Early return for non-image files - no cache headers needed
|
||||
if not request.path.lower().endswith(IMG_EXTENSIONS):
|
||||
return response
|
||||
|
||||
# Handle image files
|
||||
if response.status == 404:
|
||||
response.headers.setdefault("Cache-Control", f"public, max-age={ONE_HOUR}")
|
||||
elif response.status in (200, 201, 202, 203, 204, 205, 206, 301, 308):
|
||||
# Success responses and permanent redirects - cache for 1 day
|
||||
response.headers.setdefault("Cache-Control", f"public, max-age={ONE_DAY}")
|
||||
elif response.status in (302, 303, 307):
|
||||
# Temporary redirects - no cache
|
||||
response.headers.setdefault("Cache-Control", "no-cache")
|
||||
# Note: 304 Not Modified falls through - no cache headers set
|
||||
|
||||
return response
|
||||
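A minimal sketch of how this cache-control middleware could be wired into an aiohttp application; the import path and the standalone web.Application() below are assumptions for illustration, since in the extension it would be attached to ComfyUI's existing server app rather than a fresh application:

from aiohttp import web
from py.middleware.cache_middleware import cache_control  # import path assumed for illustration

app = web.Application(middlewares=[cache_control])
# Or, when the application object already exists (as with ComfyUI's server):
# app.middlewares.append(cache_control)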
@@ -115,7 +115,7 @@ class LoraManagerLoader:
|
||||
formatted_loras = []
|
||||
for item in loaded_loras:
|
||||
parts = item.split(":")
|
||||
lora_name = parts[0].strip()
|
||||
lora_name = parts[0]
|
||||
strength_parts = parts[1].strip().split(",")
|
||||
|
||||
if len(strength_parts) > 1:
|
||||
@@ -165,7 +165,7 @@ class LoraManagerTextLoader:
|
||||
|
||||
loras = []
|
||||
for match in matches:
|
||||
lora_name = match[0].strip()
|
||||
lora_name = match[0]
|
||||
model_strength = float(match[1])
|
||||
clip_strength = float(match[2]) if match[2] else model_strength
|
||||
|
||||
|
||||
py/nodes/wanvideo_lora_select_from_text.py (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
from comfy.comfy_types import IO
|
||||
import folder_paths
|
||||
from ..utils.utils import get_lora_info
|
||||
from .utils import any_type
|
||||
import logging
|
||||
|
||||
# Initialize the logger
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Define the new node class
|
||||
class WanVideoLoraSelectFromText:
|
||||
# Name shown for the node in the UI
|
||||
NAME = "WanVideo Lora Select From Text (LoraManager)"
|
||||
# Category the node belongs to
|
||||
CATEGORY = "Lora Manager/stackers"
|
||||
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
"required": {
|
||||
"low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}),
|
||||
"merge_lora": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}),
|
||||
"lora_syntax": (IO.STRING, {
|
||||
"multiline": True,
|
||||
"defaultInput": True,
|
||||
"forceInput": True,
|
||||
"tooltip": "Connect a TEXT output for LoRA syntax: <lora:name:strength>"
|
||||
}),
|
||||
},
|
||||
|
||||
"optional": {
|
||||
"prev_lora": ("WANVIDLORA",),
|
||||
"blocks": ("BLOCKS",)
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("WANVIDLORA", IO.STRING, IO.STRING)
|
||||
RETURN_NAMES = ("lora", "trigger_words", "active_loras")
|
||||
|
||||
FUNCTION = "process_loras_from_syntax"
|
||||
|
||||
def process_loras_from_syntax(self, lora_syntax, low_mem_load=False, merge_lora=True, **kwargs):
|
||||
text_to_process = lora_syntax
|
||||
|
||||
blocks = kwargs.get('blocks', {})
|
||||
selected_blocks = blocks.get("selected_blocks", {})
|
||||
layer_filter = blocks.get("layer_filter", "")
|
||||
|
||||
loras_list = []
|
||||
all_trigger_words = []
|
||||
active_loras = []
|
||||
|
||||
prev_lora = kwargs.get('prev_lora', None)
|
||||
if prev_lora is not None:
|
||||
loras_list.extend(prev_lora)
|
||||
|
||||
if not merge_lora:
|
||||
low_mem_load = False
|
||||
|
||||
parts = text_to_process.split('<lora:')
|
||||
for part in parts[1:]:
|
||||
end_index = part.find('>')
|
||||
if end_index == -1:
|
||||
continue
|
||||
|
||||
content = part[:end_index]
|
||||
lora_parts = content.split(':')
|
||||
|
||||
lora_name_raw = ""
|
||||
model_strength = 1.0
|
||||
clip_strength = 1.0
|
||||
|
||||
if len(lora_parts) == 2:
|
||||
lora_name_raw = lora_parts[0].strip()
|
||||
try:
|
||||
model_strength = float(lora_parts[1])
|
||||
clip_strength = model_strength
|
||||
except (ValueError, IndexError):
|
||||
logger.warning(f"Invalid strength for LoRA '{lora_name_raw}'. Skipping.")
|
||||
continue
|
||||
elif len(lora_parts) >= 3:
|
||||
lora_name_raw = lora_parts[0].strip()
|
||||
try:
|
||||
model_strength = float(lora_parts[1])
|
||||
clip_strength = float(lora_parts[2])
|
||||
except (ValueError, IndexError):
|
||||
logger.warning(f"Invalid strengths for LoRA '{lora_name_raw}'. Skipping.")
|
||||
continue
|
||||
else:
|
||||
continue
|
||||
|
||||
lora_path, trigger_words = get_lora_info(lora_name_raw)
|
||||
|
||||
lora_item = {
|
||||
"path": folder_paths.get_full_path("loras", lora_path),
|
||||
"strength": model_strength,
|
||||
"name": lora_path.split(".")[0],
|
||||
"blocks": selected_blocks,
|
||||
"layer_filter": layer_filter,
|
||||
"low_mem_load": low_mem_load,
|
||||
"merge_loras": merge_lora,
|
||||
}
|
||||
|
||||
loras_list.append(lora_item)
|
||||
active_loras.append((lora_name_raw, model_strength, clip_strength))
|
||||
all_trigger_words.extend(trigger_words)
|
||||
|
||||
trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""
|
||||
|
||||
formatted_loras = []
|
||||
for name, model_strength, clip_strength in active_loras:
|
||||
if abs(model_strength - clip_strength) > 0.001:
|
||||
formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}:{str(clip_strength).strip()}>")
|
||||
else:
|
||||
formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}>")
|
||||
|
||||
active_loras_text = " ".join(formatted_loras)
|
||||
|
||||
return (loras_list, trigger_words_text, active_loras_text)
|
||||
|
||||
NODE_CLASS_MAPPINGS = {
|
||||
"WanVideoLoraSelectFromText": WanVideoLoraSelectFromText
|
||||
}
|
||||
|
||||
NODE_DISPLAY_NAME_MAPPINGS = {
|
||||
"WanVideoLoraSelectFromText": "WanVideo Lora Select From Text (LoraManager)"
|
||||
}
|
||||
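For reference, the lora_syntax text this node parses follows the inline LoRA tag format named in the tooltip; the LoRA names below are placeholders. A single strength value is applied to both model and CLIP, while two values set the model and CLIP strengths separately:

<lora:myCharacterLora:0.8> <lora:styleLora:0.7:0.5>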
@@ -55,7 +55,7 @@ class RecipeMetadataParser(ABC):
|
||||
# Unpack the tuple to get the actual data
|
||||
civitai_info, error_msg = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)
|
||||
|
||||
if not civitai_info or civitai_info.get("error") == "Model not found":
|
||||
if not civitai_info or error_msg == "Model not found":
|
||||
# Model not found or deleted
|
||||
lora_entry['isDeleted'] = True
|
||||
lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png'
|
||||
|
||||
@@ -6,6 +6,7 @@ import logging
|
||||
from typing import Dict, Any
|
||||
from ..base import RecipeMetadataParser
|
||||
from ..constants import GEN_PARAM_KEYS
|
||||
from ...services.metadata_service import get_default_metadata_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -30,6 +31,9 @@ class AutomaticMetadataParser(RecipeMetadataParser):
|
||||
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
|
||||
"""Parse metadata from Automatic1111 format"""
|
||||
try:
|
||||
# Get metadata provider instead of using civitai_client directly
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
|
||||
# Split on Negative prompt if it exists
|
||||
if "Negative prompt:" in user_comment:
|
||||
parts = user_comment.split('Negative prompt:', 1)
|
||||
@@ -216,9 +220,9 @@ class AutomaticMetadataParser(RecipeMetadataParser):
|
||||
}
|
||||
|
||||
# Get additional info from Civitai
|
||||
if civitai_client:
|
||||
if metadata_provider:
|
||||
try:
|
||||
civitai_info = await civitai_client.get_model_version_info(resource.get("modelVersionId"))
|
||||
civitai_info = await metadata_provider.get_model_version_info(resource.get("modelVersionId"))
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
civitai_info,
|
||||
@@ -271,11 +275,11 @@ class AutomaticMetadataParser(RecipeMetadataParser):
|
||||
}
|
||||
|
||||
# Try to get info from Civitai
|
||||
if civitai_client:
|
||||
if metadata_provider:
|
||||
try:
|
||||
if lora_hash:
|
||||
# If we have hash, use it for lookup
|
||||
civitai_info = await civitai_client.get_model_by_hash(lora_hash)
|
||||
civitai_info = await metadata_provider.get_model_by_hash(lora_hash)
|
||||
else:
|
||||
civitai_info = None
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import logging
|
||||
from typing import Dict, Any, Union
|
||||
from ..base import RecipeMetadataParser
|
||||
from ..constants import GEN_PARAM_KEYS
|
||||
from ...services.metadata_service import get_default_metadata_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -36,12 +37,15 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
Args:
|
||||
metadata: The metadata from the image (dict)
|
||||
recipe_scanner: Optional recipe scanner service
|
||||
civitai_client: Optional Civitai API client
|
||||
civitai_client: Optional Civitai API client (deprecated, use metadata_provider instead)
|
||||
|
||||
Returns:
|
||||
Dict containing parsed recipe data
|
||||
"""
|
||||
try:
|
||||
# Get metadata provider instead of using civitai_client directly
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
|
||||
# Initialize result structure
|
||||
result = {
|
||||
'base_model': None,
|
||||
@@ -53,6 +57,14 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
# Track already added LoRAs to prevent duplicates
|
||||
added_loras = {} # key: model_version_id or hash, value: index in result["loras"]
|
||||
|
||||
# Extract hash information from hashes field for LoRA matching
|
||||
lora_hashes = {}
|
||||
if "hashes" in metadata and isinstance(metadata["hashes"], dict):
|
||||
for key, hash_value in metadata["hashes"].items():
|
||||
if key.startswith("LORA:"):
|
||||
lora_name = key.replace("LORA:", "")
|
||||
lora_hashes[lora_name] = hash_value
|
||||
|
||||
# Extract prompt and negative prompt
|
||||
if "prompt" in metadata:
|
||||
result["gen_params"]["prompt"] = metadata["prompt"]
|
||||
@@ -77,9 +89,9 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
# Extract base model information - directly if available
|
||||
if "baseModel" in metadata:
|
||||
result["base_model"] = metadata["baseModel"]
|
||||
elif "Model hash" in metadata and civitai_client:
|
||||
elif "Model hash" in metadata and metadata_provider:
|
||||
model_hash = metadata["Model hash"]
|
||||
model_info = await civitai_client.get_model_by_hash(model_hash)
|
||||
model_info, error = await metadata_provider.get_model_by_hash(model_hash)
|
||||
if model_info:
|
||||
result["base_model"] = model_info.get("baseModel", "")
|
||||
elif "Model" in metadata and isinstance(metadata.get("resources"), list):
|
||||
@@ -87,8 +99,8 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
for resource in metadata.get("resources", []):
|
||||
if resource.get("type") == "model" and resource.get("name") == metadata.get("Model"):
|
||||
# This is likely the checkpoint model
|
||||
if civitai_client and resource.get("hash"):
|
||||
model_info = await civitai_client.get_model_by_hash(resource.get("hash"))
|
||||
if metadata_provider and resource.get("hash"):
|
||||
model_info, error = await metadata_provider.get_model_by_hash(resource.get("hash"))
|
||||
if model_info:
|
||||
result["base_model"] = model_info.get("baseModel", "")
|
||||
|
||||
@@ -101,6 +113,10 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
if resource.get("type", "lora") == "lora":
|
||||
lora_hash = resource.get("hash", "")
|
||||
|
||||
# Try to get hash from the hashes field if not present in resource
|
||||
if not lora_hash and resource.get("name"):
|
||||
lora_hash = lora_hashes.get(resource["name"], "")
|
||||
|
||||
# Skip LoRAs without proper identification (hash or modelVersionId)
|
||||
if not lora_hash and not resource.get("modelVersionId"):
|
||||
logger.debug(f"Skipping LoRA resource '{resource.get('name', 'Unknown')}' - no hash or modelVersionId")
|
||||
@@ -126,9 +142,9 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
}
|
||||
|
||||
# Try to get info from Civitai if hash is available
|
||||
if lora_entry['hash'] and civitai_client:
|
||||
if lora_entry['hash'] and metadata_provider:
|
||||
try:
|
||||
civitai_info = await civitai_client.get_model_by_hash(lora_hash)
|
||||
civitai_info = await metadata_provider.get_model_by_hash(lora_hash)
|
||||
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
@@ -182,14 +198,10 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
}
|
||||
|
||||
# Try to get info from Civitai if modelVersionId is available
|
||||
if version_id and civitai_client:
|
||||
if version_id and metadata_provider:
|
||||
try:
|
||||
# Use get_model_version_info instead of get_model_version
|
||||
civitai_info, error = await civitai_client.get_model_version_info(version_id)
|
||||
|
||||
if error:
|
||||
logger.warning(f"Error getting model version info: {error}")
|
||||
continue
|
||||
civitai_info = await metadata_provider.get_model_version_info(version_id)
|
||||
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
@@ -247,30 +259,27 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
'isDeleted': False
|
||||
}
|
||||
|
||||
# If we have a version ID and civitai client, try to get more info
|
||||
if version_id and civitai_client:
|
||||
# If we have a version ID and metadata provider, try to get more info
|
||||
if version_id and metadata_provider:
|
||||
try:
|
||||
# Use get_model_version_info with the version ID
|
||||
civitai_info, error = await civitai_client.get_model_version_info(version_id)
|
||||
civitai_info = await metadata_provider.get_model_version_info(version_id)
|
||||
|
||||
if error:
|
||||
logger.warning(f"Error getting model version info: {error}")
|
||||
else:
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
civitai_info,
|
||||
recipe_scanner,
|
||||
base_model_counts
|
||||
)
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
civitai_info,
|
||||
recipe_scanner,
|
||||
base_model_counts
|
||||
)
|
||||
|
||||
if populated_entry is None:
|
||||
continue # Skip invalid LoRA types
|
||||
|
||||
if populated_entry is None:
|
||||
continue # Skip invalid LoRA types
|
||||
|
||||
lora_entry = populated_entry
|
||||
|
||||
# Track this LoRA for deduplication
|
||||
if version_id:
|
||||
added_loras[version_id] = len(result["loras"])
|
||||
lora_entry = populated_entry
|
||||
|
||||
# Track this LoRA for deduplication
|
||||
if version_id:
|
||||
added_loras[version_id] = len(result["loras"])
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching Civitai info for model ID {version_id}: {e}")
|
||||
|
||||
@@ -304,9 +313,9 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
|
||||
}
|
||||
|
||||
# Try to get info from Civitai if hash is available
|
||||
if lora_entry['hash'] and civitai_client:
|
||||
if lora_entry['hash'] and metadata_provider:
|
||||
try:
|
||||
civitai_info = await civitai_client.get_model_by_hash(lora_hash)
|
||||
civitai_info = await metadata_provider.get_model_by_hash(lora_hash)
|
||||
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
|
||||
@@ -6,6 +6,7 @@ import logging
|
||||
from typing import Dict, Any
|
||||
from ..base import RecipeMetadataParser
|
||||
from ..constants import GEN_PARAM_KEYS
|
||||
from ...services.metadata_service import get_default_metadata_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -26,6 +27,9 @@ class ComfyMetadataParser(RecipeMetadataParser):
|
||||
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
|
||||
"""Parse metadata from Civitai ComfyUI metadata format"""
|
||||
try:
|
||||
# Get metadata provider instead of using civitai_client directly
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
|
||||
data = json.loads(user_comment)
|
||||
loras = []
|
||||
|
||||
@@ -73,10 +77,10 @@ class ComfyMetadataParser(RecipeMetadataParser):
|
||||
'isDeleted': False
|
||||
}
|
||||
|
||||
# Get additional info from Civitai if client is available
|
||||
if civitai_client:
|
||||
# Get additional info from Civitai if metadata provider is available
|
||||
if metadata_provider:
|
||||
try:
|
||||
civitai_info_tuple = await civitai_client.get_model_version_info(model_version_id)
|
||||
civitai_info_tuple = await metadata_provider.get_model_version_info(model_version_id)
|
||||
# Populate lora entry with Civitai info
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
@@ -116,9 +120,9 @@ class ComfyMetadataParser(RecipeMetadataParser):
|
||||
}
|
||||
|
||||
# Get additional checkpoint info from Civitai
|
||||
if civitai_client:
|
||||
if metadata_provider:
|
||||
try:
|
||||
civitai_info_tuple = await civitai_client.get_model_version_info(checkpoint_version_id)
|
||||
civitai_info_tuple = await metadata_provider.get_model_version_info(checkpoint_version_id)
|
||||
civitai_info, _ = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)
|
||||
# Populate checkpoint with Civitai info
|
||||
checkpoint = await self.populate_checkpoint_from_civitai(checkpoint, civitai_info)
|
||||
|
||||
@@ -5,6 +5,7 @@ import logging
|
||||
from typing import Dict, Any
|
||||
from ..base import RecipeMetadataParser
|
||||
from ..constants import GEN_PARAM_KEYS
|
||||
from ...services.metadata_service import get_default_metadata_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -18,8 +19,11 @@ class MetaFormatParser(RecipeMetadataParser):
|
||||
return re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL) is not None
|
||||
|
||||
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
|
||||
"""Parse metadata from images with meta format metadata"""
|
||||
"""Parse metadata from images with meta format metadata (Lora_N Model hash format)"""
|
||||
try:
|
||||
# Get metadata provider instead of using civitai_client directly
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
|
||||
# Extract prompt and negative prompt
|
||||
parts = user_comment.split('Negative prompt:', 1)
|
||||
prompt = parts[0].strip()
|
||||
@@ -122,9 +126,9 @@ class MetaFormatParser(RecipeMetadataParser):
|
||||
}
|
||||
|
||||
# Get info from Civitai by hash if available
|
||||
if civitai_client and hash_value:
|
||||
if metadata_provider and hash_value:
|
||||
try:
|
||||
civitai_info = await civitai_client.get_model_by_hash(hash_value)
|
||||
civitai_info = await metadata_provider.get_model_by_hash(hash_value)
|
||||
# Populate lora entry with Civitai info
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
|
||||
@@ -7,6 +7,7 @@ from typing import Dict, Any
|
||||
from ...config import config
|
||||
from ..base import RecipeMetadataParser
|
||||
from ..constants import GEN_PARAM_KEYS
|
||||
from ...services.metadata_service import get_default_metadata_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -23,6 +24,9 @@ class RecipeFormatParser(RecipeMetadataParser):
|
||||
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
|
||||
"""Parse metadata from images with dedicated recipe metadata format"""
|
||||
try:
|
||||
# Get metadata provider instead of using civitai_client directly
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
|
||||
# Extract recipe metadata from user comment
|
||||
try:
|
||||
# Look for recipe metadata section
|
||||
@@ -71,9 +75,9 @@ class RecipeFormatParser(RecipeMetadataParser):
|
||||
lora_entry['localPath'] = None
|
||||
|
||||
# Try to get additional info from Civitai if we have a model version ID
|
||||
if lora.get('modelVersionId') and civitai_client:
|
||||
if lora.get('modelVersionId') and metadata_provider:
|
||||
try:
|
||||
civitai_info_tuple = await civitai_client.get_model_version_info(lora['modelVersionId'])
|
||||
civitai_info_tuple = await metadata_provider.get_model_version_info(lora['modelVersionId'])
|
||||
# Populate lora entry with Civitai info
|
||||
populated_entry = await self.populate_lora_from_civitai(
|
||||
lora_entry,
|
||||
|
||||
@@ -12,8 +12,9 @@ from ..utils.routes_common import ModelRouteUtils
|
||||
from ..services.websocket_manager import ws_manager
|
||||
from ..services.settings_manager import settings
|
||||
from ..services.server_i18n import server_i18n
|
||||
from ..utils.utils import calculate_relative_path_for_model
|
||||
from ..utils.constants import AUTO_ORGANIZE_BATCH_SIZE
|
||||
from ..services.model_file_service import ModelFileService, ModelMoveService
|
||||
from ..services.websocket_progress_callback import WebSocketProgressCallback
|
||||
from ..services.metadata_service import get_default_metadata_provider
|
||||
from ..config import config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -33,6 +34,11 @@ class BaseModelRoutes(ABC):
|
||||
loader=jinja2.FileSystemLoader(config.templates_path),
|
||||
autoescape=True
|
||||
)
|
||||
|
||||
# Initialize file services with dependency injection
|
||||
self.model_file_service = ModelFileService(service.scanner, service.model_type)
|
||||
self.model_move_service = ModelMoveService(service.scanner)
|
||||
self.websocket_progress_callback = WebSocketProgressCallback()
|
||||
|
||||
def setup_routes(self, app: web.Application, prefix: str):
|
||||
"""Setup common routes for the model type
|
||||
@@ -42,49 +48,53 @@ class BaseModelRoutes(ABC):
|
||||
prefix: URL prefix (e.g., 'loras', 'checkpoints')
|
||||
"""
|
||||
# Common model management routes
|
||||
app.router.add_get(f'/api/{prefix}/list', self.get_models)
|
||||
app.router.add_post(f'/api/{prefix}/delete', self.delete_model)
|
||||
app.router.add_post(f'/api/{prefix}/exclude', self.exclude_model)
|
||||
app.router.add_post(f'/api/{prefix}/fetch-civitai', self.fetch_civitai)
|
||||
app.router.add_post(f'/api/{prefix}/relink-civitai', self.relink_civitai)
|
||||
app.router.add_post(f'/api/{prefix}/replace-preview', self.replace_preview)
|
||||
app.router.add_post(f'/api/{prefix}/save-metadata', self.save_metadata)
|
||||
app.router.add_post(f'/api/{prefix}/rename', self.rename_model)
|
||||
app.router.add_post(f'/api/{prefix}/bulk-delete', self.bulk_delete_models)
|
||||
app.router.add_post(f'/api/{prefix}/verify-duplicates', self.verify_duplicates)
|
||||
app.router.add_post(f'/api/{prefix}/move_model', self.move_model)
|
||||
app.router.add_post(f'/api/{prefix}/move_models_bulk', self.move_models_bulk)
|
||||
app.router.add_get(f'/api/{prefix}/auto-organize', self.auto_organize_models)
|
||||
app.router.add_get(f'/api/{prefix}/auto-organize-progress', self.get_auto_organize_progress)
|
||||
app.router.add_get(f'/api/lm/{prefix}/list', self.get_models)
|
||||
app.router.add_post(f'/api/lm/{prefix}/delete', self.delete_model)
|
||||
app.router.add_post(f'/api/lm/{prefix}/exclude', self.exclude_model)
|
||||
app.router.add_post(f'/api/lm/{prefix}/fetch-civitai', self.fetch_civitai)
|
||||
app.router.add_post(f'/api/lm/{prefix}/fetch-all-civitai', self.fetch_all_civitai)
|
||||
app.router.add_post(f'/api/lm/{prefix}/relink-civitai', self.relink_civitai)
|
||||
app.router.add_post(f'/api/lm/{prefix}/replace-preview', self.replace_preview)
|
||||
app.router.add_post(f'/api/lm/{prefix}/save-metadata', self.save_metadata)
|
||||
app.router.add_post(f'/api/lm/{prefix}/add-tags', self.add_tags)
|
||||
app.router.add_post(f'/api/lm/{prefix}/rename', self.rename_model)
|
||||
app.router.add_post(f'/api/lm/{prefix}/bulk-delete', self.bulk_delete_models)
|
||||
app.router.add_post(f'/api/lm/{prefix}/verify-duplicates', self.verify_duplicates)
|
||||
app.router.add_post(f'/api/lm/{prefix}/move_model', self.move_model)
|
||||
app.router.add_post(f'/api/lm/{prefix}/move_models_bulk', self.move_models_bulk)
|
||||
app.router.add_get(f'/api/lm/{prefix}/auto-organize', self.auto_organize_models)
|
||||
app.router.add_post(f'/api/lm/{prefix}/auto-organize', self.auto_organize_models)
|
||||
app.router.add_get(f'/api/lm/{prefix}/auto-organize-progress', self.get_auto_organize_progress)
|
||||
|
||||
# Common query routes
|
||||
app.router.add_get(f'/api/{prefix}/top-tags', self.get_top_tags)
|
||||
app.router.add_get(f'/api/{prefix}/base-models', self.get_base_models)
|
||||
app.router.add_get(f'/api/{prefix}/scan', self.scan_models)
|
||||
app.router.add_get(f'/api/{prefix}/roots', self.get_model_roots)
|
||||
app.router.add_get(f'/api/{prefix}/folders', self.get_folders)
|
||||
app.router.add_get(f'/api/{prefix}/folder-tree', self.get_folder_tree)
|
||||
app.router.add_get(f'/api/{prefix}/unified-folder-tree', self.get_unified_folder_tree)
|
||||
app.router.add_get(f'/api/{prefix}/find-duplicates', self.find_duplicate_models)
|
||||
app.router.add_get(f'/api/{prefix}/find-filename-conflicts', self.find_filename_conflicts)
|
||||
app.router.add_get(f'/api/{prefix}/get-notes', self.get_model_notes)
|
||||
app.router.add_get(f'/api/{prefix}/preview-url', self.get_model_preview_url)
|
||||
app.router.add_get(f'/api/{prefix}/civitai-url', self.get_model_civitai_url)
|
||||
app.router.add_get(f'/api/{prefix}/metadata', self.get_model_metadata)
|
||||
app.router.add_get(f'/api/{prefix}/model-description', self.get_model_description)
|
||||
app.router.add_get(f'/api/lm/{prefix}/top-tags', self.get_top_tags)
|
||||
app.router.add_get(f'/api/lm/{prefix}/base-models', self.get_base_models)
|
||||
app.router.add_get(f'/api/lm/{prefix}/scan', self.scan_models)
|
||||
app.router.add_get(f'/api/lm/{prefix}/roots', self.get_model_roots)
|
||||
app.router.add_get(f'/api/lm/{prefix}/folders', self.get_folders)
|
||||
app.router.add_get(f'/api/lm/{prefix}/folder-tree', self.get_folder_tree)
|
||||
app.router.add_get(f'/api/lm/{prefix}/unified-folder-tree', self.get_unified_folder_tree)
|
||||
app.router.add_get(f'/api/lm/{prefix}/find-duplicates', self.find_duplicate_models)
|
||||
app.router.add_get(f'/api/lm/{prefix}/find-filename-conflicts', self.find_filename_conflicts)
|
||||
app.router.add_get(f'/api/lm/{prefix}/get-notes', self.get_model_notes)
|
||||
app.router.add_get(f'/api/lm/{prefix}/preview-url', self.get_model_preview_url)
|
||||
app.router.add_get(f'/api/lm/{prefix}/civitai-url', self.get_model_civitai_url)
|
||||
app.router.add_get(f'/api/lm/{prefix}/metadata', self.get_model_metadata)
|
||||
app.router.add_get(f'/api/lm/{prefix}/model-description', self.get_model_description)
|
||||
|
||||
# Autocomplete route
|
||||
app.router.add_get(f'/api/{prefix}/relative-paths', self.get_relative_paths)
|
||||
app.router.add_get(f'/api/lm/{prefix}/relative-paths', self.get_relative_paths)
|
||||
|
||||
# Common CivitAI integration
|
||||
app.router.add_get(f'/api/lm/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions)
|
||||
app.router.add_get(f'/api/lm/{prefix}/civitai/model/version/{{modelVersionId}}', self.get_civitai_model_by_version)
|
||||
app.router.add_get(f'/api/lm/{prefix}/civitai/model/hash/{{hash}}', self.get_civitai_model_by_hash)
|
||||
|
||||
# Common Download management
|
||||
app.router.add_post(f'/api/download-model', self.download_model)
|
||||
app.router.add_get(f'/api/download-model-get', self.download_model_get)
|
||||
app.router.add_get(f'/api/cancel-download-get', self.cancel_download_get)
|
||||
app.router.add_get(f'/api/download-progress/{{download_id}}', self.get_download_progress)
|
||||
|
||||
# CivitAI integration routes
|
||||
app.router.add_post(f'/api/{prefix}/fetch-all-civitai', self.fetch_all_civitai)
|
||||
# app.router.add_get(f'/api/civitai/versions/{{model_id}}', self.get_civitai_versions)
|
||||
app.router.add_post(f'/api/lm/download-model', self.download_model)
|
||||
app.router.add_get(f'/api/lm/download-model-get', self.download_model_get)
|
||||
app.router.add_get(f'/api/lm/cancel-download-get', self.cancel_download_get)
|
||||
app.router.add_get(f'/api/lm/download-progress/{{download_id}}', self.get_download_progress)
|
||||
|
||||
# Add generic page route
|
||||
app.router.add_get(f'/{prefix}', self.handle_models_page)
|
||||
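As a quick illustration of the relocated routes, a hedged sketch of querying the list endpoint from Python; the port (ComfyUI's default 8188) and the 'loras' prefix are assumptions:

import json
import urllib.request

# Fetch the LoRA list from the new /api/lm/ prefixed endpoint (host and port assumed)
with urllib.request.urlopen("http://127.0.0.1:8188/api/lm/loras/list") as resp:
    models = json.loads(resp.read())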
@@ -245,20 +255,45 @@ class BaseModelRoutes(ABC):
|
||||
return await ModelRouteUtils.handle_exclude_model(request, self.service.scanner)
|
||||
|
||||
async def fetch_civitai(self, request: web.Request) -> web.Response:
|
||||
"""Handle CivitAI metadata fetch request"""
|
||||
response = await ModelRouteUtils.handle_fetch_civitai(request, self.service.scanner)
|
||||
|
||||
# If successful, format the metadata before returning
|
||||
if response.status == 200:
|
||||
data = json.loads(response.body.decode('utf-8'))
|
||||
if data.get("success") and data.get("metadata"):
|
||||
formatted_metadata = await self.service.format_response(data["metadata"])
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"metadata": formatted_metadata
|
||||
})
|
||||
|
||||
return response
|
||||
"""Handle CivitAI metadata fetch request - force refresh model metadata"""
|
||||
try:
|
||||
data = await request.json()
|
||||
file_path = data.get('file_path')
|
||||
if not file_path:
|
||||
return web.json_response({"success": False, "error": "File path is required"}, status=400)
|
||||
|
||||
# Get model data from cache
|
||||
cache = await self.service.scanner.get_cached_data()
|
||||
model_data = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
|
||||
|
||||
if not model_data:
|
||||
return web.json_response({"success": False, "error": "Model not found in cache"}, status=404)
|
||||
|
||||
# Check if model has SHA256 hash
|
||||
if not model_data.get('sha256'):
|
||||
return web.json_response({"success": False, "error": "No SHA256 hash found"}, status=400)
|
||||
|
||||
# Use fetch_and_update_model to get and update metadata
|
||||
success, error = await ModelRouteUtils.fetch_and_update_model(
|
||||
sha256=model_data['sha256'],
|
||||
file_path=file_path,
|
||||
model_data=model_data,
|
||||
update_cache_func=self.service.scanner.update_single_model_cache
|
||||
)
|
||||
|
||||
if not success:
|
||||
return web.json_response({"success": False, "error": error})
|
||||
|
||||
# Format the updated metadata for response
|
||||
formatted_metadata = await self.service.format_response(model_data)
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"metadata": formatted_metadata
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching from CivitAI: {e}", exc_info=True)
|
||||
return web.json_response({"success": False, "error": str(e)}, status=500)
|
||||
|
||||
async def relink_civitai(self, request: web.Request) -> web.Response:
|
||||
"""Handle CivitAI metadata re-linking request"""
|
||||
@@ -272,6 +307,10 @@ class BaseModelRoutes(ABC):
|
||||
"""Handle saving metadata updates"""
|
||||
return await ModelRouteUtils.handle_save_metadata(request, self.service.scanner)
|
||||
|
||||
async def add_tags(self, request: web.Request) -> web.Response:
|
||||
"""Handle adding tags to model metadata"""
|
||||
return await ModelRouteUtils.handle_add_tags(request, self.service.scanner)
|
||||
|
||||
async def rename_model(self, request: web.Request) -> web.Response:
|
||||
"""Handle renaming a model file and its associated files"""
|
||||
return await ModelRouteUtils.handle_rename_model(request, self.service.scanner)
|
||||
@@ -513,6 +552,7 @@ class BaseModelRoutes(ABC):
|
||||
model_version_id = request.query.get('model_version_id')
|
||||
download_id = request.query.get('download_id')
|
||||
use_default_paths = request.query.get('use_default_paths', 'false').lower() == 'true'
|
||||
source = request.query.get('source') # Optional source parameter
|
||||
|
||||
# Create a data dictionary that mimics what would be received from a POST request
|
||||
data = {
|
||||
@@ -528,6 +568,10 @@ class BaseModelRoutes(ABC):
|
||||
|
||||
data['use_default_paths'] = use_default_paths
|
||||
|
||||
# Add source parameter if provided
|
||||
if source:
|
||||
data['source'] = source
|
||||
|
||||
# Create a mock request object with the data
|
||||
future = asyncio.get_event_loop().create_future()
|
||||
future.set_result(data)
|
||||
@@ -605,10 +649,19 @@ class BaseModelRoutes(ABC):
|
||||
success = 0
|
||||
needs_resort = False
|
||||
|
||||
# Prepare models to process
|
||||
# Prepare models to process, only those without CivitAI data
|
||||
enable_metadata_archive_db = settings.get('enable_metadata_archive_db', False)
|
||||
# Filter models that need CivitAI metadata update
|
||||
to_process = [
|
||||
model for model in cache.raw_data
|
||||
if model.get('sha256') and (not model.get('civitai') or 'id' not in model.get('civitai')) and model.get('from_civitai', True)
|
||||
model for model in cache.raw_data
|
||||
if model.get('sha256')
|
||||
and (
|
||||
not model.get('civitai') or not model['civitai'].get('id')
|
||||
)
|
||||
and (
|
||||
(enable_metadata_archive_db and not model.get('db_checked', False))
|
||||
or (not enable_metadata_archive_db and model.get('from_civitai') is True)
|
||||
)
|
||||
]
|
||||
total_to_process = len(to_process)
|
||||
|
||||
@@ -624,12 +677,13 @@ class BaseModelRoutes(ABC):
|
||||
for model in to_process:
|
||||
try:
|
||||
original_name = model.get('model_name')
|
||||
if await ModelRouteUtils.fetch_and_update_model(
|
||||
result, error = await ModelRouteUtils.fetch_and_update_model(
|
||||
sha256=model['sha256'],
|
||||
file_path=model['file_path'],
|
||||
model_data=model,
|
||||
update_cache_func=self.service.scanner.update_single_model_cache
|
||||
):
|
||||
)
|
||||
if result:
|
||||
success += 1
|
||||
if original_name != model.get('model_name'):
|
||||
needs_resort = True
|
||||
@@ -675,10 +729,107 @@ class BaseModelRoutes(ABC):
|
||||
|
||||
async def get_civitai_versions(self, request: web.Request) -> web.Response:
|
||||
"""Get available versions for a Civitai model with local availability info"""
|
||||
# This will be implemented by subclasses as they need CivitAI client access
|
||||
return web.json_response({
|
||||
"error": "Not implemented in base class"
|
||||
}, status=501)
|
||||
try:
|
||||
model_id = request.match_info['model_id']
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
response = await metadata_provider.get_model_versions(model_id)
|
||||
if not response or not response.get('modelVersions'):
|
||||
return web.Response(status=404, text="Model not found")
|
||||
|
||||
versions = response.get('modelVersions', [])
|
||||
model_type = response.get('type', '')
|
||||
|
||||
# Check model type - allow subclasses to override validation
|
||||
if not self._validate_civitai_model_type(model_type):
|
||||
return web.json_response({
|
||||
'error': f"Model type mismatch. Expected {self._get_expected_model_types()}, got {model_type}"
|
||||
}, status=400)
|
||||
|
||||
# Check local availability for each version
|
||||
for version in versions:
|
||||
# Find the model file (type="Model" and primary=true) in the files list
|
||||
model_file = self._find_model_file(version.get('files', []))
|
||||
|
||||
if model_file:
|
||||
sha256 = model_file.get('hashes', {}).get('SHA256')
|
||||
if sha256:
|
||||
# Set existsLocally and localPath at the version level
|
||||
version['existsLocally'] = self.service.has_hash(sha256)
|
||||
if version['existsLocally']:
|
||||
version['localPath'] = self.service.get_path_by_hash(sha256)
|
||||
|
||||
# Also set the model file size at the version level for easier access
|
||||
version['modelSizeKB'] = model_file.get('sizeKB')
|
||||
else:
|
||||
# No model file found in this version
|
||||
version['existsLocally'] = False
|
||||
|
||||
return web.json_response(versions)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching {self.model_type} model versions: {e}")
|
||||
return web.Response(status=500, text=str(e))
|
||||
|
||||
async def get_civitai_model_by_version(self, request: web.Request) -> web.Response:
|
||||
"""Get CivitAI model details by model version ID"""
|
||||
try:
|
||||
model_version_id = request.match_info.get('modelVersionId')
|
||||
|
||||
# Get model details from metadata provider
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
model, error_msg = await metadata_provider.get_model_version_info(model_version_id)
|
||||
|
||||
if not model:
|
||||
# Log warning for failed model retrieval
|
||||
logger.warning(f"Failed to fetch model version {model_version_id}: {error_msg}")
|
||||
|
||||
# Determine status code based on error message
|
||||
status_code = 404 if error_msg and "not found" in error_msg.lower() else 500
|
||||
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": error_msg or "Failed to fetch model information"
|
||||
}, status=status_code)
|
||||
|
||||
return web.json_response(model)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model details: {e}")
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, status=500)
|
||||
|
||||
async def get_civitai_model_by_hash(self, request: web.Request) -> web.Response:
|
||||
"""Get CivitAI model details by hash"""
|
||||
try:
|
||||
hash = request.match_info.get('hash')
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
model, error = await metadata_provider.get_model_by_hash(hash)
|
||||
if error:
|
||||
logger.warning(f"Error getting model by hash: {error}")
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": error
|
||||
}, status=404)
|
||||
return web.json_response(model)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model details by hash: {e}")
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, status=500)
|
||||
|
||||
def _validate_civitai_model_type(self, model_type: str) -> bool:
|
||||
"""Validate CivitAI model type - to be overridden by subclasses"""
|
||||
return True # Default: accept all types
|
||||
|
||||
def _get_expected_model_types(self) -> str:
|
||||
"""Get expected model types string for error messages - to be overridden by subclasses"""
|
||||
return "any model type"
|
||||
|
||||
def _find_model_file(self, files: list) -> dict:
|
||||
"""Find the appropriate model file from the files list - can be overridden by subclasses"""
|
||||
# Find the primary model file (type="Model" and primary=true) in the files list
|
||||
return next((file for file in files if file.get('type') == 'Model' and file.get('primary') == True), None)
|
||||
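    # A hedged sketch of how a concrete routes class could override the hooks above;
    # the class name and the accepted type strings are illustrative assumptions,
    # not the repository's actual subclass:
    #
    #     class LoraRoutes(BaseModelRoutes):
    #         def _validate_civitai_model_type(self, model_type: str) -> bool:
    #             # Accept only LoRA-family types reported by CivitAI
    #             return model_type in ("LORA", "LoCon", "DoRA")
    #
    #         def _get_expected_model_types(self) -> str:
    #             return "LORA, LoCon or DoRA"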
|
||||
# Common model move handlers
|
||||
async def move_model(self, request: web.Request) -> web.Response:
|
||||
@@ -687,33 +838,17 @@ class BaseModelRoutes(ABC):
|
||||
data = await request.json()
|
||||
file_path = data.get('file_path')
|
||||
target_path = data.get('target_path')
|
||||
|
||||
if not file_path or not target_path:
|
||||
return web.Response(text='File path and target path are required', status=400)
|
||||
import os
|
||||
source_dir = os.path.dirname(file_path)
|
||||
if os.path.normpath(source_dir) == os.path.normpath(target_path):
|
||||
logger.info(f"Source and target directories are the same: {source_dir}")
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'message': 'Source and target directories are the same',
|
||||
'original_file_path': file_path,
|
||||
'new_file_path': file_path
|
||||
})
|
||||
|
||||
new_file_path = await self.service.scanner.move_model(file_path, target_path)
|
||||
if new_file_path:
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'original_file_path': file_path,
|
||||
'new_file_path': new_file_path
|
||||
})
|
||||
|
||||
result = await self.model_move_service.move_model(file_path, target_path)
|
||||
|
||||
if result['success']:
|
||||
return web.json_response(result)
|
||||
else:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Failed to move model',
|
||||
'original_file_path': file_path,
|
||||
'new_file_path': None
|
||||
}, status=500)
|
||||
return web.json_response(result, status=500)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error moving model: {e}", exc_info=True)
|
||||
return web.Response(text=str(e), status=500)
|
||||
@@ -724,51 +859,19 @@ class BaseModelRoutes(ABC):
|
||||
data = await request.json()
|
||||
file_paths = data.get('file_paths', [])
|
||||
target_path = data.get('target_path')
|
||||
|
||||
if not file_paths or not target_path:
|
||||
return web.Response(text='File paths and target path are required', status=400)
|
||||
results = []
|
||||
import os
|
||||
for file_path in file_paths:
|
||||
source_dir = os.path.dirname(file_path)
|
||||
if os.path.normpath(source_dir) == os.path.normpath(target_path):
|
||||
results.append({
|
||||
"original_file_path": file_path,
|
||||
"new_file_path": file_path,
|
||||
"success": True,
|
||||
"message": "Source and target directories are the same"
|
||||
})
|
||||
continue
|
||||
|
||||
new_file_path = await self.service.scanner.move_model(file_path, target_path)
|
||||
if new_file_path:
|
||||
results.append({
|
||||
"original_file_path": file_path,
|
||||
"new_file_path": new_file_path,
|
||||
"success": True,
|
||||
"message": "Success"
|
||||
})
|
||||
else:
|
||||
results.append({
|
||||
"original_file_path": file_path,
|
||||
"new_file_path": None,
|
||||
"success": False,
|
||||
"message": "Failed to move model"
|
||||
})
|
||||
success_count = sum(1 for r in results if r["success"])
|
||||
failure_count = len(results) - success_count
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'message': f'Moved {success_count} of {len(file_paths)} models',
|
||||
'results': results,
|
||||
'success_count': success_count,
|
||||
'failure_count': failure_count
|
||||
})
|
||||
|
||||
result = await self.model_move_service.move_models_bulk(file_paths, target_path)
|
||||
return web.json_response(result)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error moving models in bulk: {e}", exc_info=True)
|
||||
return web.Response(text=str(e), status=500)
|
||||
|
||||
async def auto_organize_models(self, request: web.Request) -> web.Response:
|
||||
"""Auto-organize all models based on current settings"""
|
||||
"""Auto-organize all models or a specific set of models based on current settings"""
|
||||
try:
|
||||
# Check if auto-organize is already running
|
||||
if ws_manager.is_auto_organize_running():
|
||||
@@ -786,13 +889,28 @@ class BaseModelRoutes(ABC):
|
||||
'error': 'Auto-organize is already running. Please wait for it to complete.'
|
||||
}, status=409)
|
||||
|
||||
# Get specific file paths from request if this is a POST with selected models
|
||||
file_paths = None
|
||||
if request.method == 'POST':
|
||||
try:
|
||||
data = await request.json()
|
||||
file_paths = data.get('file_paths')
|
||||
except Exception:
|
||||
pass # Continue with all models if no valid JSON
|
||||
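                # For the selected-model case, the POST body is expected to carry the file
                # paths to organize; a hedged illustration (placeholder path, not a real file):
                #   {"file_paths": ["loras/example_character.safetensors"]}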
|
||||
async with auto_organize_lock:
|
||||
return await self._perform_auto_organize()
|
||||
# Use the service layer for business logic
|
||||
result = await self.model_file_service.auto_organize_models(
|
||||
file_paths=file_paths,
|
||||
progress_callback=self.websocket_progress_callback
|
||||
)
|
||||
|
||||
return web.json_response(result.to_dict())
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in auto_organize_models: {e}", exc_info=True)
|
||||
|
||||
# Send error message via WebSocket and cleanup
|
||||
# Send error message via WebSocket
|
||||
await ws_manager.broadcast_auto_organize_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'error',
|
||||
@@ -804,249 +922,6 @@ class BaseModelRoutes(ABC):
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
async def _perform_auto_organize(self) -> web.Response:
|
||||
"""Perform the actual auto-organize operation"""
|
||||
try:
|
||||
# Get all models from cache
|
||||
cache = await self.service.scanner.get_cached_data()
|
||||
all_models = cache.raw_data
|
||||
|
||||
# Get model roots for this scanner
|
||||
model_roots = self.service.get_model_roots()
|
||||
if not model_roots:
|
||||
await ws_manager.broadcast_auto_organize_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'error',
|
||||
'error': 'No model roots configured'
|
||||
})
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'No model roots configured'
|
||||
}, status=400)
|
||||
|
||||
# Check if flat structure is configured for this model type
|
||||
path_template = settings.get_download_path_template(self.service.model_type)
|
||||
is_flat_structure = not path_template
|
||||
|
||||
# Prepare results tracking
|
||||
results = []
|
||||
total_models = len(all_models)
|
||||
processed = 0
|
||||
success_count = 0
|
||||
failure_count = 0
|
||||
skipped_count = 0
|
||||
|
||||
# Send initial progress via WebSocket
|
||||
await ws_manager.broadcast_auto_organize_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'started',
|
||||
'total': total_models,
|
||||
'processed': 0,
|
||||
'success': 0,
|
||||
'failures': 0,
|
||||
'skipped': 0
|
||||
})
|
||||
|
||||
# Process models in batches
|
||||
for i in range(0, total_models, AUTO_ORGANIZE_BATCH_SIZE):
|
||||
batch = all_models[i:i + AUTO_ORGANIZE_BATCH_SIZE]
|
||||
|
||||
for model in batch:
|
||||
try:
|
||||
file_path = model.get('file_path')
|
||||
if not file_path:
|
||||
if len(results) < 100: # Limit detailed results
|
||||
results.append({
|
||||
"model": model.get('model_name', 'Unknown'),
|
||||
"success": False,
|
||||
"message": "No file path found"
|
||||
})
|
||||
failure_count += 1
|
||||
processed += 1
|
||||
continue
|
||||
|
||||
# Find which model root this file belongs to
|
||||
current_root = None
|
||||
for root in model_roots:
|
||||
# Normalize paths for comparison
|
||||
normalized_root = os.path.normpath(root).replace(os.sep, '/')
|
||||
normalized_file = os.path.normpath(file_path).replace(os.sep, '/')
|
||||
|
||||
if normalized_file.startswith(normalized_root):
|
||||
current_root = root
|
||||
break
|
||||
|
||||
if not current_root:
|
||||
if len(results) < 100: # Limit detailed results
|
||||
results.append({
|
||||
"model": model.get('model_name', 'Unknown'),
|
||||
"success": False,
|
||||
"message": "Model file not found in any configured root directory"
|
||||
})
|
||||
failure_count += 1
|
||||
processed += 1
|
||||
continue
|
||||
|
||||
# Handle flat structure case
|
||||
if is_flat_structure:
|
||||
current_dir = os.path.dirname(file_path)
|
||||
# Check if already in root directory
|
||||
if os.path.normpath(current_dir) == os.path.normpath(current_root):
|
||||
skipped_count += 1
|
||||
processed += 1
|
||||
continue
|
||||
|
||||
# Move to root directory for flat structure
|
||||
target_dir = current_root
|
||||
else:
|
||||
# Calculate new relative path based on settings
|
||||
new_relative_path = calculate_relative_path_for_model(model, self.service.model_type)
|
||||
|
||||
# If no relative path calculated (insufficient metadata), skip
|
||||
if not new_relative_path:
|
||||
if len(results) < 100: # Limit detailed results
|
||||
results.append({
|
||||
"model": model.get('model_name', 'Unknown'),
|
||||
"success": False,
|
||||
"message": "Skipped - insufficient metadata for organization"
|
||||
})
|
||||
skipped_count += 1
|
||||
processed += 1
|
||||
continue
|
||||
|
||||
# Calculate target directory
|
||||
target_dir = os.path.join(current_root, new_relative_path).replace(os.sep, '/')
|
||||
|
||||
current_dir = os.path.dirname(file_path)
|
||||
|
||||
# Skip if already in correct location
|
||||
if current_dir.replace(os.sep, '/') == target_dir.replace(os.sep, '/'):
|
||||
skipped_count += 1
|
||||
processed += 1
|
||||
continue
|
||||
|
||||
# Check if target file would conflict
|
||||
file_name = os.path.basename(file_path)
|
||||
target_file_path = os.path.join(target_dir, file_name)
|
||||
|
||||
if os.path.exists(target_file_path):
|
||||
if len(results) < 100: # Limit detailed results
|
||||
results.append({
|
||||
"model": model.get('model_name', 'Unknown'),
|
||||
"success": False,
|
||||
"message": f"Target file already exists: {target_file_path}"
|
||||
})
|
||||
failure_count += 1
|
||||
processed += 1
|
||||
continue
|
||||
|
||||
# Perform the move
|
||||
success = await self.service.scanner.move_model(file_path, target_dir)
|
||||
|
||||
if success:
|
||||
success_count += 1
|
||||
else:
|
||||
if len(results) < 100: # Limit detailed results
|
||||
results.append({
|
||||
"model": model.get('model_name', 'Unknown'),
|
||||
"success": False,
|
||||
"message": "Failed to move model"
|
||||
})
|
||||
failure_count += 1
|
||||
|
||||
processed += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing model {model.get('model_name', 'Unknown')}: {e}", exc_info=True)
|
||||
if len(results) < 100: # Limit detailed results
|
||||
results.append({
|
||||
"model": model.get('model_name', 'Unknown'),
|
||||
"success": False,
|
||||
"message": f"Error: {str(e)}"
|
||||
})
|
||||
failure_count += 1
|
||||
processed += 1
|
||||
|
||||
# Send progress update after each batch
|
||||
await ws_manager.broadcast_auto_organize_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'processing',
|
||||
'total': total_models,
|
||||
'processed': processed,
|
||||
'success': success_count,
|
||||
'failures': failure_count,
|
||||
'skipped': skipped_count
|
||||
})
|
||||
|
||||
# Small delay between batches to prevent overwhelming the system
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
# Send completion message
|
||||
await ws_manager.broadcast_auto_organize_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'cleaning',
|
||||
'total': total_models,
|
||||
'processed': processed,
|
||||
'success': success_count,
|
||||
'failures': failure_count,
|
||||
'skipped': skipped_count,
|
||||
'message': 'Cleaning up empty directories...'
|
||||
})
|
||||
|
||||
# Clean up empty directories after organizing
|
||||
from ..utils.utils import remove_empty_dirs
|
||||
cleanup_counts = {}
|
||||
for root in model_roots:
|
||||
removed = remove_empty_dirs(root)
|
||||
cleanup_counts[root] = removed
|
||||
|
||||
# Send cleanup completed message
|
||||
await ws_manager.broadcast_auto_organize_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'completed',
|
||||
'total': total_models,
|
||||
'processed': processed,
|
||||
'success': success_count,
|
||||
'failures': failure_count,
|
||||
'skipped': skipped_count,
|
||||
'cleanup': cleanup_counts
|
||||
})
|
||||
|
||||
# Prepare response with limited details
|
||||
response_data = {
|
||||
'success': True,
|
||||
'message': f'Auto-organize completed: {success_count} moved, {skipped_count} skipped, {failure_count} failed out of {total_models} total',
|
||||
'summary': {
|
||||
'total': total_models,
|
||||
'success': success_count,
|
||||
'skipped': skipped_count,
|
||||
'failures': failure_count,
|
||||
'organization_type': 'flat' if is_flat_structure else 'structured',
|
||||
'cleaned_dirs': cleanup_counts
|
||||
}
|
||||
}
|
||||
|
||||
# Only include detailed results if under limit
|
||||
if len(results) <= 100:
|
||||
response_data['results'] = results
|
||||
else:
|
||||
response_data['results_truncated'] = True
|
||||
response_data['sample_results'] = results[:50] # Show first 50 as sample
|
||||
|
||||
return web.json_response(response_data)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in _perform_auto_organize: {e}", exc_info=True)
|
||||
|
||||
# Send error message via WebSocket
|
||||
await ws_manager.broadcast_auto_organize_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'error',
|
||||
'error': str(e)
|
||||
})
|
||||
|
||||
raise e
|
||||
|
||||
async def get_auto_organize_progress(self, request: web.Request) -> web.Response:
|
||||
"""Get current auto-organize progress for polling"""
|
||||
try:
|
||||
|
||||
@@ -4,6 +4,7 @@ from aiohttp import web
|
||||
from .base_model_routes import BaseModelRoutes
|
||||
from ..services.checkpoint_service import CheckpointService
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
from ..services.metadata_service import get_default_metadata_provider
|
||||
from ..config import config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -15,14 +16,12 @@ class CheckpointRoutes(BaseModelRoutes):
|
||||
"""Initialize Checkpoint routes with Checkpoint service"""
|
||||
# Service will be initialized later via setup_routes
|
||||
self.service = None
|
||||
self.civitai_client = None
|
||||
self.template_name = "checkpoints.html"
|
||||
|
||||
async def initialize_services(self):
|
||||
"""Initialize services from ServiceRegistry"""
|
||||
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
self.service = CheckpointService(checkpoint_scanner)
|
||||
self.civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
|
||||
# Initialize parent with the service
|
||||
super().__init__(self.service)
|
||||
@@ -37,15 +36,20 @@ class CheckpointRoutes(BaseModelRoutes):
|
||||
|
||||
def setup_specific_routes(self, app: web.Application, prefix: str):
|
||||
"""Setup Checkpoint-specific routes"""
|
||||
# Checkpoint-specific CivitAI integration
|
||||
app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_checkpoint)
|
||||
|
||||
# Checkpoint info by name
|
||||
app.router.add_get(f'/api/{prefix}/info/{{name}}', self.get_checkpoint_info)
|
||||
app.router.add_get(f'/api/lm/{prefix}/info/{{name}}', self.get_checkpoint_info)
|
||||
|
||||
# Checkpoint roots and Unet roots
|
||||
app.router.add_get(f'/api/{prefix}/checkpoints_roots', self.get_checkpoints_roots)
|
||||
app.router.add_get(f'/api/{prefix}/unet_roots', self.get_unet_roots)
|
||||
app.router.add_get(f'/api/lm/{prefix}/checkpoints_roots', self.get_checkpoints_roots)
|
||||
app.router.add_get(f'/api/lm/{prefix}/unet_roots', self.get_unet_roots)
|
||||
|
||||
def _validate_civitai_model_type(self, model_type: str) -> bool:
|
||||
"""Validate CivitAI model type for Checkpoint"""
|
||||
return model_type.lower() == 'checkpoint'
|
||||
|
||||
def _get_expected_model_types(self) -> str:
|
||||
"""Get expected model types string for error messages"""
|
||||
return "Checkpoint"
|
||||
|
||||
async def get_checkpoint_info(self, request: web.Request) -> web.Response:
|
||||
"""Get detailed information for a specific checkpoint by name"""
|
||||
@@ -62,53 +66,6 @@ class CheckpointRoutes(BaseModelRoutes):
|
||||
logger.error(f"Error in get_checkpoint_info: {e}", exc_info=True)
|
||||
return web.json_response({"error": str(e)}, status=500)
|
||||
|
||||
async def get_civitai_versions_checkpoint(self, request: web.Request) -> web.Response:
|
||||
"""Get available versions for a Civitai checkpoint model with local availability info"""
|
||||
try:
|
||||
model_id = request.match_info['model_id']
|
||||
response = await self.civitai_client.get_model_versions(model_id)
|
||||
if not response or not response.get('modelVersions'):
|
||||
return web.Response(status=404, text="Model not found")
|
||||
|
||||
versions = response.get('modelVersions', [])
|
||||
model_type = response.get('type', '')
|
||||
|
||||
# Check model type - should be Checkpoint
|
||||
if model_type.lower() != 'checkpoint':
|
||||
return web.json_response({
|
||||
'error': f"Model type mismatch. Expected Checkpoint, got {model_type}"
|
||||
}, status=400)
|
||||
|
||||
# Check local availability for each version
|
||||
for version in versions:
|
||||
# Find the primary model file (type="Model" and primary=true) in the files list
|
||||
model_file = next((file for file in version.get('files', [])
|
||||
if file.get('type') == 'Model' and file.get('primary') == True), None)
|
||||
|
||||
# If no primary file found, try to find any model file
|
||||
if not model_file:
|
||||
model_file = next((file for file in version.get('files', [])
|
||||
if file.get('type') == 'Model'), None)
|
||||
|
||||
if model_file:
|
||||
sha256 = model_file.get('hashes', {}).get('SHA256')
|
||||
if sha256:
|
||||
# Set existsLocally and localPath at the version level
|
||||
version['existsLocally'] = self.service.has_hash(sha256)
|
||||
if version['existsLocally']:
|
||||
version['localPath'] = self.service.get_path_by_hash(sha256)
|
||||
|
||||
# Also set the model file size at the version level for easier access
|
||||
version['modelSizeKB'] = model_file.get('sizeKB')
|
||||
else:
|
||||
# No model file found in this version
|
||||
version['existsLocally'] = False
|
||||
|
||||
return web.json_response(versions)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching checkpoint model versions: {e}")
|
||||
return web.Response(status=500, text=str(e))
|
||||
|
||||
async def get_checkpoints_roots(self, request: web.Request) -> web.Response:
|
||||
"""Return the list of checkpoint roots from config"""
|
||||
try:
|
||||
|
||||
@@ -4,6 +4,7 @@ from aiohttp import web
|
||||
from .base_model_routes import BaseModelRoutes
|
||||
from ..services.embedding_service import EmbeddingService
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
from ..services.metadata_service import get_default_metadata_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -14,14 +15,12 @@ class EmbeddingRoutes(BaseModelRoutes):
|
||||
"""Initialize Embedding routes with Embedding service"""
|
||||
# Service will be initialized later via setup_routes
|
||||
self.service = None
|
||||
self.civitai_client = None
|
||||
self.template_name = "embeddings.html"
|
||||
|
||||
async def initialize_services(self):
|
||||
"""Initialize services from ServiceRegistry"""
|
||||
embedding_scanner = await ServiceRegistry.get_embedding_scanner()
|
||||
self.service = EmbeddingService(embedding_scanner)
|
||||
self.civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
|
||||
# Initialize parent with the service
|
||||
super().__init__(self.service)
|
||||
@@ -36,11 +35,16 @@ class EmbeddingRoutes(BaseModelRoutes):
|
||||
|
||||
def setup_specific_routes(self, app: web.Application, prefix: str):
|
||||
"""Setup Embedding-specific routes"""
|
||||
# Embedding-specific CivitAI integration
|
||||
app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_embedding)
|
||||
|
||||
# Embedding info by name
|
||||
app.router.add_get(f'/api/{prefix}/info/{{name}}', self.get_embedding_info)
|
||||
app.router.add_get(f'/api/lm/{prefix}/info/{{name}}', self.get_embedding_info)
|
||||
|
||||
def _validate_civitai_model_type(self, model_type: str) -> bool:
|
||||
"""Validate CivitAI model type for Embedding"""
|
||||
return model_type.lower() == 'textualinversion'
|
||||
|
||||
def _get_expected_model_types(self) -> str:
|
||||
"""Get expected model types string for error messages"""
|
||||
return "TextualInversion"
|
||||
|
||||
async def get_embedding_info(self, request: web.Request) -> web.Response:
|
||||
"""Get detailed information for a specific embedding by name"""
|
||||
@@ -56,50 +60,3 @@ class EmbeddingRoutes(BaseModelRoutes):
|
||||
except Exception as e:
|
||||
logger.error(f"Error in get_embedding_info: {e}", exc_info=True)
|
||||
return web.json_response({"error": str(e)}, status=500)
|
||||
|
||||
async def get_civitai_versions_embedding(self, request: web.Request) -> web.Response:
|
||||
"""Get available versions for a Civitai embedding model with local availability info"""
|
||||
try:
|
||||
model_id = request.match_info['model_id']
|
||||
response = await self.civitai_client.get_model_versions(model_id)
|
||||
if not response or not response.get('modelVersions'):
|
||||
return web.Response(status=404, text="Model not found")
|
||||
|
||||
versions = response.get('modelVersions', [])
|
||||
model_type = response.get('type', '')
|
||||
|
||||
# Check model type - should be TextualInversion (Embedding)
|
||||
if model_type.lower() not in ['textualinversion', 'embedding']:
|
||||
return web.json_response({
|
||||
'error': f"Model type mismatch. Expected TextualInversion/Embedding, got {model_type}"
|
||||
}, status=400)
|
||||
|
||||
# Check local availability for each version
|
||||
for version in versions:
|
||||
# Find the primary model file (type="Model" and primary=true) in the files list
|
||||
model_file = next((file for file in version.get('files', [])
|
||||
if file.get('type') == 'Model' and file.get('primary') == True), None)
|
||||
|
||||
# If no primary file found, try to find any model file
|
||||
if not model_file:
|
||||
model_file = next((file for file in version.get('files', [])
|
||||
if file.get('type') == 'Model'), None)
|
||||
|
||||
if model_file:
|
||||
sha256 = model_file.get('hashes', {}).get('SHA256')
|
||||
if sha256:
|
||||
# Set existsLocally and localPath at the version level
|
||||
version['existsLocally'] = self.service.has_hash(sha256)
|
||||
if version['existsLocally']:
|
||||
version['localPath'] = self.service.get_path_by_hash(sha256)
|
||||
|
||||
# Also set the model file size at the version level for easier access
|
||||
version['modelSizeKB'] = model_file.get('sizeKB')
|
||||
else:
|
||||
# No model file found in this version
|
||||
version['existsLocally'] = False
|
||||
|
||||
return web.json_response(versions)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching embedding model versions: {e}")
|
||||
return web.Response(status=500, text=str(e))
|
||||
|
||||
@@ -12,16 +12,16 @@ class ExampleImagesRoutes:
    @staticmethod
    def setup_routes(app):
        """Register example images routes"""
        app.router.add_post('/api/download-example-images', ExampleImagesRoutes.download_example_images)
        app.router.add_post('/api/import-example-images', ExampleImagesRoutes.import_example_images)
        app.router.add_get('/api/example-images-status', ExampleImagesRoutes.get_example_images_status)
        app.router.add_post('/api/pause-example-images', ExampleImagesRoutes.pause_example_images)
        app.router.add_post('/api/resume-example-images', ExampleImagesRoutes.resume_example_images)
        app.router.add_post('/api/open-example-images-folder', ExampleImagesRoutes.open_example_images_folder)
        app.router.add_get('/api/example-image-files', ExampleImagesRoutes.get_example_image_files)
        app.router.add_get('/api/has-example-images', ExampleImagesRoutes.has_example_images)
        app.router.add_post('/api/delete-example-image', ExampleImagesRoutes.delete_example_image)
        app.router.add_post('/api/force-download-example-images', ExampleImagesRoutes.force_download_example_images)
        app.router.add_post('/api/lm/download-example-images', ExampleImagesRoutes.download_example_images)
        app.router.add_post('/api/lm/import-example-images', ExampleImagesRoutes.import_example_images)
        app.router.add_get('/api/lm/example-images-status', ExampleImagesRoutes.get_example_images_status)
        app.router.add_post('/api/lm/pause-example-images', ExampleImagesRoutes.pause_example_images)
        app.router.add_post('/api/lm/resume-example-images', ExampleImagesRoutes.resume_example_images)
        app.router.add_post('/api/lm/open-example-images-folder', ExampleImagesRoutes.open_example_images_folder)
        app.router.add_get('/api/lm/example-image-files', ExampleImagesRoutes.get_example_image_files)
        app.router.add_get('/api/lm/has-example-images', ExampleImagesRoutes.has_example_images)
        app.router.add_post('/api/lm/delete-example-image', ExampleImagesRoutes.delete_example_image)
        app.router.add_post('/api/lm/force-download-example-images', ExampleImagesRoutes.force_download_example_images)
|
||||
|
||||
@staticmethod
|
||||
async def download_example_images(request):
|
||||
|
||||
@@ -7,7 +7,7 @@ from server import PromptServer # type: ignore
|
||||
from .base_model_routes import BaseModelRoutes
|
||||
from ..services.lora_service import LoraService
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
from ..utils.routes_common import ModelRouteUtils
|
||||
from ..services.metadata_service import get_default_metadata_provider
|
||||
from ..utils.utils import get_lora_info
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -19,14 +19,12 @@ class LoraRoutes(BaseModelRoutes):
|
||||
"""Initialize LoRA routes with LoRA service"""
|
||||
# Service will be initialized later via setup_routes
|
||||
self.service = None
|
||||
self.civitai_client = None
|
||||
self.template_name = "loras.html"
|
||||
|
||||
async def initialize_services(self):
|
||||
"""Initialize services from ServiceRegistry"""
|
||||
lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||
self.service = LoraService(lora_scanner)
|
||||
self.civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
|
||||
# Initialize parent with the service
|
||||
super().__init__(self.service)
|
||||
@@ -42,17 +40,12 @@ class LoraRoutes(BaseModelRoutes):
|
||||
def setup_specific_routes(self, app: web.Application, prefix: str):
|
||||
"""Setup LoRA-specific routes"""
|
||||
# LoRA-specific query routes
|
||||
app.router.add_get(f'/api/{prefix}/letter-counts', self.get_letter_counts)
|
||||
app.router.add_get(f'/api/{prefix}/get-trigger-words', self.get_lora_trigger_words)
|
||||
app.router.add_get(f'/api/{prefix}/usage-tips-by-path', self.get_lora_usage_tips_by_path)
|
||||
|
||||
# CivitAI integration with LoRA-specific validation
|
||||
app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_lora)
|
||||
app.router.add_get(f'/api/{prefix}/civitai/model/version/{{modelVersionId}}', self.get_civitai_model_by_version)
|
||||
app.router.add_get(f'/api/{prefix}/civitai/model/hash/{{hash}}', self.get_civitai_model_by_hash)
|
||||
app.router.add_get(f'/api/lm/{prefix}/letter-counts', self.get_letter_counts)
|
||||
app.router.add_get(f'/api/lm/{prefix}/get-trigger-words', self.get_lora_trigger_words)
|
||||
app.router.add_get(f'/api/lm/{prefix}/usage-tips-by-path', self.get_lora_usage_tips_by_path)
|
||||
|
||||
# ComfyUI integration
|
||||
app.router.add_post(f'/api/{prefix}/get_trigger_words', self.get_trigger_words)
|
||||
app.router.add_post(f'/api/lm/{prefix}/get_trigger_words', self.get_trigger_words)
|
||||
|
||||
def _parse_specific_params(self, request: web.Request) -> Dict:
|
||||
"""Parse LoRA-specific parameters"""
|
||||
@@ -78,6 +71,15 @@ class LoraRoutes(BaseModelRoutes):
|
||||
|
||||
return params
|
||||
|
||||
def _validate_civitai_model_type(self, model_type: str) -> bool:
|
||||
"""Validate CivitAI model type for LoRA"""
|
||||
from ..utils.constants import VALID_LORA_TYPES
|
||||
return model_type.lower() in VALID_LORA_TYPES
|
||||
|
||||
def _get_expected_model_types(self) -> str:
|
||||
"""Get expected model types string for error messages"""
|
||||
return "LORA, LoCon, or DORA"
|
||||
|
||||
# LoRA-specific route handlers
|
||||
async def get_letter_counts(self, request: web.Request) -> web.Response:
|
||||
"""Get count of LoRAs for each letter of the alphabet"""
|
||||
@@ -212,91 +214,6 @@ class LoraRoutes(BaseModelRoutes):
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
# CivitAI integration methods
|
||||
async def get_civitai_versions_lora(self, request: web.Request) -> web.Response:
|
||||
"""Get available versions for a Civitai LoRA model with local availability info"""
|
||||
try:
|
||||
model_id = request.match_info['model_id']
|
||||
response = await self.civitai_client.get_model_versions(model_id)
|
||||
if not response or not response.get('modelVersions'):
|
||||
return web.Response(status=404, text="Model not found")
|
||||
|
||||
versions = response.get('modelVersions', [])
|
||||
model_type = response.get('type', '')
|
||||
|
||||
# Check model type - should be LORA, LoCon, or DORA
|
||||
from ..utils.constants import VALID_LORA_TYPES
|
||||
if model_type.lower() not in VALID_LORA_TYPES:
|
||||
return web.json_response({
|
||||
'error': f"Model type mismatch. Expected LORA or LoCon, got {model_type}"
|
||||
}, status=400)
|
||||
|
||||
# Check local availability for each version
|
||||
for version in versions:
|
||||
# Find the model file (type="Model") in the files list
|
||||
model_file = next((file for file in version.get('files', [])
|
||||
if file.get('type') == 'Model'), None)
|
||||
|
||||
if model_file:
|
||||
sha256 = model_file.get('hashes', {}).get('SHA256')
|
||||
if sha256:
|
||||
# Set existsLocally and localPath at the version level
|
||||
version['existsLocally'] = self.service.has_hash(sha256)
|
||||
if version['existsLocally']:
|
||||
version['localPath'] = self.service.get_path_by_hash(sha256)
|
||||
|
||||
# Also set the model file size at the version level for easier access
|
||||
version['modelSizeKB'] = model_file.get('sizeKB')
|
||||
else:
|
||||
# No model file found in this version
|
||||
version['existsLocally'] = False
|
||||
|
||||
return web.json_response(versions)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching LoRA model versions: {e}")
|
||||
return web.Response(status=500, text=str(e))
|
||||
|
||||
async def get_civitai_model_by_version(self, request: web.Request) -> web.Response:
|
||||
"""Get CivitAI model details by model version ID"""
|
||||
try:
|
||||
model_version_id = request.match_info.get('modelVersionId')
|
||||
|
||||
# Get model details from Civitai API
|
||||
model, error_msg = await self.civitai_client.get_model_version_info(model_version_id)
|
||||
|
||||
if not model:
|
||||
# Log warning for failed model retrieval
|
||||
logger.warning(f"Failed to fetch model version {model_version_id}: {error_msg}")
|
||||
|
||||
# Determine status code based on error message
|
||||
status_code = 404 if error_msg and "not found" in error_msg.lower() else 500
|
||||
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": error_msg or "Failed to fetch model information"
|
||||
}, status=status_code)
|
||||
|
||||
return web.json_response(model)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model details: {e}")
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, status=500)
|
||||
|
||||
async def get_civitai_model_by_hash(self, request: web.Request) -> web.Response:
|
||||
"""Get CivitAI model details by hash"""
|
||||
try:
|
||||
hash = request.match_info.get('hash')
|
||||
model = await self.civitai_client.get_model_by_hash(hash)
|
||||
return web.json_response(model)
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model details by hash: {e}")
|
||||
return web.json_response({
|
||||
"success": False,
|
||||
"error": str(e)
|
||||
}, status=500)
|
||||
|
||||
async def get_trigger_words(self, request: web.Request) -> web.Response:
|
||||
"""Get trigger words for specified LoRA models"""
|
||||
try:
|
||||
|
||||
@@ -3,6 +3,8 @@ import os
|
||||
import sys
|
||||
import threading
|
||||
import asyncio
|
||||
import subprocess
|
||||
import re
|
||||
from server import PromptServer # type: ignore
|
||||
from aiohttp import web
|
||||
from ..services.settings_manager import settings
|
||||
@@ -11,8 +13,9 @@ from ..utils.lora_metadata import extract_trained_words
|
||||
from ..config import config
|
||||
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS, NODE_TYPES, DEFAULT_NODE_COLOR
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
import re
|
||||
|
||||
from ..services.metadata_service import get_metadata_archive_manager, update_metadata_providers, get_metadata_provider
|
||||
from ..services.websocket_manager import ws_manager
|
||||
from ..services.downloader import get_downloader
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
standalone_mode = 'nodes' not in sys.modules
|
||||
@@ -83,76 +86,131 @@ node_registry = NodeRegistry()
|
||||
class MiscRoutes:
|
||||
"""Miscellaneous routes for various utility functions"""
|
||||
|
||||
@staticmethod
|
||||
def is_dedicated_example_images_folder(folder_path):
|
||||
"""
|
||||
Check if a folder is a dedicated example images folder.
|
||||
|
||||
A dedicated folder should either be:
|
||||
1. Empty
|
||||
2. Only contain .download_progress.json file and/or folders with valid SHA256 hash names (64 hex characters)
|
||||
|
||||
Args:
|
||||
folder_path (str): Path to the folder to check
|
||||
|
||||
Returns:
|
||||
bool: True if the folder is dedicated, False otherwise
|
||||
"""
|
||||
try:
|
||||
if not os.path.exists(folder_path) or not os.path.isdir(folder_path):
|
||||
return False
|
||||
|
||||
items = os.listdir(folder_path)
|
||||
|
||||
# Empty folder is considered dedicated
|
||||
if not items:
|
||||
return True
|
||||
|
||||
# Check each item in the folder
|
||||
for item in items:
|
||||
item_path = os.path.join(folder_path, item)
|
||||
|
||||
# Allow .download_progress.json file
|
||||
if item == '.download_progress.json' and os.path.isfile(item_path):
|
||||
continue
|
||||
|
||||
# Allow folders with valid SHA256 hash names (64 hex characters)
|
||||
if os.path.isdir(item_path):
|
||||
# Check if the folder name is a valid SHA256 hash
|
||||
if re.match(r'^[a-fA-F0-9]{64}$', item):
|
||||
continue
|
||||
|
||||
# If we encounter anything else, it's not a dedicated folder
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error checking if folder is dedicated: {e}")
|
||||
return False
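A quick sketch of what the check accepts, using a throwaway directory. This is illustrative only and assumes it runs in (or imports) this module so MiscRoutes is in scope; the assertions mirror the rules stated in the docstring above.

import os, tempfile

root = tempfile.mkdtemp()
open(os.path.join(root, ".download_progress.json"), "w").close()   # progress file is allowed
os.makedirs(os.path.join(root, "a" * 64))                          # 64-hex-char (SHA256) folder name is allowed
assert MiscRoutes.is_dedicated_example_images_folder(root) is True

open(os.path.join(root, "notes.txt"), "w").close()                 # any other entry disqualifies the folder
assert MiscRoutes.is_dedicated_example_images_folder(root) is False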
|
||||
|
||||
@staticmethod
|
||||
def setup_routes(app):
|
||||
"""Register miscellaneous routes"""
|
||||
app.router.add_post('/api/settings', MiscRoutes.update_settings)
|
||||
|
||||
# Add new route for clearing cache
|
||||
app.router.add_post('/api/clear-cache', MiscRoutes.clear_cache)
|
||||
app.router.add_get('/api/lm/settings', MiscRoutes.get_settings)
|
||||
app.router.add_post('/api/lm/settings', MiscRoutes.update_settings)
|
||||
|
||||
app.router.add_get('/api/health-check', lambda request: web.json_response({'status': 'ok'}))
|
||||
app.router.add_get('/api/lm/health-check', lambda request: web.json_response({'status': 'ok'}))
|
||||
|
||||
app.router.add_post('/api/lm/open-file-location', MiscRoutes.open_file_location)
|
||||
|
||||
# Usage stats routes
|
||||
app.router.add_post('/api/update-usage-stats', MiscRoutes.update_usage_stats)
|
||||
app.router.add_get('/api/get-usage-stats', MiscRoutes.get_usage_stats)
|
||||
app.router.add_post('/api/lm/update-usage-stats', MiscRoutes.update_usage_stats)
|
||||
app.router.add_get('/api/lm/get-usage-stats', MiscRoutes.get_usage_stats)
|
||||
|
||||
# Lora code update endpoint
|
||||
app.router.add_post('/api/update-lora-code', MiscRoutes.update_lora_code)
|
||||
app.router.add_post('/api/lm/update-lora-code', MiscRoutes.update_lora_code)
|
||||
|
||||
# Add new route for getting trained words
|
||||
app.router.add_get('/api/trained-words', MiscRoutes.get_trained_words)
|
||||
app.router.add_get('/api/lm/trained-words', MiscRoutes.get_trained_words)
|
||||
|
||||
# Add new route for getting model example files
|
||||
app.router.add_get('/api/model-example-files', MiscRoutes.get_model_example_files)
|
||||
app.router.add_get('/api/lm/model-example-files', MiscRoutes.get_model_example_files)
|
||||
|
||||
# Node registry endpoints
|
||||
app.router.add_post('/api/register-nodes', MiscRoutes.register_nodes)
|
||||
app.router.add_get('/api/get-registry', MiscRoutes.get_registry)
|
||||
app.router.add_post('/api/lm/register-nodes', MiscRoutes.register_nodes)
|
||||
app.router.add_get('/api/lm/get-registry', MiscRoutes.get_registry)
|
||||
|
||||
# Add new route for checking if a model exists in the library
|
||||
app.router.add_get('/api/check-model-exists', MiscRoutes.check_model_exists)
|
||||
app.router.add_get('/api/lm/check-model-exists', MiscRoutes.check_model_exists)
|
||||
|
||||
# Add routes for metadata archive database management
|
||||
app.router.add_post('/api/lm/download-metadata-archive', MiscRoutes.download_metadata_archive)
|
||||
app.router.add_post('/api/lm/remove-metadata-archive', MiscRoutes.remove_metadata_archive)
|
||||
app.router.add_get('/api/lm/metadata-archive-status', MiscRoutes.get_metadata_archive_status)
|
||||
|
||||
# Add route for checking model versions in library
|
||||
app.router.add_get('/api/lm/model-versions-status', MiscRoutes.get_model_versions_status)
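Many endpoints above now appear under the '/api/lm/' namespace alongside their original '/api/' spelling. If both forms need to stay reachable, the repetition could be folded into a small helper; the following is a hypothetical sketch, not something the codebase currently defines.

def add_route_pair(app, method, path, handler):
    # Register the same handler under the legacy and the namespaced prefix.
    add = getattr(app.router, f'add_{method}')
    add(f'/api{path}', handler)
    add(f'/api/lm{path}', handler)

# add_route_pair(app, 'get', '/get-usage-stats', MiscRoutes.get_usage_stats)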
|
||||
|
||||
@staticmethod
|
||||
async def clear_cache(request):
|
||||
"""Clear all cache files from the cache folder"""
|
||||
async def get_settings(request):
|
||||
"""Get application settings that should be synced to frontend"""
|
||||
try:
|
||||
# Get the cache folder path (relative to project directory)
|
||||
project_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
cache_folder = os.path.join(project_dir, 'cache')
|
||||
# Define keys that should be synced from backend to frontend
|
||||
sync_keys = [
|
||||
'civitai_api_key',
|
||||
'default_lora_root',
|
||||
'default_checkpoint_root',
|
||||
'default_embedding_root',
|
||||
'base_model_path_mappings',
|
||||
'download_path_templates',
|
||||
'enable_metadata_archive_db',
|
||||
'language',
|
||||
'proxy_enabled',
|
||||
'proxy_type',
|
||||
'proxy_host',
|
||||
'proxy_port',
|
||||
'proxy_username',
|
||||
'proxy_password',
|
||||
'example_images_path',
|
||||
'optimizeExampleImages',
|
||||
'autoDownloadExampleImages'
|
||||
]
|
||||
|
||||
# Check if cache folder exists
|
||||
if not os.path.exists(cache_folder):
|
||||
logger.info("Cache folder does not exist, nothing to clear")
|
||||
return web.json_response({'success': True, 'message': 'No cache folder found'})
|
||||
|
||||
# Get list of cache files before deleting for reporting
|
||||
cache_files = [f for f in os.listdir(cache_folder) if os.path.isfile(os.path.join(cache_folder, f))]
|
||||
deleted_files = []
|
||||
|
||||
# Delete each .msgpack file in the cache folder
|
||||
for filename in cache_files:
|
||||
if filename.endswith('.msgpack'):
|
||||
file_path = os.path.join(cache_folder, filename)
|
||||
try:
|
||||
os.remove(file_path)
|
||||
deleted_files.append(filename)
|
||||
logger.info(f"Deleted cache file: {filename}")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to delete {filename}: {e}")
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': f"Failed to delete {filename}: {str(e)}"
|
||||
}, status=500)
|
||||
# Build response with only the keys that should be synced
|
||||
response_data = {}
|
||||
for key in sync_keys:
|
||||
value = settings.get(key)
|
||||
if value is not None:
|
||||
response_data[key] = value
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'message': f"Successfully cleared {len(deleted_files)} cache files",
|
||||
'deleted_files': deleted_files
|
||||
'settings': response_data
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error clearing cache files: {e}", exc_info=True)
|
||||
logger.error(f"Error getting settings: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
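The new GET endpoint returns only the keys that actually have a stored value, wrapped under a 'settings' field. A minimal client sketch, with the host and port assumed rather than taken from this diff:

import requests

data = requests.get("http://127.0.0.1:8188/api/lm/settings").json()
# e.g. {'success': True,
#       'settings': {'default_lora_root': 'D:/models/loras',
#                    'proxy_enabled': False, 'language': 'en', ...}}
proxy_enabled = data.get("settings", {}).get("proxy_enabled", False)  # keys without a value are simply absent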
@@ -163,13 +221,15 @@ class MiscRoutes:
        """Update application settings"""
        try:
            data = await request.json()
            proxy_keys = {'proxy_enabled', 'proxy_host', 'proxy_port', 'proxy_username', 'proxy_password', 'proxy_type'}
            proxy_changed = False

            # Validate and update settings
            for key, value in data.items():
                if value == settings.get(key):
                    # No change, skip
                    continue
                # Special handling for example_images_path - verify path exists
                # Special handling for example_images_path - verify path exists and is dedicated
                if key == 'example_images_path' and value:
                    if not os.path.exists(value):
                        return web.json_response({
@@ -177,14 +237,35 @@ class MiscRoutes:
                            'error': f"Path does not exist: {value}"
                        })

                    # Check if folder is dedicated for example images
                    if not MiscRoutes.is_dedicated_example_images_folder(value):
                        return web.json_response({
                            'success': False,
                            'error': "Please set a dedicated folder for example images."
                        })

                    # Path changed - server restart required for new path to take effect
                    old_path = settings.get('example_images_path')
                    if old_path != value:
                        logger.info(f"Example images path changed to {value} - server restart required")

                # Save to settings
                settings.set(key, value)

                # Handle deletion for proxy credentials
                if value == '__DELETE__' and key in ('proxy_username', 'proxy_password'):
                    settings.delete(key)
                else:
                    # Save to settings
                    settings.set(key, value)

                if key == 'enable_metadata_archive_db':
                    await update_metadata_providers()

                if key in proxy_keys:
                    proxy_changed = True

            if proxy_changed:
                downloader = await get_downloader()
                await downloader.refresh_session()

            return web.json_response({'success': True})
        except Exception as e:
            logger.error(f"Error updating settings: {e}", exc_info=True)
|
||||
@@ -697,3 +778,274 @@ class MiscRoutes:
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
@staticmethod
|
||||
async def download_metadata_archive(request):
|
||||
"""Download and extract the metadata archive database"""
|
||||
try:
|
||||
archive_manager = await get_metadata_archive_manager()
|
||||
|
||||
# Get the download_id from query parameters if provided
|
||||
download_id = request.query.get('download_id')
|
||||
|
||||
# Progress callback to send updates via WebSocket
|
||||
def progress_callback(stage, message):
|
||||
data = {
|
||||
'stage': stage,
|
||||
'message': message,
|
||||
'type': 'metadata_archive_download'
|
||||
}
|
||||
|
||||
if download_id:
|
||||
# Send to specific download WebSocket if download_id is provided
|
||||
asyncio.create_task(ws_manager.broadcast_download_progress(download_id, data))
|
||||
else:
|
||||
# Fallback to general broadcast
|
||||
asyncio.create_task(ws_manager.broadcast(data))
|
||||
|
||||
# Download and extract in background
|
||||
success = await archive_manager.download_and_extract_database(progress_callback)
|
||||
|
||||
if success:
|
||||
# Update settings to enable metadata archive
|
||||
settings.set('enable_metadata_archive_db', True)
|
||||
|
||||
# Update metadata providers
|
||||
await update_metadata_providers()
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'message': 'Metadata archive database downloaded and extracted successfully'
|
||||
})
|
||||
else:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Failed to download and extract metadata archive database'
|
||||
}, status=500)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error downloading metadata archive: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
@staticmethod
|
||||
async def remove_metadata_archive(request):
|
||||
"""Remove the metadata archive database"""
|
||||
try:
|
||||
archive_manager = await get_metadata_archive_manager()
|
||||
|
||||
success = await archive_manager.remove_database()
|
||||
|
||||
if success:
|
||||
# Update settings to disable metadata archive
|
||||
settings.set('enable_metadata_archive_db', False)
|
||||
|
||||
# Update metadata providers
|
||||
await update_metadata_providers()
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'message': 'Metadata archive database removed successfully'
|
||||
})
|
||||
else:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Failed to remove metadata archive database'
|
||||
}, status=500)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error removing metadata archive: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
    @staticmethod
    async def get_metadata_archive_status(request):
        """Get the status of metadata archive database"""
        try:
            archive_manager = await get_metadata_archive_manager()

            is_available = archive_manager.is_database_available()
            is_enabled = settings.get('enable_metadata_archive_db', False)

            db_size = 0
            if is_available:
                db_path = archive_manager.get_database_path()
                if db_path and os.path.exists(db_path):
                    db_size = os.path.getsize(db_path)

            return web.json_response({
                'success': True,
                'isAvailable': is_available,
                'isEnabled': is_enabled,
                'databaseSize': db_size,
                'databasePath': archive_manager.get_database_path() if is_available else None
            })

        except Exception as e:
            logger.error(f"Error getting metadata archive status: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)
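Client-side, this status endpoint can be polled before offering the download or remove actions. A minimal sketch, with host and port assumed:

import requests

status = requests.get("http://127.0.0.1:8188/api/lm/metadata-archive-status").json()
# e.g. {'success': True, 'isAvailable': True, 'isEnabled': True,
#       'databaseSize': 1234567890, 'databasePath': '/path/to/archive.db'}
if status.get("isAvailable") and not status.get("isEnabled"):
    print("Archive present on disk but not enabled in settings")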
|
||||
|
||||
@staticmethod
|
||||
async def get_model_versions_status(request):
|
||||
"""
|
||||
Get all versions of a model from metadata provider and check their library status
|
||||
|
||||
Expects query parameters:
|
||||
- modelId: int - Civitai model ID (required)
|
||||
|
||||
Returns:
|
||||
- JSON with model type and versions list, each version includes 'inLibrary' flag
|
||||
"""
|
||||
try:
|
||||
# Get the modelId from query parameters
|
||||
model_id_str = request.query.get('modelId')
|
||||
|
||||
# Validate modelId parameter (required)
|
||||
if not model_id_str:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Missing required parameter: modelId'
|
||||
}, status=400)
|
||||
|
||||
try:
|
||||
# Convert modelId to integer
|
||||
model_id = int(model_id_str)
|
||||
except ValueError:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Parameter modelId must be an integer'
|
||||
}, status=400)
|
||||
|
||||
# Get metadata provider
|
||||
metadata_provider = await get_metadata_provider()
|
||||
if not metadata_provider:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Metadata provider not available'
|
||||
}, status=503)
|
||||
|
||||
# Get model versions from metadata provider
|
||||
response = await metadata_provider.get_model_versions(model_id)
|
||||
if not response or not response.get('modelVersions'):
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Model not found'
|
||||
}, status=404)
|
||||
|
||||
versions = response.get('modelVersions', [])
|
||||
model_name = response.get('name', '')
|
||||
model_type = response.get('type', '').lower()
|
||||
|
||||
# Determine scanner based on model type
|
||||
scanner = None
|
||||
normalized_type = None
|
||||
|
||||
if model_type in ['lora', 'locon', 'dora']:
|
||||
scanner = await ServiceRegistry.get_lora_scanner()
|
||||
normalized_type = 'lora'
|
||||
elif model_type == 'checkpoint':
|
||||
scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
normalized_type = 'checkpoint'
|
||||
elif model_type == 'textualinversion':
|
||||
scanner = await ServiceRegistry.get_embedding_scanner()
|
||||
normalized_type = 'embedding'
|
||||
else:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': f'Model type "{model_type}" is not supported'
|
||||
}, status=400)
|
||||
|
||||
if not scanner:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': f'Scanner for type "{normalized_type}" is not available'
|
||||
}, status=503)
|
||||
|
||||
# Get local versions from scanner
|
||||
local_versions = await scanner.get_model_versions_by_id(model_id)
|
||||
local_version_ids = set(version['versionId'] for version in local_versions)
|
||||
|
||||
# Add inLibrary flag to each version
|
||||
enriched_versions = []
|
||||
for version in versions:
|
||||
version_id = version.get('id')
|
||||
enriched_version = {
|
||||
'id': version_id,
|
||||
'name': version.get('name', ''),
|
||||
'thumbnailUrl': version.get('images')[0]['url'] if version.get('images') else None,
|
||||
'inLibrary': version_id in local_version_ids
|
||||
}
|
||||
enriched_versions.append(enriched_version)
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'modelId': model_id,
|
||||
'modelName': model_name,
|
||||
'modelType': model_type,
|
||||
'versions': enriched_versions
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get model versions status: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
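A sketch of querying the new endpoint for one model and splitting its versions by library status. Host, port, and the modelId value are assumptions for illustration.

import requests

r = requests.get(
    "http://127.0.0.1:8188/api/lm/model-versions-status",
    params={"modelId": 123456},   # hypothetical Civitai model id
)
data = r.json()
missing = [v for v in data.get("versions", []) if not v["inLibrary"]]
print(data.get("modelName"), data.get("modelType"), f"{len(missing)} version(s) not in library")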
|
||||
|
||||
@staticmethod
|
||||
async def open_file_location(request):
|
||||
"""
|
||||
Open the folder containing the specified file and select the file in the file explorer.
|
||||
|
||||
Expects a JSON request body with:
|
||||
{
|
||||
"file_path": "absolute/path/to/file"
|
||||
}
|
||||
"""
|
||||
try:
|
||||
data = await request.json()
|
||||
file_path = data.get('file_path')
|
||||
|
||||
if not file_path:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Missing file_path parameter'
|
||||
}, status=400)
|
||||
|
||||
file_path = os.path.abspath(file_path)
|
||||
|
||||
if not os.path.isfile(file_path):
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'File does not exist'
|
||||
}, status=404)
|
||||
|
||||
# Open the folder and select the file
|
||||
if os.name == 'nt': # Windows
|
||||
# explorer /select,"C:\path\to\file"
|
||||
subprocess.Popen(['explorer', '/select,', file_path])
|
||||
elif os.name == 'posix':
|
||||
if sys.platform == 'darwin': # macOS
|
||||
subprocess.Popen(['open', '-R', file_path])
|
||||
else: # Linux (selecting file is not standard, just open folder)
|
||||
folder = os.path.dirname(file_path)
|
||||
subprocess.Popen(['xdg-open', folder])
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'message': f'Opened folder and selected file: {file_path}'
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to open file location: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
@@ -24,6 +24,7 @@ from ..config import config
|
||||
standalone_mode = 'nodes' not in sys.modules
|
||||
|
||||
from ..services.service_registry import ServiceRegistry # Add ServiceRegistry import
|
||||
from ..services.downloader import get_downloader
|
||||
|
||||
# Only import MetadataRegistry in non-standalone mode
|
||||
if not standalone_mode:
|
||||
@@ -60,46 +61,46 @@ class RecipeRoutes:
|
||||
routes = cls()
|
||||
app.router.add_get('/loras/recipes', routes.handle_recipes_page)
|
||||
|
||||
app.router.add_get('/api/recipes', routes.get_recipes)
|
||||
app.router.add_get('/api/recipe/{recipe_id}', routes.get_recipe_detail)
|
||||
app.router.add_post('/api/recipes/analyze-image', routes.analyze_recipe_image)
|
||||
app.router.add_post('/api/recipes/analyze-local-image', routes.analyze_local_image)
|
||||
app.router.add_post('/api/recipes/save', routes.save_recipe)
|
||||
app.router.add_delete('/api/recipe/{recipe_id}', routes.delete_recipe)
|
||||
app.router.add_get('/api/lm/recipes', routes.get_recipes)
|
||||
app.router.add_get('/api/lm/recipe/{recipe_id}', routes.get_recipe_detail)
|
||||
app.router.add_post('/api/lm/recipes/analyze-image', routes.analyze_recipe_image)
|
||||
app.router.add_post('/api/lm/recipes/analyze-local-image', routes.analyze_local_image)
|
||||
app.router.add_post('/api/lm/recipes/save', routes.save_recipe)
|
||||
app.router.add_delete('/api/lm/recipe/{recipe_id}', routes.delete_recipe)
|
||||
|
||||
# Add new filter-related endpoints
|
||||
app.router.add_get('/api/recipes/top-tags', routes.get_top_tags)
|
||||
app.router.add_get('/api/recipes/base-models', routes.get_base_models)
|
||||
app.router.add_get('/api/lm/recipes/top-tags', routes.get_top_tags)
|
||||
app.router.add_get('/api/lm/recipes/base-models', routes.get_base_models)
|
||||
|
||||
# Add new sharing endpoints
|
||||
app.router.add_get('/api/recipe/{recipe_id}/share', routes.share_recipe)
|
||||
app.router.add_get('/api/recipe/{recipe_id}/share/download', routes.download_shared_recipe)
|
||||
app.router.add_get('/api/lm/recipe/{recipe_id}/share', routes.share_recipe)
|
||||
app.router.add_get('/api/lm/recipe/{recipe_id}/share/download', routes.download_shared_recipe)
|
||||
|
||||
# Add new endpoint for getting recipe syntax
|
||||
app.router.add_get('/api/recipe/{recipe_id}/syntax', routes.get_recipe_syntax)
|
||||
app.router.add_get('/api/lm/recipe/{recipe_id}/syntax', routes.get_recipe_syntax)
|
||||
|
||||
# Add new endpoint for updating recipe metadata (name, tags and source_path)
|
||||
app.router.add_put('/api/recipe/{recipe_id}/update', routes.update_recipe)
|
||||
app.router.add_put('/api/lm/recipe/{recipe_id}/update', routes.update_recipe)
|
||||
|
||||
# Add new endpoint for reconnecting deleted LoRAs
|
||||
app.router.add_post('/api/recipe/lora/reconnect', routes.reconnect_lora)
|
||||
app.router.add_post('/api/lm/recipe/lora/reconnect', routes.reconnect_lora)
|
||||
|
||||
# Add new endpoint for finding duplicate recipes
|
||||
app.router.add_get('/api/recipes/find-duplicates', routes.find_duplicates)
|
||||
app.router.add_get('/api/lm/recipes/find-duplicates', routes.find_duplicates)
|
||||
|
||||
# Add new endpoint for bulk deletion of recipes
|
||||
app.router.add_post('/api/recipes/bulk-delete', routes.bulk_delete)
|
||||
app.router.add_post('/api/lm/recipes/bulk-delete', routes.bulk_delete)
|
||||
|
||||
# Start cache initialization
|
||||
app.on_startup.append(routes._init_cache)
|
||||
|
||||
app.router.add_post('/api/recipes/save-from-widget', routes.save_recipe_from_widget)
|
||||
app.router.add_post('/api/lm/recipes/save-from-widget', routes.save_recipe_from_widget)
|
||||
|
||||
# Add route to get recipes for a specific Lora
|
||||
app.router.add_get('/api/recipes/for-lora', routes.get_recipes_for_lora)
|
||||
app.router.add_get('/api/lm/recipes/for-lora', routes.get_recipes_for_lora)
|
||||
|
||||
# Add new endpoint for scanning and rebuilding the recipe cache
|
||||
app.router.add_get('/api/recipes/scan', routes.scan_recipes)
|
||||
app.router.add_get('/api/lm/recipes/scan', routes.scan_recipes)
|
||||
|
||||
async def _init_cache(self, app):
|
||||
"""Initialize cache on startup"""
|
||||
@@ -372,21 +373,23 @@ class RecipeRoutes:
|
||||
"loras": []
|
||||
}, status=400)
|
||||
|
||||
# Download image directly from URL
|
||||
session = await self.civitai_client.session
|
||||
# Download image using unified downloader
|
||||
downloader = await get_downloader()
|
||||
# Create a temporary file to save the downloaded image
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
|
||||
temp_path = temp_file.name
|
||||
|
||||
async with session.get(image_url) as response:
|
||||
if response.status != 200:
|
||||
return web.json_response({
|
||||
"error": f"Failed to download image from URL: HTTP {response.status}",
|
||||
"loras": []
|
||||
}, status=400)
|
||||
|
||||
with open(temp_path, 'wb') as f:
|
||||
f.write(await response.read())
|
||||
success, result = await downloader.download_file(
|
||||
image_url,
|
||||
temp_path,
|
||||
use_auth=False # Image downloads typically don't need auth
|
||||
)
|
||||
|
||||
if not success:
|
||||
return web.json_response({
|
||||
"error": f"Failed to download image from URL: {result}",
|
||||
"loras": []
|
||||
}, status=400)
|
||||
|
||||
# Use meta field from image_info as metadata
|
||||
if 'meta' in image_info:
|
||||
@@ -430,8 +433,7 @@ class RecipeRoutes:
|
||||
# Parse the metadata
|
||||
result = await parser.parse_metadata(
|
||||
metadata,
|
||||
recipe_scanner=self.recipe_scanner,
|
||||
civitai_client=self.civitai_client
|
||||
recipe_scanner=self.recipe_scanner
|
||||
)
|
||||
|
||||
# For URL mode, include the image data as base64
|
||||
@@ -532,8 +534,7 @@ class RecipeRoutes:
|
||||
# Parse the metadata
|
||||
result = await parser.parse_metadata(
|
||||
metadata,
|
||||
recipe_scanner=self.recipe_scanner,
|
||||
civitai_client=self.civitai_client
|
||||
recipe_scanner=self.recipe_scanner
|
||||
)
|
||||
|
||||
# Add base64 image data to result
|
||||
|
||||
@@ -33,7 +33,13 @@ class StatsRoutes:
|
||||
self.lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||
self.checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
self.embedding_scanner = await ServiceRegistry.get_embedding_scanner()
|
||||
self.usage_stats = UsageStats()
|
||||
|
||||
# Only initialize usage stats if we have valid paths configured
|
||||
try:
|
||||
self.usage_stats = UsageStats()
|
||||
except RuntimeError as e:
|
||||
logger.warning(f"Could not initialize usage statistics: {e}")
|
||||
self.usage_stats = None
|
||||
|
||||
async def handle_stats_page(self, request: web.Request) -> web.Response:
|
||||
"""Handle GET /statistics request"""
|
||||
@@ -501,12 +507,12 @@ class StatsRoutes:
|
||||
app.router.add_get('/statistics', self.handle_stats_page)
|
||||
|
||||
# Register API routes
|
||||
app.router.add_get('/api/stats/collection-overview', self.get_collection_overview)
|
||||
app.router.add_get('/api/stats/usage-analytics', self.get_usage_analytics)
|
||||
app.router.add_get('/api/stats/base-model-distribution', self.get_base_model_distribution)
|
||||
app.router.add_get('/api/stats/tag-analytics', self.get_tag_analytics)
|
||||
app.router.add_get('/api/stats/storage-analytics', self.get_storage_analytics)
|
||||
app.router.add_get('/api/stats/insights', self.get_insights)
|
||||
app.router.add_get('/api/lm/stats/collection-overview', self.get_collection_overview)
|
||||
app.router.add_get('/api/lm/stats/usage-analytics', self.get_usage_analytics)
|
||||
app.router.add_get('/api/lm/stats/base-model-distribution', self.get_base_model_distribution)
|
||||
app.router.add_get('/api/lm/stats/tag-analytics', self.get_tag_analytics)
|
||||
app.router.add_get('/api/lm/stats/storage-analytics', self.get_storage_analytics)
|
||||
app.router.add_get('/api/lm/stats/insights', self.get_insights)
|
||||
|
||||
async def _on_startup(self, app):
|
||||
"""Initialize services when the app starts"""
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import os
|
||||
import aiohttp
|
||||
import logging
|
||||
import toml
|
||||
import git
|
||||
@@ -8,7 +7,7 @@ import shutil
|
||||
import tempfile
|
||||
from aiohttp import web
|
||||
from typing import Dict, List
|
||||
|
||||
from ..services.downloader import get_downloader
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -18,9 +17,9 @@ class UpdateRoutes:
|
||||
@staticmethod
|
||||
def setup_routes(app):
|
||||
"""Register update check routes"""
|
||||
app.router.add_get('/api/check-updates', UpdateRoutes.check_updates)
|
||||
app.router.add_get('/api/version-info', UpdateRoutes.get_version_info)
|
||||
app.router.add_post('/api/perform-update', UpdateRoutes.perform_update)
|
||||
app.router.add_get('/api/lm/check-updates', UpdateRoutes.check_updates)
|
||||
app.router.add_get('/api/lm/version-info', UpdateRoutes.get_version_info)
|
||||
app.router.add_post('/api/lm/perform-update', UpdateRoutes.perform_update)
|
||||
|
||||
@staticmethod
|
||||
async def check_updates(request):
|
||||
@@ -155,51 +154,66 @@ class UpdateRoutes:
|
||||
async def _download_and_replace_zip(plugin_root: str) -> tuple[bool, str]:
|
||||
"""
|
||||
Download latest release ZIP from GitHub and replace plugin files.
|
||||
Skips settings.json. Writes extracted file list to .tracking.
|
||||
Skips settings.json and civitai folder. Writes extracted file list to .tracking.
|
||||
"""
|
||||
repo_owner = "willmiao"
|
||||
repo_name = "ComfyUI-Lora-Manager"
|
||||
github_api = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(github_api) as resp:
|
||||
if resp.status != 200:
|
||||
logger.error(f"Failed to fetch release info: {resp.status}")
|
||||
return False, ""
|
||||
data = await resp.json()
|
||||
zip_url = data.get("zipball_url")
|
||||
version = data.get("tag_name", "unknown")
|
||||
downloader = await get_downloader()
|
||||
|
||||
# Get release info
|
||||
success, data = await downloader.make_request(
|
||||
'GET',
|
||||
github_api,
|
||||
use_auth=False
|
||||
)
|
||||
if not success:
|
||||
logger.error(f"Failed to fetch release info: {data}")
|
||||
return False, ""
|
||||
|
||||
zip_url = data.get("zipball_url")
|
||||
version = data.get("tag_name", "unknown")
|
||||
|
||||
# Download ZIP
|
||||
async with session.get(zip_url) as zip_resp:
|
||||
if zip_resp.status != 200:
|
||||
logger.error(f"Failed to download ZIP: {zip_resp.status}")
|
||||
return False, ""
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_zip:
|
||||
tmp_zip.write(await zip_resp.read())
|
||||
zip_path = tmp_zip.name
|
||||
# Download ZIP to temporary file
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_zip:
|
||||
tmp_zip_path = tmp_zip.name
|
||||
|
||||
success, result = await downloader.download_file(
|
||||
url=zip_url,
|
||||
save_path=tmp_zip_path,
|
||||
use_auth=False,
|
||||
allow_resume=False
|
||||
)
|
||||
|
||||
if not success:
|
||||
logger.error(f"Failed to download ZIP: {result}")
|
||||
return False, ""
|
||||
|
||||
UpdateRoutes._clean_plugin_folder(plugin_root, skip_files=['settings.json'])
|
||||
zip_path = tmp_zip_path
|
||||
|
||||
# Extract ZIP to temp dir
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
# Skip both settings.json and civitai folder
|
||||
UpdateRoutes._clean_plugin_folder(plugin_root, skip_files=['settings.json', 'civitai'])
|
||||
|
||||
# Extract ZIP to temp dir
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
|
||||
zip_ref.extractall(tmp_dir)
|
||||
# Find extracted folder (GitHub ZIP contains a root folder)
|
||||
extracted_root = next(os.scandir(tmp_dir)).path
|
||||
|
||||
# Copy files, skipping settings.json
|
||||
# Copy files, skipping settings.json and civitai folder
|
||||
for item in os.listdir(extracted_root):
|
||||
if item == 'settings.json' or item == 'civitai':
|
||||
continue
|
||||
src = os.path.join(extracted_root, item)
|
||||
dst = os.path.join(plugin_root, item)
|
||||
if os.path.isdir(src):
|
||||
if os.path.exists(dst):
|
||||
shutil.rmtree(dst)
|
||||
shutil.copytree(src, dst, ignore=shutil.ignore_patterns('settings.json'))
|
||||
shutil.copytree(src, dst, ignore=shutil.ignore_patterns('settings.json', 'civitai'))
|
||||
else:
|
||||
if item == 'settings.json':
|
||||
continue
|
||||
shutil.copy2(src, dst)
|
||||
|
||||
# Write .tracking file: list all files under extracted_root, relative to extracted_root
|
||||
@@ -207,15 +221,22 @@ class UpdateRoutes:
|
||||
tracking_info_file = os.path.join(plugin_root, '.tracking')
|
||||
tracking_files = []
|
||||
for root, dirs, files in os.walk(extracted_root):
|
||||
# Skip civitai folder and its contents
|
||||
rel_root = os.path.relpath(root, extracted_root)
|
||||
if rel_root == 'civitai' or rel_root.startswith('civitai' + os.sep):
|
||||
continue
|
||||
for file in files:
|
||||
rel_path = os.path.relpath(os.path.join(root, file), extracted_root)
|
||||
# Skip settings.json and any file under civitai
|
||||
if rel_path == 'settings.json' or rel_path.startswith('civitai' + os.sep):
|
||||
continue
|
||||
tracking_files.append(rel_path.replace("\\", "/"))
|
||||
with open(tracking_info_file, "w", encoding='utf-8') as file:
|
||||
file.write('\n'.join(tracking_files))
|
||||
|
||||
os.remove(zip_path)
|
||||
logger.info(f"Updated plugin via ZIP to {version}")
|
||||
return True, version
|
||||
os.remove(zip_path)
|
||||
logger.info(f"Updated plugin via ZIP to {version}")
|
||||
return True, version
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"ZIP update failed: {e}", exc_info=True)
|
||||
@@ -244,23 +265,23 @@ class UpdateRoutes:
|
||||
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/commits/main"
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(github_url, headers={'Accept': 'application/vnd.github+json'}) as response:
|
||||
if response.status != 200:
|
||||
logger.warning(f"Failed to fetch GitHub commit: {response.status}")
|
||||
return "main", []
|
||||
|
||||
data = await response.json()
|
||||
commit_sha = data.get('sha', '')[:7] # Short hash
|
||||
commit_message = data.get('commit', {}).get('message', '')
|
||||
|
||||
# Format as "main-{short_hash}"
|
||||
version = f"main-{commit_sha}"
|
||||
|
||||
# Use commit message as changelog
|
||||
changelog = [commit_message] if commit_message else []
|
||||
|
||||
return version, changelog
|
||||
downloader = await get_downloader()
|
||||
success, data = await downloader.make_request('GET', github_url, custom_headers={'Accept': 'application/vnd.github+json'})
|
||||
|
||||
if not success:
|
||||
logger.warning(f"Failed to fetch GitHub commit: {data}")
|
||||
return "main", []
|
||||
|
||||
commit_sha = data.get('sha', '')[:7] # Short hash
|
||||
commit_message = data.get('commit', {}).get('message', '')
|
||||
|
||||
# Format as "main-{short_hash}"
|
||||
version = f"main-{commit_sha}"
|
||||
|
||||
# Use commit message as changelog
|
||||
changelog = [commit_message] if commit_message else []
|
||||
|
||||
return version, changelog
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching nightly version: {e}", exc_info=True)
|
||||
@@ -410,22 +431,22 @@ class UpdateRoutes:
|
||||
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(github_url, headers={'Accept': 'application/vnd.github+json'}) as response:
|
||||
if response.status != 200:
|
||||
logger.warning(f"Failed to fetch GitHub release: {response.status}")
|
||||
return "v0.0.0", []
|
||||
|
||||
data = await response.json()
|
||||
version = data.get('tag_name', '')
|
||||
if not version.startswith('v'):
|
||||
version = f"v{version}"
|
||||
|
||||
# Extract changelog from release notes
|
||||
body = data.get('body', '')
|
||||
changelog = UpdateRoutes._parse_changelog(body)
|
||||
|
||||
return version, changelog
|
||||
downloader = await get_downloader()
|
||||
success, data = await downloader.make_request('GET', github_url, custom_headers={'Accept': 'application/vnd.github+json'})
|
||||
|
||||
if not success:
|
||||
logger.warning(f"Failed to fetch GitHub release: {data}")
|
||||
return "v0.0.0", []
|
||||
|
||||
version = data.get('tag_name', '')
|
||||
if not version.startswith('v'):
|
||||
version = f"v{version}"
|
||||
|
||||
# Extract changelog from release notes
|
||||
body = data.get('body', '')
|
||||
changelog = UpdateRoutes._parse_changelog(body)
|
||||
|
||||
return version, changelog
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching remote version: {e}", exc_info=True)
|
||||
|
||||
@@ -363,7 +363,7 @@ class BaseModelService(ABC):
|
||||
from ..config import config
|
||||
return config.get_preview_static_url(preview_url)
|
||||
|
||||
return None
|
||||
return '/loras_static/images/no-preview.png'
|
||||
|
||||
async def get_model_civitai_url(self, model_name: str) -> Dict[str, Optional[str]]:
|
||||
"""Get the Civitai URL for a model file"""
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
from datetime import datetime
|
||||
import aiohttp
|
||||
import os
|
||||
import logging
|
||||
import asyncio
|
||||
from email.parser import Parser
|
||||
from typing import Optional, Dict, Tuple, List
|
||||
from urllib.parse import unquote
|
||||
from .model_metadata_provider import CivitaiModelMetadataProvider, ModelMetadataProviderManager
|
||||
from .downloader import get_downloader
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -19,6 +17,11 @@ class CivitaiClient:
|
||||
async with cls._lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
|
||||
# Register this client as a metadata provider
|
||||
provider_manager = await ModelMetadataProviderManager.get_instance()
|
||||
provider_manager.register_provider('civitai', CivitaiModelMetadataProvider(cls._instance), True)
|
||||
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
@@ -28,80 +31,8 @@ class CivitaiClient:
|
||||
self._initialized = True
|
||||
|
||||
self.base_url = "https://civitai.com/api/v1"
|
||||
self.headers = {
|
||||
'User-Agent': 'ComfyUI-LoRA-Manager/1.0'
|
||||
}
|
||||
self._session = None
|
||||
self._session_created_at = None
|
||||
# Adjust chunk size based on storage type - consider making this configurable
|
||||
self.chunk_size = 4 * 1024 * 1024 # 4MB chunks for better HDD throughput
|
||||
|
||||
@property
|
||||
async def session(self) -> aiohttp.ClientSession:
|
||||
"""Lazy initialize the session"""
|
||||
if self._session is None:
|
||||
# Optimize TCP connection parameters
|
||||
connector = aiohttp.TCPConnector(
|
||||
ssl=True,
|
||||
limit=8, # Increase from 3 to 8 for better parallelism
|
||||
ttl_dns_cache=300, # Enable DNS caching with reasonable timeout
|
||||
force_close=False, # Keep connections for reuse
|
||||
enable_cleanup_closed=True
|
||||
)
|
||||
trust_env = True # Allow using system environment proxy settings
|
||||
# Configure timeout parameters - increase read timeout for large files and remove sock_read timeout
|
||||
timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=None)
|
||||
self._session = aiohttp.ClientSession(
|
||||
connector=connector,
|
||||
trust_env=trust_env,
|
||||
timeout=timeout
|
||||
)
|
||||
self._session_created_at = datetime.now()
|
||||
return self._session
|
||||
|
||||
async def _ensure_fresh_session(self):
|
||||
"""Refresh session if it's been open too long"""
|
||||
if self._session is not None:
|
||||
if not hasattr(self, '_session_created_at') or \
|
||||
(datetime.now() - self._session_created_at).total_seconds() > 300: # 5 minutes
|
||||
await self.close()
|
||||
self._session = None
|
||||
|
||||
return await self.session
|
||||
|
||||
def _parse_content_disposition(self, header: str) -> str:
|
||||
"""Parse filename from content-disposition header"""
|
||||
if not header:
|
||||
return None
|
||||
|
||||
# Handle quoted filenames
|
||||
if 'filename="' in header:
|
||||
start = header.index('filename="') + 10
|
||||
end = header.index('"', start)
|
||||
return unquote(header[start:end])
|
||||
|
||||
# Fallback to original parsing
|
||||
disposition = Parser().parsestr(f'Content-Disposition: {header}')
|
||||
filename = disposition.get_param('filename')
|
||||
if filename:
|
||||
return unquote(filename)
|
||||
return None
|
||||
|
||||
def _get_request_headers(self) -> dict:
|
||||
"""Get request headers with optional API key"""
|
||||
headers = {
|
||||
'User-Agent': 'ComfyUI-LoRA-Manager/1.0',
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
|
||||
from .settings_manager import settings
|
||||
api_key = settings.get('civitai_api_key')
|
||||
if (api_key):
|
||||
headers['Authorization'] = f'Bearer {api_key}'
|
||||
|
||||
return headers
|
||||
|
||||
async def _download_file(self, url: str, save_dir: str, default_filename: str, progress_callback=None) -> Tuple[bool, str]:
|
||||
async def download_file(self, url: str, save_dir: str, default_filename: str, progress_callback=None) -> Tuple[bool, str]:
|
||||
"""Download file with resumable downloads and retry mechanism
|
||||
|
||||
Args:
|
||||
@@ -113,215 +44,94 @@ class CivitaiClient:
|
||||
Returns:
|
||||
Tuple[bool, str]: (success, save_path or error message)
|
||||
"""
|
||||
max_retries = 5
|
||||
retry_count = 0
|
||||
base_delay = 2.0 # Base delay for exponential backoff
|
||||
|
||||
# Initial setup
|
||||
session = await self._ensure_fresh_session()
|
||||
downloader = await get_downloader()
|
||||
save_path = os.path.join(save_dir, default_filename)
|
||||
part_path = save_path + '.part'
|
||||
|
||||
# Get existing file size for resume
|
||||
resume_offset = 0
|
||||
if os.path.exists(part_path):
|
||||
resume_offset = os.path.getsize(part_path)
|
||||
logger.info(f"Resuming download from offset {resume_offset} bytes")
|
||||
# Use unified downloader with CivitAI authentication
|
||||
success, result = await downloader.download_file(
|
||||
url=url,
|
||||
save_path=save_path,
|
||||
progress_callback=progress_callback,
|
||||
use_auth=True, # Enable CivitAI authentication
|
||||
allow_resume=True
|
||||
)
|
||||
|
||||
total_size = 0
|
||||
filename = default_filename
|
||||
|
||||
while retry_count <= max_retries:
|
||||
try:
|
||||
headers = self._get_request_headers()
|
||||
|
||||
# Add Range header for resume if we have partial data
|
||||
if resume_offset > 0:
|
||||
headers['Range'] = f'bytes={resume_offset}-'
|
||||
|
||||
# Add Range header to allow resumable downloads
|
||||
headers['Accept-Encoding'] = 'identity' # Disable compression for better chunked downloads
|
||||
|
||||
logger.debug(f"Download attempt {retry_count + 1}/{max_retries + 1} from: {url}")
|
||||
if resume_offset > 0:
|
||||
logger.debug(f"Requesting range from byte {resume_offset}")
|
||||
|
||||
async with session.get(url, headers=headers, allow_redirects=True) as response:
|
||||
# Handle different response codes
|
||||
if response.status == 200:
|
||||
# Full content response
|
||||
if resume_offset > 0:
|
||||
# Server doesn't support ranges, restart from beginning
|
||||
logger.warning("Server doesn't support range requests, restarting download")
|
||||
resume_offset = 0
|
||||
if os.path.exists(part_path):
|
||||
os.remove(part_path)
|
||||
elif response.status == 206:
|
||||
# Partial content response (resume successful)
|
||||
content_range = response.headers.get('Content-Range')
|
||||
if content_range:
|
||||
# Parse total size from Content-Range header (e.g., "bytes 1024-2047/2048")
|
||||
range_parts = content_range.split('/')
|
||||
if len(range_parts) == 2:
|
||||
total_size = int(range_parts[1])
|
||||
logger.info(f"Successfully resumed download from byte {resume_offset}")
|
||||
elif response.status == 416:
|
||||
# Range not satisfiable - file might be complete or corrupted
|
||||
if os.path.exists(part_path):
|
||||
part_size = os.path.getsize(part_path)
|
||||
logger.warning(f"Range not satisfiable. Part file size: {part_size}")
|
||||
# Try to get actual file size
|
||||
head_response = await session.head(url, headers=self._get_request_headers())
|
||||
if head_response.status == 200:
|
||||
actual_size = int(head_response.headers.get('content-length', 0))
|
||||
if part_size == actual_size:
|
||||
# File is complete, just rename it
|
||||
os.rename(part_path, save_path)
|
||||
if progress_callback:
|
||||
await progress_callback(100)
|
||||
return True, save_path
|
||||
# Remove corrupted part file and restart
|
||||
os.remove(part_path)
|
||||
resume_offset = 0
|
||||
continue
|
||||
elif response.status == 401:
|
||||
logger.warning(f"Unauthorized access to resource: {url} (Status 401)")
|
||||
return False, "Invalid or missing CivitAI API key, or early access restriction."
|
||||
elif response.status == 403:
|
||||
logger.warning(f"Forbidden access to resource: {url} (Status 403)")
|
||||
return False, "Access forbidden: You don't have permission to download this file."
|
||||
else:
|
||||
logger.error(f"Download failed for {url} with status {response.status}")
|
||||
return False, f"Download failed with status {response.status}"
|
||||
|
||||
# Get total file size for progress calculation (if not set from Content-Range)
|
||||
if total_size == 0:
|
||||
total_size = int(response.headers.get('content-length', 0))
|
||||
if response.status == 206:
|
||||
# For partial content, add the offset to get total file size
|
||||
total_size += resume_offset
|
||||
return success, result
|
||||
|
||||
current_size = resume_offset
|
||||
last_progress_report_time = datetime.now()
|
||||
|
||||
# Stream download to file with progress updates using larger buffer
|
||||
loop = asyncio.get_running_loop()
|
||||
mode = 'ab' if resume_offset > 0 else 'wb'
|
||||
with open(part_path, mode) as f:
|
||||
async for chunk in response.content.iter_chunked(self.chunk_size):
|
||||
if chunk:
|
||||
# Run blocking file write in executor
|
||||
await loop.run_in_executor(None, f.write, chunk)
|
||||
current_size += len(chunk)
|
||||
|
||||
# Limit progress update frequency to reduce overhead
|
||||
now = datetime.now()
|
||||
time_diff = (now - last_progress_report_time).total_seconds()
|
||||
|
||||
if progress_callback and total_size and time_diff >= 1.0:
|
||||
progress = (current_size / total_size) * 100
|
||||
await progress_callback(progress)
|
||||
last_progress_report_time = now
|
||||
|
||||
# Download completed successfully
|
||||
# Verify file size if total_size was provided
|
||||
final_size = os.path.getsize(part_path)
|
||||
if total_size > 0 and final_size != total_size:
|
||||
logger.warning(f"File size mismatch. Expected: {total_size}, Got: {final_size}")
|
||||
# Don't treat this as fatal error, rename anyway
|
||||
|
||||
# Atomically rename .part to final file with retries
|
||||
max_rename_attempts = 5
|
||||
rename_attempt = 0
|
||||
rename_success = False
|
||||
|
||||
while rename_attempt < max_rename_attempts and not rename_success:
|
||||
try:
|
||||
os.rename(part_path, save_path)
|
||||
rename_success = True
|
||||
except PermissionError as e:
|
||||
rename_attempt += 1
|
||||
if rename_attempt < max_rename_attempts:
|
||||
logger.info(f"File still in use, retrying rename in 2 seconds (attempt {rename_attempt}/{max_rename_attempts})")
|
||||
await asyncio.sleep(2) # Wait before retrying
|
||||
else:
|
||||
logger.error(f"Failed to rename file after {max_rename_attempts} attempts: {e}")
|
||||
return False, f"Failed to finalize download: {str(e)}"
|
||||
|
||||
# Ensure 100% progress is reported
|
||||
if progress_callback:
|
||||
await progress_callback(100)
|
||||
|
||||
return True, save_path
|
||||
|
||||
except (aiohttp.ClientError, aiohttp.ClientPayloadError,
|
||||
aiohttp.ServerDisconnectedError, asyncio.TimeoutError) as e:
|
||||
retry_count += 1
|
||||
logger.warning(f"Network error during download (attempt {retry_count}/{max_retries + 1}): {e}")
|
||||
|
||||
if retry_count <= max_retries:
|
||||
# Calculate delay with exponential backoff
|
||||
delay = base_delay * (2 ** (retry_count - 1))
|
||||
logger.info(f"Retrying in {delay} seconds...")
|
||||
await asyncio.sleep(delay)
|
||||
|
||||
# Update resume offset for next attempt
|
||||
if os.path.exists(part_path):
|
||||
resume_offset = os.path.getsize(part_path)
|
||||
logger.info(f"Will resume from byte {resume_offset}")
|
||||
|
||||
# Refresh session to get new connection
|
||||
await self.close()
|
||||
session = await self._ensure_fresh_session()
|
||||
continue
|
||||
else:
|
||||
logger.error(f"Max retries exceeded for download: {e}")
|
||||
return False, f"Network error after {max_retries + 1} attempts: {str(e)}"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected download error: {e}")
|
||||
return False, str(e)
|
||||
|
||||
return False, f"Download failed after {max_retries + 1} attempts"
|
||||
|
||||
async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
|
||||
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
try:
|
||||
session = await self._ensure_fresh_session()
|
||||
async with session.get(f"{self.base_url}/model-versions/by-hash/{model_hash}") as response:
|
||||
if response.status == 200:
|
||||
return await response.json()
|
||||
return None
|
||||
downloader = await get_downloader()
|
||||
success, result = await downloader.make_request(
|
||||
'GET',
|
||||
f"{self.base_url}/model-versions/by-hash/{model_hash}",
|
||||
use_auth=True
|
||||
)
|
||||
if success:
|
||||
# Get model ID from version data
|
||||
model_id = result.get('modelId')
|
||||
if model_id:
|
||||
# Fetch additional model metadata
|
||||
success_model, data = await downloader.make_request(
|
||||
'GET',
|
||||
f"{self.base_url}/models/{model_id}",
|
||||
use_auth=True
|
||||
)
|
||||
if success_model:
|
||||
# Enrich version_info with model data
|
||||
result['model']['description'] = data.get("description")
|
||||
result['model']['tags'] = data.get("tags", [])
|
||||
|
||||
# Add creator from model data
|
||||
result['creator'] = data.get("creator")
|
||||
|
||||
return result, None
|
||||
|
||||
# Handle specific error cases
|
||||
if "not found" in str(result):
|
||||
return None, "Model not found"
|
||||
|
||||
# Other error cases
|
||||
logger.error(f"Failed to fetch model info for {model_hash[:10]}: {result}")
|
||||
return None, str(result)
|
||||
except Exception as e:
|
||||
logger.error(f"API Error: {str(e)}")
|
||||
return None
|
||||
return None, str(e)
|
||||
|
||||
async def download_preview_image(self, image_url: str, save_path: str):
|
||||
try:
|
||||
session = await self._ensure_fresh_session()
|
||||
async with session.get(image_url) as response:
|
||||
if response.status == 200:
|
||||
content = await response.read()
|
||||
with open(save_path, 'wb') as f:
|
||||
f.write(content)
|
||||
return True
|
||||
return False
|
||||
downloader = await get_downloader()
|
||||
success, content, headers = await downloader.download_to_memory(
|
||||
image_url,
|
||||
use_auth=False # Preview images don't need auth
|
||||
)
|
||||
if success:
|
||||
# Ensure directory exists
|
||||
os.makedirs(os.path.dirname(save_path), exist_ok=True)
|
||||
with open(save_path, 'wb') as f:
|
||||
f.write(content)
|
||||
return True
|
||||
return False
|
||||
except Exception as e:
|
||||
print(f"Download Error: {str(e)}")
|
||||
logger.error(f"Download Error: {str(e)}")
|
||||
return False
|
||||
|
||||
async def get_model_versions(self, model_id: str) -> List[Dict]:
|
||||
"""Get all versions of a model with local availability info"""
|
||||
try:
|
||||
session = await self._ensure_fresh_session() # Use fresh session
|
||||
async with session.get(f"{self.base_url}/models/{model_id}") as response:
|
||||
if response.status != 200:
|
||||
return None
|
||||
data = await response.json()
|
||||
downloader = await get_downloader()
|
||||
success, result = await downloader.make_request(
|
||||
'GET',
|
||||
f"{self.base_url}/models/{model_id}",
|
||||
use_auth=True
|
||||
)
|
||||
if success:
|
||||
# Also return model type along with versions
|
||||
return {
|
||||
'modelVersions': data.get('modelVersions', []),
|
||||
'type': data.get('type', '')
|
||||
'modelVersions': result.get('modelVersions', []),
|
||||
'type': result.get('type', ''),
|
||||
'name': result.get('name', '')
|
||||
}
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model versions: {e}")
|
||||
return None
|
||||
@@ -337,68 +147,74 @@ class CivitaiClient:
|
||||
Optional[Dict]: The model version data with additional fields or None if not found
|
||||
"""
|
||||
try:
|
||||
session = await self._ensure_fresh_session()
|
||||
headers = self._get_request_headers()
|
||||
downloader = await get_downloader()
|
||||
|
||||
# Case 1: Only version_id is provided
|
||||
if model_id is None and version_id is not None:
|
||||
# First get the version info to extract model_id
|
||||
async with session.get(f"{self.base_url}/model-versions/{version_id}", headers=headers) as response:
|
||||
if response.status != 200:
|
||||
return None
|
||||
|
||||
version = await response.json()
|
||||
model_id = version.get('modelId')
|
||||
|
||||
if not model_id:
|
||||
logger.error(f"No modelId found in version {version_id}")
|
||||
return None
|
||||
success, version = await downloader.make_request(
|
||||
'GET',
|
||||
f"{self.base_url}/model-versions/{version_id}",
|
||||
use_auth=True
|
||||
)
|
||||
if not success:
|
||||
return None
|
||||
|
||||
model_id = version.get('modelId')
|
||||
if not model_id:
|
||||
logger.error(f"No modelId found in version {version_id}")
|
||||
return None
|
||||
|
||||
# Now get the model data for additional metadata
|
||||
async with session.get(f"{self.base_url}/models/{model_id}") as response:
|
||||
if response.status != 200:
|
||||
return version # Return version without additional metadata
|
||||
|
||||
model_data = await response.json()
|
||||
|
||||
success, model_data = await downloader.make_request(
|
||||
'GET',
|
||||
f"{self.base_url}/models/{model_id}",
|
||||
use_auth=True
|
||||
)
|
||||
if success:
|
||||
# Enrich version with model data
|
||||
version['model']['description'] = model_data.get("description")
|
||||
version['model']['tags'] = model_data.get("tags", [])
|
||||
version['creator'] = model_data.get("creator")
|
||||
|
||||
return version
|
||||
|
||||
return version
|
||||
|
||||
# Case 2: model_id is provided (with or without version_id)
|
||||
elif model_id is not None:
|
||||
# Step 1: Get model data to find version_id if not provided and get additional metadata
|
||||
async with session.get(f"{self.base_url}/models/{model_id}") as response:
|
||||
if response.status != 200:
|
||||
return None
|
||||
|
||||
data = await response.json()
|
||||
model_versions = data.get('modelVersions', [])
|
||||
success, data = await downloader.make_request(
|
||||
'GET',
|
||||
f"{self.base_url}/models/{model_id}",
|
||||
use_auth=True
|
||||
)
|
||||
if not success:
|
||||
return None
|
||||
|
||||
# Step 2: Determine the version_id to use
|
||||
target_version_id = version_id
|
||||
if target_version_id is None:
|
||||
target_version_id = model_versions[0].get('id')
|
||||
model_versions = data.get('modelVersions', [])
|
||||
|
||||
# Step 2: Determine the version_id to use
|
||||
target_version_id = version_id
|
||||
if target_version_id is None:
|
||||
target_version_id = model_versions[0].get('id')
|
||||
|
||||
# Step 3: Get detailed version info using the version_id
|
||||
async with session.get(f"{self.base_url}/model-versions/{target_version_id}", headers=headers) as response:
|
||||
if response.status != 200:
|
||||
return None
|
||||
|
||||
version = await response.json()
|
||||
|
||||
# Step 4: Enrich version_info with model data
|
||||
# Add description and tags from model data
|
||||
version['model']['description'] = data.get("description")
|
||||
version['model']['tags'] = data.get("tags", [])
|
||||
|
||||
# Add creator from model data
|
||||
version['creator'] = data.get("creator")
|
||||
|
||||
return version
|
||||
success, version = await downloader.make_request(
|
||||
'GET',
|
||||
f"{self.base_url}/model-versions/{target_version_id}",
|
||||
use_auth=True
|
||||
)
|
||||
if not success:
|
||||
return None
|
||||
|
||||
# Step 4: Enrich version_info with model data
|
||||
# Add description and tags from model data
|
||||
version['model']['description'] = data.get("description")
|
||||
version['model']['tags'] = data.get("tags", [])
|
||||
|
||||
# Add creator from model data
|
||||
version['creator'] = data.get("creator")
|
||||
|
||||
return version
|
||||
|
||||
# Case 3: Neither model_id nor version_id provided
|
||||
else:
|
||||
@@ -421,116 +237,34 @@ class CivitaiClient:
|
||||
- An error message if there was an error, or None on success
|
||||
"""
|
||||
try:
|
||||
session = await self._ensure_fresh_session()
|
||||
downloader = await get_downloader()
|
||||
url = f"{self.base_url}/model-versions/{version_id}"
|
||||
headers = self._get_request_headers()
|
||||
|
||||
logger.debug(f"Resolving DNS for model version info: {url}")
|
||||
async with session.get(url, headers=headers) as response:
|
||||
if response.status == 200:
|
||||
logger.debug(f"Successfully fetched model version info for: {version_id}")
|
||||
return await response.json(), None
|
||||
|
||||
# Handle specific error cases
|
||||
if response.status == 404:
|
||||
# Try to parse the error message
|
||||
try:
|
||||
error_data = await response.json()
|
||||
error_msg = error_data.get('error', f"Model not found (status 404)")
|
||||
logger.warning(f"Model version not found: {version_id} - {error_msg}")
|
||||
return None, error_msg
|
||||
except:
|
||||
return None, "Model not found (status 404)"
|
||||
|
||||
# Other error cases
|
||||
logger.error(f"Failed to fetch model info for {version_id} (status {response.status})")
|
||||
return None, f"Failed to fetch model info (status {response.status})"
|
||||
success, result = await downloader.make_request(
|
||||
'GET',
|
||||
url,
|
||||
use_auth=True
|
||||
)
|
||||
|
||||
if success:
|
||||
logger.debug(f"Successfully fetched model version info for: {version_id}")
|
||||
return result, None
|
||||
|
||||
# Handle specific error cases
|
||||
if "not found" in str(result):
|
||||
error_msg = f"Model not found"
|
||||
logger.warning(f"Model version not found: {version_id} - {error_msg}")
|
||||
return None, error_msg
|
||||
|
||||
# Other error cases
|
||||
logger.error(f"Failed to fetch model info for {version_id}: {result}")
|
||||
return None, str(result)
|
||||
except Exception as e:
|
||||
error_msg = f"Error fetching model version info: {e}"
|
||||
logger.error(error_msg)
|
||||
return None, error_msg
|
||||
|
||||
async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
|
||||
"""Fetch model metadata (description, tags, and creator info) from Civitai API
|
||||
|
||||
Args:
|
||||
model_id: The Civitai model ID
|
||||
|
||||
Returns:
|
||||
Tuple[Optional[Dict], int]: A tuple containing:
|
||||
- A dictionary with model metadata or None if not found
|
||||
- The HTTP status code from the request
|
||||
"""
|
||||
try:
|
||||
session = await self._ensure_fresh_session()
|
||||
headers = self._get_request_headers()
|
||||
url = f"{self.base_url}/models/{model_id}"
|
||||
|
||||
async with session.get(url, headers=headers) as response:
|
||||
status_code = response.status
|
||||
|
||||
if status_code != 200:
|
||||
logger.warning(f"Failed to fetch model metadata: Status {status_code}")
|
||||
return None, status_code
|
||||
|
||||
data = await response.json()
|
||||
|
||||
# Extract relevant metadata
|
||||
metadata = {
|
||||
"description": data.get("description") or "No model description available",
|
||||
"tags": data.get("tags", []),
|
||||
"creator": {
|
||||
"username": data.get("creator", {}).get("username"),
|
||||
"image": data.get("creator", {}).get("image")
|
||||
}
|
||||
}
|
||||
|
||||
if metadata["description"] or metadata["tags"] or metadata["creator"]["username"]:
|
||||
return metadata, status_code
|
||||
else:
|
||||
logger.warning(f"No metadata found for model {model_id}")
|
||||
return None, status_code
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching model metadata: {e}", exc_info=True)
|
||||
return None, 0
|
||||
|
||||
# Keep old method for backward compatibility, delegating to the new one
|
||||
async def get_model_description(self, model_id: str) -> Optional[str]:
|
||||
"""Fetch the model description from Civitai API (Legacy method)"""
|
||||
metadata, _ = await self.get_model_metadata(model_id)
|
||||
return metadata.get("description") if metadata else None
|
||||
|
||||
async def close(self):
|
||||
"""Close the session if it exists"""
|
||||
if self._session is not None:
|
||||
await self._session.close()
|
||||
self._session = None
|
||||
|
||||
async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]:
|
||||
"""Get hash from Civitai API"""
|
||||
try:
|
||||
session = await self._ensure_fresh_session()
|
||||
if not session:
|
||||
return None
|
||||
|
||||
version_info = await session.get(f"{self.base_url}/model-versions/{model_version_id}")
|
||||
|
||||
if not version_info or not version_info.json().get('files'):
|
||||
return None
|
||||
|
||||
# Get hash from the first file
|
||||
for file_info in version_info.json().get('files', []):
|
||||
if file_info.get('hashes', {}).get('SHA256'):
|
||||
# Convert hash to lowercase to standardize
|
||||
hash_value = file_info['hashes']['SHA256'].lower()
|
||||
return hash_value
|
||||
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting hash from Civitai: {e}")
|
||||
return None
|
||||
|
||||
async def get_image_info(self, image_id: str) -> Optional[Dict]:
|
||||
"""Fetch image information from Civitai API
|
||||
|
||||
@@ -541,22 +275,25 @@ class CivitaiClient:
|
||||
Optional[Dict]: The image data or None if not found
|
||||
"""
|
||||
try:
|
||||
session = await self._ensure_fresh_session()
|
||||
headers = self._get_request_headers()
|
||||
downloader = await get_downloader()
|
||||
url = f"{self.base_url}/images?imageId={image_id}&nsfw=X"
|
||||
|
||||
logger.debug(f"Fetching image info for ID: {image_id}")
|
||||
async with session.get(url, headers=headers) as response:
|
||||
if response.status == 200:
|
||||
data = await response.json()
|
||||
if data and "items" in data and len(data["items"]) > 0:
|
||||
logger.debug(f"Successfully fetched image info for ID: {image_id}")
|
||||
return data["items"][0]
|
||||
logger.warning(f"No image found with ID: {image_id}")
|
||||
return None
|
||||
|
||||
logger.error(f"Failed to fetch image info for ID: {image_id} (status {response.status})")
|
||||
success, result = await downloader.make_request(
|
||||
'GET',
|
||||
url,
|
||||
use_auth=True
|
||||
)
|
||||
|
||||
if success:
|
||||
if result and "items" in result and len(result["items"]) > 0:
|
||||
logger.debug(f"Successfully fetched image info for ID: {image_id}")
|
||||
return result["items"][0]
|
||||
logger.warning(f"No image found with ID: {image_id}")
|
||||
return None
|
||||
|
||||
logger.error(f"Failed to fetch image info for ID: {image_id}: {result}")
|
||||
return None
|
||||
except Exception as e:
|
||||
error_msg = f"Error fetching image info: {e}"
|
||||
logger.error(error_msg)
|
||||
|
||||
@@ -10,6 +10,8 @@ from ..utils.exif_utils import ExifUtils
|
||||
from ..utils.metadata_manager import MetadataManager
|
||||
from .service_registry import ServiceRegistry
|
||||
from .settings_manager import settings
|
||||
from .metadata_service import get_default_metadata_provider
|
||||
from .downloader import get_downloader
|
||||
|
||||
# Download to temporary file first
|
||||
import tempfile
|
||||
@@ -34,17 +36,10 @@ class DownloadManager:
|
||||
return
|
||||
self._initialized = True
|
||||
|
||||
self._civitai_client = None # Will be lazily initialized
|
||||
# Add download management
|
||||
self._active_downloads = OrderedDict() # download_id -> download_info
|
||||
self._download_semaphore = asyncio.Semaphore(5) # Limit concurrent downloads
|
||||
self._download_tasks = {} # download_id -> asyncio.Task
|
||||
|
||||
async def _get_civitai_client(self):
|
||||
"""Lazily initialize CivitaiClient from registry"""
|
||||
if self._civitai_client is None:
|
||||
self._civitai_client = await ServiceRegistry.get_civitai_client()
|
||||
return self._civitai_client
|
||||
|
||||
async def _get_lora_scanner(self):
|
||||
"""Get the lora scanner from registry"""
|
||||
@@ -57,7 +52,7 @@ class DownloadManager:
|
||||
async def download_from_civitai(self, model_id: int = None, model_version_id: int = None,
|
||||
save_dir: str = None, relative_path: str = '',
|
||||
progress_callback=None, use_default_paths: bool = False,
|
||||
download_id: str = None) -> Dict:
|
||||
download_id: str = None, source: str = None) -> Dict:
|
||||
"""Download model from Civitai with task tracking and concurrency control
|
||||
|
||||
Args:
|
||||
@@ -68,6 +63,7 @@ class DownloadManager:
|
||||
progress_callback: Callback function for progress updates
|
||||
use_default_paths: Flag to use default paths
|
||||
download_id: Unique identifier for this download task
|
||||
source: Optional source parameter to specify metadata provider
|
||||
|
||||
Returns:
|
||||
Dict with download result
|
||||
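# Illustrative use of the new `source` parameter (sketch only; assumes an already
# initialized DownloadManager instance named `dm` and a placeholder version id):
#     result = await dm.download_from_civitai(
#         model_version_id=12345,
#         save_dir='/path/to/loras',
#         source='civarchive')  # omit `source` to use the default metadata provider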
@@ -91,7 +87,7 @@ class DownloadManager:
|
||||
download_task = asyncio.create_task(
|
||||
self._download_with_semaphore(
|
||||
task_id, model_id, model_version_id, save_dir,
|
||||
relative_path, progress_callback, use_default_paths
|
||||
relative_path, progress_callback, use_default_paths, source
|
||||
)
|
||||
)
|
||||
|
||||
@@ -112,7 +108,8 @@ class DownloadManager:
|
||||
|
||||
async def _download_with_semaphore(self, task_id: str, model_id: int, model_version_id: int,
|
||||
save_dir: str, relative_path: str,
|
||||
progress_callback=None, use_default_paths: bool = False):
|
||||
progress_callback=None, use_default_paths: bool = False,
|
||||
source: str = None):
|
||||
"""Execute download with semaphore to limit concurrency"""
|
||||
# Update status to waiting
|
||||
if task_id in self._active_downloads:
|
||||
@@ -142,7 +139,7 @@ class DownloadManager:
|
||||
result = await self._execute_original_download(
|
||||
model_id, model_version_id, save_dir,
|
||||
relative_path, tracking_callback, use_default_paths,
|
||||
task_id
|
||||
task_id, source
|
||||
)
|
||||
|
||||
# Update status based on result
|
||||
@@ -177,7 +174,7 @@ class DownloadManager:
|
||||
|
||||
async def _execute_original_download(self, model_id, model_version_id, save_dir,
|
||||
relative_path, progress_callback, use_default_paths,
|
||||
download_id=None):
|
||||
download_id=None, source=None):
|
||||
"""Wrapper for original download_from_civitai implementation"""
|
||||
try:
|
||||
# Check if model version already exists in library
|
||||
@@ -199,11 +196,15 @@ class DownloadManager:
|
||||
if await embedding_scanner.check_model_version_exists(model_version_id):
|
||||
return {'success': False, 'error': 'Model version already exists in embedding library'}
|
||||
|
||||
# Get civitai client
|
||||
civitai_client = await self._get_civitai_client()
|
||||
# Get metadata provider based on source parameter
|
||||
if source == 'civarchive':
|
||||
from .metadata_service import get_metadata_provider
|
||||
metadata_provider = await get_metadata_provider('civarchive')
|
||||
else:
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
|
||||
# Get version info based on the provided identifier
|
||||
version_info = await civitai_client.get_model_version(model_id, model_version_id)
|
||||
version_info = await metadata_provider.get_model_version(model_id, model_version_id)
|
||||
|
||||
if not version_info:
|
||||
return {'success': False, 'error': 'Failed to fetch model metadata'}
|
||||
@@ -293,6 +294,8 @@ class DownloadManager:
|
||||
file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
|
||||
if not file_info:
|
||||
return {'success': False, 'error': 'No primary file found in metadata'}
|
||||
if not file_info.get('downloadUrl'):
|
||||
return {'success': False, 'error': 'No download URL found for primary file'}
|
||||
|
||||
# 3. Prepare download
|
||||
file_name = file_info['name']
|
||||
@@ -394,8 +397,6 @@ class DownloadManager:
|
||||
model_type: str = "lora", download_id: str = None) -> Dict:
|
||||
"""Execute the actual download process including preview images and model files"""
|
||||
try:
|
||||
civitai_client = await self._get_civitai_client()
|
||||
|
||||
# Extract original filename details
|
||||
original_filename = os.path.basename(metadata.file_path)
|
||||
base_name, extension = os.path.splitext(original_filename)
|
||||
@@ -445,8 +446,14 @@ class DownloadManager:
|
||||
preview_ext = '.mp4'
|
||||
preview_path = os.path.splitext(save_path)[0] + preview_ext
|
||||
|
||||
# Download video directly
|
||||
if await civitai_client.download_preview_image(images[0]['url'], preview_path):
|
||||
# Download video directly using downloader
|
||||
downloader = await get_downloader()
|
||||
success, result = await downloader.download_file(
|
||||
images[0]['url'],
|
||||
preview_path,
|
||||
use_auth=False # Preview images typically don't need auth
|
||||
)
|
||||
if success:
|
||||
metadata.preview_url = preview_path.replace(os.sep, '/')
|
||||
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
|
||||
else:
|
||||
@@ -454,8 +461,16 @@ class DownloadManager:
|
||||
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
|
||||
temp_path = temp_file.name
|
||||
|
||||
# Download the original image to temp path
|
||||
if await civitai_client.download_preview_image(images[0]['url'], temp_path):
|
||||
# Download the original image to temp path using downloader
|
||||
downloader = await get_downloader()
|
||||
success, content, headers = await downloader.download_to_memory(
|
||||
images[0]['url'],
|
||||
use_auth=False
|
||||
)
|
||||
if success:
|
||||
# Save to temp file
|
||||
with open(temp_path, 'wb') as f:
|
||||
f.write(content)
|
||||
# Optimize and convert to WebP
|
||||
preview_path = os.path.splitext(save_path)[0] + '.webp'
|
||||
|
||||
@@ -486,12 +501,15 @@ class DownloadManager:
|
||||
if progress_callback:
|
||||
await progress_callback(3) # 3% progress after preview download
|
||||
|
||||
# Download model file with progress tracking
|
||||
success, result = await civitai_client._download_file(
|
||||
# Download model file with progress tracking using downloader
|
||||
downloader = await get_downloader()
|
||||
# Determine if the download URL is from Civitai
|
||||
use_auth = download_url.startswith("https://civitai.com/api/download/")
|
||||
success, result = await downloader.download_file(
|
||||
download_url,
|
||||
save_dir,
|
||||
os.path.basename(save_path),
|
||||
progress_callback=lambda p: self._handle_download_progress(p, progress_callback)
|
||||
save_path, # Use full path instead of separate dir and filename
|
||||
progress_callback=lambda p: self._handle_download_progress(p, progress_callback),
|
||||
use_auth=use_auth # Only use authentication for Civitai downloads
|
||||
)
|
||||
|
||||
if not success:
|
||||
|
||||
py/services/downloader.py (new file, 539 lines)
@@ -0,0 +1,539 @@
"""
Unified download manager for all HTTP/HTTPS downloads in the application.

This module provides a centralized download service with:
- Singleton pattern for global session management
- Support for authenticated downloads (e.g., CivitAI API key)
- Resumable downloads with automatic retry
- Progress tracking and callbacks
- Optimized connection pooling and timeouts
- Unified error handling and logging
"""
|
||||
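# Usage sketch (illustrative, not part of this file): callers elsewhere in the
# plugin obtain the shared instance via get_downloader() and reuse it, e.g.
#     downloader = await get_downloader()
#     ok, data = await downloader.make_request('GET', url, use_auth=True)
#     ok, path = await downloader.download_file(url, save_path, use_auth=True)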
import os
|
||||
import logging
|
||||
import asyncio
|
||||
import aiohttp
|
||||
from datetime import datetime
|
||||
from typing import Optional, Dict, Tuple, Callable, Union
|
||||
from ..services.settings_manager import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Downloader:
|
||||
"""Unified downloader for all HTTP/HTTPS downloads in the application."""
|
||||
|
||||
_instance = None
|
||||
_lock = asyncio.Lock()
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls):
|
||||
"""Get singleton instance of Downloader"""
|
||||
async with cls._lock:
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the downloader with optimal settings"""
|
||||
# Check if already initialized for singleton pattern
|
||||
if hasattr(self, '_initialized'):
|
||||
return
|
||||
self._initialized = True
|
||||
|
||||
# Session management
|
||||
self._session = None
|
||||
self._session_created_at = None
|
||||
self._proxy_url = None # Store proxy URL for current session
|
||||
|
||||
# Configuration
|
||||
self.chunk_size = 4 * 1024 * 1024 # 4MB chunks for better throughput
|
||||
self.max_retries = 5
|
||||
self.base_delay = 2.0 # Base delay for exponential backoff
|
||||
self.session_timeout = 300 # 5 minutes
|
||||
|
||||
# Default headers
|
||||
self.default_headers = {
|
||||
'User-Agent': 'ComfyUI-LoRA-Manager/1.0'
|
||||
}
|
||||
|
||||
@property
|
||||
async def session(self) -> aiohttp.ClientSession:
|
||||
"""Get or create the global aiohttp session with optimized settings"""
|
||||
if self._session is None or self._should_refresh_session():
|
||||
await self._create_session()
|
||||
return self._session
|
||||
|
||||
@property
|
||||
def proxy_url(self) -> Optional[str]:
|
||||
"""Get the current proxy URL (initialize if needed)"""
|
||||
if not hasattr(self, '_proxy_url'):
|
||||
self._proxy_url = None
|
||||
return self._proxy_url
|
||||
|
||||
def _should_refresh_session(self) -> bool:
|
||||
"""Check if session should be refreshed"""
|
||||
if self._session is None:
|
||||
return True
|
||||
|
||||
if not hasattr(self, '_session_created_at') or self._session_created_at is None:
|
||||
return True
|
||||
|
||||
# Refresh if session is older than timeout
|
||||
if (datetime.now() - self._session_created_at).total_seconds() > self.session_timeout:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
async def _create_session(self):
|
||||
"""Create a new aiohttp session with optimized settings"""
|
||||
# Close existing session if any
|
||||
if self._session is not None:
|
||||
await self._session.close()
|
||||
|
||||
# Check for app-level proxy settings
|
||||
proxy_url = None
|
||||
if settings.get('proxy_enabled', False):
|
||||
proxy_host = settings.get('proxy_host', '').strip()
|
||||
proxy_port = settings.get('proxy_port', '').strip()
|
||||
proxy_type = settings.get('proxy_type', 'http').lower()
|
||||
proxy_username = settings.get('proxy_username', '').strip()
|
||||
proxy_password = settings.get('proxy_password', '').strip()
|
||||
|
||||
if proxy_host and proxy_port:
|
||||
# Build proxy URL
|
||||
if proxy_username and proxy_password:
|
||||
proxy_url = f"{proxy_type}://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}"
|
||||
else:
|
||||
proxy_url = f"{proxy_type}://{proxy_host}:{proxy_port}"
|
||||
|
||||
logger.debug(f"Using app-level proxy: {proxy_type}://{proxy_host}:{proxy_port}")
|
||||
logger.debug("Proxy mode: app-level proxy is active.")
|
||||
else:
|
||||
logger.debug("Proxy mode: system-level proxy (trust_env) will be used if configured in environment.")
|
||||
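# Illustrative settings (sketch; key names come from the reads above, values are
# placeholders): proxy_enabled=true, proxy_type="http", proxy_host="127.0.0.1",
# proxy_port="7890" yields proxy_url="http://127.0.0.1:7890"; when a username and
# password are set the URL becomes "http://user:pass@127.0.0.1:7890".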
# Optimize TCP connection parameters
|
||||
connector = aiohttp.TCPConnector(
|
||||
ssl=True,
|
||||
limit=8, # Concurrent connections
|
||||
ttl_dns_cache=300, # DNS cache timeout
|
||||
force_close=False, # Keep connections for reuse
|
||||
enable_cleanup_closed=True
|
||||
)
|
||||
|
||||
# Configure timeout parameters
|
||||
timeout = aiohttp.ClientTimeout(
|
||||
total=None, # No total timeout for large downloads
|
||||
connect=60, # Connection timeout
|
||||
sock_read=300 # 5 minute socket read timeout
|
||||
)
|
||||
|
||||
self._session = aiohttp.ClientSession(
|
||||
connector=connector,
|
||||
trust_env=proxy_url is None, # Only use system proxy if no app-level proxy is set
|
||||
timeout=timeout
|
||||
)
|
||||
|
||||
# Store proxy URL for use in requests
|
||||
self._proxy_url = proxy_url
|
||||
self._session_created_at = datetime.now()
|
||||
|
||||
logger.debug("Created new HTTP session with proxy settings. App-level proxy: %s, System-level proxy (trust_env): %s", bool(proxy_url), proxy_url is None)
|
||||
|
||||
def _get_auth_headers(self, use_auth: bool = False) -> Dict[str, str]:
|
||||
"""Get headers with optional authentication"""
|
||||
headers = self.default_headers.copy()
|
||||
|
||||
if use_auth:
|
||||
# Add CivitAI API key if available
|
||||
api_key = settings.get('civitai_api_key')
|
||||
if api_key:
|
||||
headers['Authorization'] = f'Bearer {api_key}'
|
||||
headers['Content-Type'] = 'application/json'
|
||||
|
||||
return headers
|
||||
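# For reference (sketch): with use_auth=True and a configured civitai_api_key the
# returned headers look like
#     {'User-Agent': 'ComfyUI-LoRA-Manager/1.0',
#      'Authorization': 'Bearer <api_key>',
#      'Content-Type': 'application/json'}
# while use_auth=False returns only the default User-Agent header.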
|
||||
async def download_file(
|
||||
self,
|
||||
url: str,
|
||||
save_path: str,
|
||||
progress_callback: Optional[Callable[[float], None]] = None,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None,
|
||||
allow_resume: bool = True
|
||||
) -> Tuple[bool, str]:
|
||||
"""
|
||||
Download a file with resumable downloads and retry mechanism
|
||||
|
||||
Args:
|
||||
url: Download URL
|
||||
save_path: Full path where the file should be saved
|
||||
progress_callback: Optional callback for progress updates (0-100)
|
||||
use_auth: Whether to include authentication headers (e.g., CivitAI API key)
|
||||
custom_headers: Additional headers to include in request
|
||||
allow_resume: Whether to support resumable downloads
|
||||
|
||||
Returns:
|
||||
Tuple[bool, str]: (success, save_path or error message)
|
||||
"""
|
||||
retry_count = 0
|
||||
part_path = save_path + '.part' if allow_resume else save_path
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
if custom_headers:
|
||||
headers.update(custom_headers)
|
||||
|
||||
# Get existing file size for resume
|
||||
resume_offset = 0
|
||||
if allow_resume and os.path.exists(part_path):
|
||||
resume_offset = os.path.getsize(part_path)
|
||||
logger.info(f"Resuming download from offset {resume_offset} bytes")
|
||||
|
||||
total_size = 0
|
||||
|
||||
while retry_count <= self.max_retries:
|
||||
try:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[download_file] Using app-level proxy: {self.proxy_url}")
|
||||
else:
|
||||
logger.debug("[download_file] Using system-level proxy (trust_env) if configured.")
|
||||
|
||||
# Add Range header for resume if we have partial data
|
||||
request_headers = headers.copy()
|
||||
if allow_resume and resume_offset > 0:
|
||||
request_headers['Range'] = f'bytes={resume_offset}-'
|
||||
|
||||
# Disable compression for better chunked downloads
|
||||
request_headers['Accept-Encoding'] = 'identity'
|
||||
|
||||
logger.debug(f"Download attempt {retry_count + 1}/{self.max_retries + 1} from: {url}")
|
||||
if resume_offset > 0:
|
||||
logger.debug(f"Requesting range from byte {resume_offset}")
|
||||
|
||||
async with session.get(url, headers=request_headers, allow_redirects=True, proxy=self.proxy_url) as response:
|
||||
# Handle different response codes
|
||||
if response.status == 200:
|
||||
# Full content response
|
||||
if resume_offset > 0:
|
||||
# Server doesn't support ranges, restart from beginning
|
||||
logger.warning("Server doesn't support range requests, restarting download")
|
||||
resume_offset = 0
|
||||
if os.path.exists(part_path):
|
||||
os.remove(part_path)
|
||||
elif response.status == 206:
|
||||
# Partial content response (resume successful)
|
||||
content_range = response.headers.get('Content-Range')
|
||||
if content_range:
|
||||
# Parse total size from Content-Range header (e.g., "bytes 1024-2047/2048")
|
||||
range_parts = content_range.split('/')
|
||||
if len(range_parts) == 2:
|
||||
total_size = int(range_parts[1])
|
||||
logger.info(f"Successfully resumed download from byte {resume_offset}")
|
||||
elif response.status == 416:
|
||||
# Range not satisfiable - file might be complete or corrupted
|
||||
if allow_resume and os.path.exists(part_path):
|
||||
part_size = os.path.getsize(part_path)
|
||||
logger.warning(f"Range not satisfiable. Part file size: {part_size}")
|
||||
# Try to get actual file size
|
||||
head_response = await session.head(url, headers=headers, proxy=self.proxy_url)
|
||||
if head_response.status == 200:
|
||||
actual_size = int(head_response.headers.get('content-length', 0))
|
||||
if part_size == actual_size:
|
||||
# File is complete, just rename it
|
||||
if allow_resume:
|
||||
os.rename(part_path, save_path)
|
||||
if progress_callback:
|
||||
await progress_callback(100)
|
||||
return True, save_path
|
||||
# Remove corrupted part file and restart
|
||||
os.remove(part_path)
|
||||
resume_offset = 0
|
||||
continue
|
||||
elif response.status == 401:
|
||||
logger.warning(f"Unauthorized access to resource: {url} (Status 401)")
|
||||
return False, "Invalid or missing API key, or early access restriction."
|
||||
elif response.status == 403:
|
||||
logger.warning(f"Forbidden access to resource: {url} (Status 403)")
|
||||
return False, "Access forbidden: You don't have permission to download this file."
|
||||
elif response.status == 404:
|
||||
logger.warning(f"Resource not found: {url} (Status 404)")
|
||||
return False, "File not found - the download link may be invalid or expired."
|
||||
else:
|
||||
logger.error(f"Download failed for {url} with status {response.status}")
|
||||
return False, f"Download failed with status {response.status}"
|
||||
|
||||
# Get total file size for progress calculation (if not set from Content-Range)
|
||||
if total_size == 0:
|
||||
total_size = int(response.headers.get('content-length', 0))
|
||||
if response.status == 206:
|
||||
# For partial content, add the offset to get total file size
|
||||
total_size += resume_offset
|
||||
|
||||
current_size = resume_offset
|
||||
last_progress_report_time = datetime.now()
|
||||
|
||||
# Ensure directory exists
|
||||
os.makedirs(os.path.dirname(save_path), exist_ok=True)
|
||||
|
||||
# Stream download to file with progress updates
|
||||
loop = asyncio.get_running_loop()
|
||||
mode = 'ab' if (allow_resume and resume_offset > 0) else 'wb'
|
||||
with open(part_path, mode) as f:
|
||||
async for chunk in response.content.iter_chunked(self.chunk_size):
|
||||
if chunk:
|
||||
# Run blocking file write in executor
|
||||
await loop.run_in_executor(None, f.write, chunk)
|
||||
current_size += len(chunk)
|
||||
|
||||
# Limit progress update frequency to reduce overhead
|
||||
now = datetime.now()
|
||||
time_diff = (now - last_progress_report_time).total_seconds()
|
||||
|
||||
if progress_callback and total_size and time_diff >= 1.0:
|
||||
progress = (current_size / total_size) * 100
|
||||
await progress_callback(progress)
|
||||
last_progress_report_time = now
|
||||
|
||||
# Download completed successfully
|
||||
# Verify file size if total_size was provided
|
||||
final_size = os.path.getsize(part_path)
|
||||
if total_size > 0 and final_size != total_size:
|
||||
logger.warning(f"File size mismatch. Expected: {total_size}, Got: {final_size}")
|
||||
# Don't treat this as fatal error, continue anyway
|
||||
|
||||
# Atomically rename .part to final file (only if using resume)
|
||||
if allow_resume and part_path != save_path:
|
||||
max_rename_attempts = 5
|
||||
rename_attempt = 0
|
||||
rename_success = False
|
||||
|
||||
while rename_attempt < max_rename_attempts and not rename_success:
|
||||
try:
|
||||
# If the destination file exists, remove it first (Windows safe)
|
||||
if os.path.exists(save_path):
|
||||
os.remove(save_path)
|
||||
|
||||
os.rename(part_path, save_path)
|
||||
rename_success = True
|
||||
except PermissionError as e:
|
||||
rename_attempt += 1
|
||||
if rename_attempt < max_rename_attempts:
|
||||
logger.info(f"File still in use, retrying rename in 2 seconds (attempt {rename_attempt}/{max_rename_attempts})")
|
||||
await asyncio.sleep(2)
|
||||
else:
|
||||
logger.error(f"Failed to rename file after {max_rename_attempts} attempts: {e}")
|
||||
return False, f"Failed to finalize download: {str(e)}"
|
||||
|
||||
# Ensure 100% progress is reported
|
||||
if progress_callback:
|
||||
await progress_callback(100)
|
||||
|
||||
return True, save_path
|
||||
|
||||
except (aiohttp.ClientError, aiohttp.ClientPayloadError,
|
||||
aiohttp.ServerDisconnectedError, asyncio.TimeoutError) as e:
|
||||
retry_count += 1
|
||||
logger.warning(f"Network error during download (attempt {retry_count}/{self.max_retries + 1}): {e}")
|
||||
|
||||
if retry_count <= self.max_retries:
|
||||
# Calculate delay with exponential backoff
|
||||
delay = self.base_delay * (2 ** (retry_count - 1))
|
||||
logger.info(f"Retrying in {delay} seconds...")
|
||||
await asyncio.sleep(delay)
|
||||
|
||||
# Update resume offset for next attempt
|
||||
if allow_resume and os.path.exists(part_path):
|
||||
resume_offset = os.path.getsize(part_path)
|
||||
logger.info(f"Will resume from byte {resume_offset}")
|
||||
|
||||
# Refresh session to get new connection
|
||||
await self._create_session()
|
||||
continue
|
||||
else:
|
||||
logger.error(f"Max retries exceeded for download: {e}")
|
||||
return False, f"Network error after {self.max_retries + 1} attempts: {str(e)}"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected download error: {e}")
|
||||
return False, str(e)
|
||||
|
||||
return False, f"Download failed after {self.max_retries + 1} attempts"
|
||||
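# Example call (sketch; `report` is a hypothetical progress coroutine):
#     async def report(pct: float):
#         logger.info(f"download at {pct:.1f}%")
#     ok, result = await downloader.download_file(
#         url, '/path/to/model.safetensors',
#         progress_callback=report, use_auth=True)
#     # on success `result` is the final save path, otherwise an error message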
|
||||
async def download_to_memory(
|
||||
self,
|
||||
url: str,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None,
|
||||
return_headers: bool = False
|
||||
) -> Tuple[bool, Union[bytes, str], Optional[Dict]]:
|
||||
"""
|
||||
Download a file to memory (for small files like preview images)
|
||||
|
||||
Args:
|
||||
url: Download URL
|
||||
use_auth: Whether to include authentication headers
|
||||
custom_headers: Additional headers to include in request
|
||||
return_headers: Whether to return response headers along with content
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Union[bytes, str], Optional[Dict]]: (success, content or error message, response headers if requested)
|
||||
"""
|
||||
try:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[download_to_memory] Using app-level proxy: {self.proxy_url}")
|
||||
else:
|
||||
logger.debug("[download_to_memory] Using system-level proxy (trust_env) if configured.")
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
if custom_headers:
|
||||
headers.update(custom_headers)
|
||||
|
||||
async with session.get(url, headers=headers, proxy=self.proxy_url) as response:
|
||||
if response.status == 200:
|
||||
content = await response.read()
|
||||
if return_headers:
|
||||
return True, content, dict(response.headers)
|
||||
else:
|
||||
return True, content, None
|
||||
elif response.status == 401:
|
||||
error_msg = "Unauthorized access - invalid or missing API key"
|
||||
return False, error_msg, None
|
||||
elif response.status == 403:
|
||||
error_msg = "Access forbidden"
|
||||
return False, error_msg, None
|
||||
elif response.status == 404:
|
||||
error_msg = "File not found"
|
||||
return False, error_msg, None
|
||||
else:
|
||||
error_msg = f"Download failed with status {response.status}"
|
||||
return False, error_msg, None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error downloading to memory from {url}: {e}")
|
||||
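# Example call (sketch): fetch a small file and inspect its response headers
#     ok, content, hdrs = await downloader.download_to_memory(url, return_headers=True)
#     if ok:
#         content_type = hdrs.get('Content-Type', '')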
return False, str(e), None
|
||||
|
||||
async def get_response_headers(
|
||||
self,
|
||||
url: str,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None
|
||||
) -> Tuple[bool, Union[Dict, str]]:
|
||||
"""
|
||||
Get response headers without downloading the full content
|
||||
|
||||
Args:
|
||||
url: URL to check
|
||||
use_auth: Whether to include authentication headers
|
||||
custom_headers: Additional headers to include in request
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Union[Dict, str]]: (success, headers dict or error message)
|
||||
"""
|
||||
try:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[get_response_headers] Using app-level proxy: {self.proxy_url}")
|
||||
else:
|
||||
logger.debug("[get_response_headers] Using system-level proxy (trust_env) if configured.")
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
if custom_headers:
|
||||
headers.update(custom_headers)
|
||||
|
||||
async with session.head(url, headers=headers, proxy=self.proxy_url) as response:
|
||||
if response.status == 200:
|
||||
return True, dict(response.headers)
|
||||
else:
|
||||
return False, f"Head request failed with status {response.status}"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting headers from {url}: {e}")
|
||||
return False, str(e)
|
||||
|
||||
async def make_request(
|
||||
self,
|
||||
method: str,
|
||||
url: str,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None,
|
||||
**kwargs
|
||||
) -> Tuple[bool, Union[Dict, str]]:
|
||||
"""
|
||||
Make a generic HTTP request and return JSON response
|
||||
|
||||
Args:
|
||||
method: HTTP method (GET, POST, etc.)
|
||||
url: Request URL
|
||||
use_auth: Whether to include authentication headers
|
||||
custom_headers: Additional headers to include in request
|
||||
**kwargs: Additional arguments for aiohttp request
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Union[Dict, str]]: (success, response data or error message)
|
||||
"""
|
||||
try:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[make_request] Using app-level proxy: {self.proxy_url}")
|
||||
else:
|
||||
logger.debug("[make_request] Using system-level proxy (trust_env) if configured.")
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
if custom_headers:
|
||||
headers.update(custom_headers)
|
||||
|
||||
# Add proxy to kwargs if not already present
|
||||
if 'proxy' not in kwargs:
|
||||
kwargs['proxy'] = self.proxy_url
|
||||
|
||||
async with session.request(method, url, headers=headers, **kwargs) as response:
|
||||
if response.status == 200:
|
||||
# Try to parse as JSON, fall back to text
|
||||
try:
|
||||
data = await response.json()
|
||||
return True, data
|
||||
except:
|
||||
text = await response.text()
|
||||
return True, text
|
||||
elif response.status == 401:
|
||||
return False, "Unauthorized access - invalid or missing API key"
|
||||
elif response.status == 403:
|
||||
return False, "Access forbidden"
|
||||
elif response.status == 404:
|
||||
return False, "Resource not found"
|
||||
else:
|
||||
return False, f"Request failed with status {response.status}"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error making {method} request to {url}: {e}")
|
||||
return False, str(e)
|
||||
|
||||
async def close(self):
|
||||
"""Close the HTTP session"""
|
||||
if self._session is not None:
|
||||
await self._session.close()
|
||||
self._session = None
|
||||
self._session_created_at = None
|
||||
self._proxy_url = None
|
||||
logger.debug("Closed HTTP session")
|
||||
|
||||
async def refresh_session(self):
|
||||
"""Force refresh the HTTP session (useful when proxy settings change)"""
|
||||
await self._create_session()
|
||||
logger.info("HTTP session refreshed due to settings change")
|
||||
|
||||
|
||||
# Global instance accessor
|
||||
async def get_downloader() -> Downloader:
|
||||
"""Get the global downloader instance"""
|
||||
return await Downloader.get_instance()
|
||||
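A minimal usage sketch of the shared downloader singleton (the import path and the Civitai endpoint URL are assumptions for illustration, not part of this diff):

```python
from py.services.downloader import get_downloader  # import path assumed

async def fetch_version_json(version_id: int):
    """Fetch a model-version record as JSON through the shared session and proxy settings."""
    downloader = await get_downloader()
    # make_request returns (success, parsed JSON dict or error string)
    ok, data = await downloader.make_request(
        "GET",
        f"https://civitai.com/api/v1/model-versions/{version_id}",  # illustrative endpoint
        use_auth=True,
    )
    return data if ok else None
```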
151 py/services/metadata_archive_manager.py Normal file
@@ -0,0 +1,151 @@
import zipfile
import logging
import asyncio
from pathlib import Path
from typing import Optional
from .downloader import get_downloader

logger = logging.getLogger(__name__)

class MetadataArchiveManager:
    """Manages downloading and extracting Civitai metadata archive database"""

    DOWNLOAD_URLS = [
        "https://github.com/willmiao/civitai-metadata-archive-db/releases/download/db-2025-08-08/civitai.zip",
        "https://huggingface.co/datasets/willmiao/civitai-metadata-archive-db/blob/main/civitai.zip"
    ]

    def __init__(self, base_path: str):
        """Initialize with base path where files will be stored"""
        self.base_path = Path(base_path)
        self.civitai_folder = self.base_path / "civitai"
        self.archive_path = self.base_path / "civitai.zip"
        self.db_path = self.civitai_folder / "civitai.sqlite"

    def is_database_available(self) -> bool:
        """Check if the SQLite database is available and valid"""
        return self.db_path.exists() and self.db_path.stat().st_size > 0

    def get_database_path(self) -> Optional[str]:
        """Get the path to the SQLite database if available"""
        if self.is_database_available():
            return str(self.db_path)
        return None

    async def download_and_extract_database(self, progress_callback=None) -> bool:
        """Download and extract the metadata archive database

        Args:
            progress_callback: Optional callback function to report progress

        Returns:
            bool: True if successful, False otherwise
        """
        try:
            # Create directories if they don't exist
            self.base_path.mkdir(parents=True, exist_ok=True)
            self.civitai_folder.mkdir(parents=True, exist_ok=True)

            # Download the archive
            if not await self._download_archive(progress_callback):
                return False

            # Extract the archive
            if not await self._extract_archive(progress_callback):
                return False

            # Clean up the archive file
            if self.archive_path.exists():
                self.archive_path.unlink()

            logger.info(f"Successfully downloaded and extracted metadata database to {self.db_path}")
            return True

        except Exception as e:
            logger.error(f"Error downloading and extracting metadata database: {e}", exc_info=True)
            return False

    async def _download_archive(self, progress_callback=None) -> bool:
        """Download the zip archive from one of the available URLs"""
        downloader = await get_downloader()

        for url in self.DOWNLOAD_URLS:
            try:
                logger.info(f"Attempting to download from {url}")

                if progress_callback:
                    progress_callback("download", f"Downloading from {url}")

                # Custom progress callback to report download progress
                async def download_progress(progress):
                    if progress_callback:
                        progress_callback("download", f"Downloading archive... {progress:.1f}%")

                success, result = await downloader.download_file(
                    url=url,
                    save_path=str(self.archive_path),
                    progress_callback=download_progress,
                    use_auth=False,  # Public download, no auth needed
                    allow_resume=True
                )

                if success:
                    logger.info(f"Successfully downloaded archive from {url}")
                    return True
                else:
                    logger.warning(f"Failed to download from {url}: {result}")
                    continue

            except Exception as e:
                logger.warning(f"Error downloading from {url}: {e}")
                continue

        logger.error("Failed to download archive from any URL")
        return False

    async def _extract_archive(self, progress_callback=None) -> bool:
        """Extract the zip archive to the civitai folder"""
        try:
            if progress_callback:
                progress_callback("extract", "Extracting archive...")

            # Run extraction in thread pool to avoid blocking
            loop = asyncio.get_event_loop()
            await loop.run_in_executor(None, self._extract_zip_sync)

            if progress_callback:
                progress_callback("extract", "Extraction completed")

            return True

        except Exception as e:
            logger.error(f"Error extracting archive: {e}", exc_info=True)
            return False

    def _extract_zip_sync(self):
        """Synchronous zip extraction (runs in thread pool)"""
        with zipfile.ZipFile(self.archive_path, 'r') as archive:
            archive.extractall(path=self.base_path)

    async def remove_database(self) -> bool:
        """Remove the metadata database and folder"""
        try:
            if self.civitai_folder.exists():
                # Remove all files in the civitai folder
                for file_path in self.civitai_folder.iterdir():
                    if file_path.is_file():
                        file_path.unlink()

                # Remove the folder itself
                self.civitai_folder.rmdir()

            # Also remove the archive file if it exists
            if self.archive_path.exists():
                self.archive_path.unlink()

            logger.info("Successfully removed metadata database")
            return True

        except Exception as e:
            logger.error(f"Error removing metadata database: {e}", exc_info=True)
            return False
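A short sketch of driving the archive manager (the base path and the print-based callback are illustrative; the real caller in the settings routes is not shown in this diff):

```python
from typing import Optional
from py.services.metadata_archive_manager import MetadataArchiveManager  # import path assumed

async def ensure_archive_db(base_path: str) -> Optional[str]:
    manager = MetadataArchiveManager(base_path)
    if not manager.is_database_available():
        # The callback receives (stage, message) pairs, matching the calls above
        ok = await manager.download_and_extract_database(
            progress_callback=lambda stage, msg: print(f"[{stage}] {msg}")
        )
        if not ok:
            return None
    return manager.get_database_path()
```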
117 py/services/metadata_service.py Normal file
@@ -0,0 +1,117 @@
import os
import logging
from .model_metadata_provider import (
    ModelMetadataProviderManager,
    SQLiteModelMetadataProvider,
    CivitaiModelMetadataProvider,
    FallbackMetadataProvider
)
from .settings_manager import settings
from .metadata_archive_manager import MetadataArchiveManager
from .service_registry import ServiceRegistry

logger = logging.getLogger(__name__)

async def initialize_metadata_providers():
    """Initialize and configure all metadata providers based on settings"""
    provider_manager = await ModelMetadataProviderManager.get_instance()

    # Clear existing providers to allow reinitialization
    provider_manager.providers.clear()
    provider_manager.default_provider = None

    # Get settings
    enable_archive_db = settings.get('enable_metadata_archive_db', False)

    providers = []

    # Initialize archive database provider if enabled
    if enable_archive_db:
        try:
            # Initialize archive manager
            base_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            archive_manager = MetadataArchiveManager(base_path)

            db_path = archive_manager.get_database_path()
            if db_path and os.path.exists(db_path):
                sqlite_provider = SQLiteModelMetadataProvider(db_path)
                provider_manager.register_provider('sqlite', sqlite_provider)
                providers.append(('sqlite', sqlite_provider))
                logger.info(f"SQLite metadata provider registered with database: {db_path}")
            else:
                logger.warning("Metadata archive database is enabled but database file not found")
        except Exception as e:
            logger.error(f"Failed to initialize SQLite metadata provider: {e}")

    # Initialize Civitai API provider (always available as fallback)
    try:
        civitai_client = await ServiceRegistry.get_civitai_client()
        civitai_provider = CivitaiModelMetadataProvider(civitai_client)
        provider_manager.register_provider('civitai_api', civitai_provider)
        providers.append(('civitai_api', civitai_provider))
        logger.debug("Civitai API metadata provider registered")
    except Exception as e:
        logger.error(f"Failed to initialize Civitai API metadata provider: {e}")

    # Register CivArchive provider, but do NOT add to fallback providers
    try:
        from .model_metadata_provider import CivArchiveModelMetadataProvider
        civarchive_provider = CivArchiveModelMetadataProvider()
        provider_manager.register_provider('civarchive', civarchive_provider)
        logger.debug("CivArchive metadata provider registered (not included in fallback)")
    except Exception as e:
        logger.error(f"Failed to initialize CivArchive metadata provider: {e}")

    # Set up fallback provider based on available providers
    if len(providers) > 1:
        # Always use Civitai API first, then Archive DB
        ordered_providers = []
        ordered_providers.extend([p[1] for p in providers if p[0] == 'civitai_api'])
        ordered_providers.extend([p[1] for p in providers if p[0] == 'sqlite'])

        if ordered_providers:
            fallback_provider = FallbackMetadataProvider(ordered_providers)
            provider_manager.register_provider('fallback', fallback_provider, is_default=True)
            logger.info(f"Fallback metadata provider registered with {len(ordered_providers)} providers, Civitai API first")
    elif len(providers) == 1:
        # Only one provider available, set it as default
        provider_name, provider = providers[0]
        provider_manager.register_provider(provider_name, provider, is_default=True)
        logger.debug(f"Single metadata provider registered as default: {provider_name}")
    else:
        logger.warning("No metadata providers available - this may cause metadata lookup failures")

    return provider_manager

async def update_metadata_providers():
    """Update metadata providers based on current settings"""
    try:
        # Get current settings
        enable_archive_db = settings.get('enable_metadata_archive_db', False)

        # Reinitialize all providers with new settings
        provider_manager = await initialize_metadata_providers()

        logger.info(f"Updated metadata providers, archive_db enabled: {enable_archive_db}")
        return provider_manager
    except Exception as e:
        logger.error(f"Failed to update metadata providers: {e}")
        return await ModelMetadataProviderManager.get_instance()

async def get_metadata_archive_manager():
    """Get metadata archive manager instance"""
    base_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    return MetadataArchiveManager(base_path)

async def get_metadata_provider(provider_name: str = None):
    """Get a specific metadata provider or default provider"""
    provider_manager = await ModelMetadataProviderManager.get_instance()

    if provider_name:
        return provider_manager._get_provider(provider_name)

    return provider_manager._get_provider()

async def get_default_metadata_provider():
    """Get the default metadata provider (fallback or single provider)"""
    return await get_metadata_provider()
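With the providers initialized, a hash lookup goes through the default (fallback) provider; a sketch, assuming an async caller and a known SHA256 value:

```python
from py.services.metadata_service import get_default_metadata_provider  # import path assumed

async def lookup_by_hash(sha256: str):
    provider = await get_default_metadata_provider()
    # The fallback chain tries the Civitai API first, then the local SQLite
    # archive, so metadata for models deleted from Civitai can still resolve.
    version, error = await provider.get_model_by_hash(sha256)
    if version is None:
        print(f"Lookup failed: {error}")
    return version
```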
463 py/services/model_file_service.py Normal file
@@ -0,0 +1,463 @@
|
||||
import asyncio
|
||||
import os
|
||||
import logging
|
||||
from typing import List, Dict, Optional, Any, Set
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
from ..utils.utils import calculate_relative_path_for_model, remove_empty_dirs
|
||||
from ..utils.constants import AUTO_ORGANIZE_BATCH_SIZE
|
||||
from ..services.settings_manager import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ProgressCallback(ABC):
|
||||
"""Abstract callback interface for progress reporting"""
|
||||
|
||||
@abstractmethod
|
||||
async def on_progress(self, progress_data: Dict[str, Any]) -> None:
|
||||
"""Called when progress is updated"""
|
||||
pass
|
||||
|
||||
|
||||
class AutoOrganizeResult:
|
||||
"""Result object for auto-organize operations"""
|
||||
|
||||
def __init__(self):
|
||||
self.total: int = 0
|
||||
self.processed: int = 0
|
||||
self.success_count: int = 0
|
||||
self.failure_count: int = 0
|
||||
self.skipped_count: int = 0
|
||||
self.operation_type: str = 'unknown'
|
||||
self.cleanup_counts: Dict[str, int] = {}
|
||||
self.results: List[Dict[str, Any]] = []
|
||||
self.results_truncated: bool = False
|
||||
self.sample_results: List[Dict[str, Any]] = []
|
||||
self.is_flat_structure: bool = False
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Convert result to dictionary"""
|
||||
result = {
|
||||
'success': True,
|
||||
'message': f'Auto-organize {self.operation_type} completed: {self.success_count} moved, {self.skipped_count} skipped, {self.failure_count} failed out of {self.total} total',
|
||||
'summary': {
|
||||
'total': self.total,
|
||||
'success': self.success_count,
|
||||
'skipped': self.skipped_count,
|
||||
'failures': self.failure_count,
|
||||
'organization_type': 'flat' if self.is_flat_structure else 'structured',
|
||||
'cleaned_dirs': self.cleanup_counts,
|
||||
'operation_type': self.operation_type
|
||||
}
|
||||
}
|
||||
|
||||
if self.results_truncated:
|
||||
result['results_truncated'] = True
|
||||
result['sample_results'] = self.sample_results
|
||||
else:
|
||||
result['results'] = self.results
|
||||
|
||||
return result
|
||||
|
||||
|
||||
class ModelFileService:
|
||||
"""Service for handling model file operations and organization"""
|
||||
|
||||
def __init__(self, scanner, model_type: str):
|
||||
"""Initialize the service
|
||||
|
||||
Args:
|
||||
scanner: Model scanner instance
|
||||
model_type: Type of model (e.g., 'lora', 'checkpoint')
|
||||
"""
|
||||
self.scanner = scanner
|
||||
self.model_type = model_type
|
||||
|
||||
def get_model_roots(self) -> List[str]:
|
||||
"""Get model root directories"""
|
||||
return self.scanner.get_model_roots()
|
||||
|
||||
async def auto_organize_models(
|
||||
self,
|
||||
file_paths: Optional[List[str]] = None,
|
||||
progress_callback: Optional[ProgressCallback] = None
|
||||
) -> AutoOrganizeResult:
|
||||
"""Auto-organize models based on current settings
|
||||
|
||||
Args:
|
||||
file_paths: Optional list of specific file paths to organize.
|
||||
If None, organizes all models.
|
||||
progress_callback: Optional callback for progress updates
|
||||
|
||||
Returns:
|
||||
AutoOrganizeResult object with operation results
|
||||
"""
|
||||
result = AutoOrganizeResult()
|
||||
source_directories: Set[str] = set()
|
||||
|
||||
try:
|
||||
# Get all models from cache
|
||||
cache = await self.scanner.get_cached_data()
|
||||
all_models = cache.raw_data
|
||||
|
||||
# Filter models if specific file paths are provided
|
||||
if file_paths:
|
||||
all_models = [model for model in all_models if model.get('file_path') in file_paths]
|
||||
result.operation_type = 'bulk'
|
||||
else:
|
||||
result.operation_type = 'all'
|
||||
|
||||
# Get model roots for this scanner
|
||||
model_roots = self.get_model_roots()
|
||||
if not model_roots:
|
||||
raise ValueError('No model roots configured')
|
||||
|
||||
# Check if flat structure is configured for this model type
|
||||
path_template = settings.get_download_path_template(self.model_type)
|
||||
result.is_flat_structure = not path_template
|
||||
|
||||
# Initialize tracking
|
||||
result.total = len(all_models)
|
||||
|
||||
# Send initial progress
|
||||
if progress_callback:
|
||||
await progress_callback.on_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'started',
|
||||
'total': result.total,
|
||||
'processed': 0,
|
||||
'success': 0,
|
||||
'failures': 0,
|
||||
'skipped': 0,
|
||||
'operation_type': result.operation_type
|
||||
})
|
||||
|
||||
# Process models in batches
|
||||
await self._process_models_in_batches(
|
||||
all_models,
|
||||
model_roots,
|
||||
result,
|
||||
progress_callback,
|
||||
source_directories # Pass the set to track source directories
|
||||
)
|
||||
|
||||
# Send cleanup progress
|
||||
if progress_callback:
|
||||
await progress_callback.on_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'cleaning',
|
||||
'total': result.total,
|
||||
'processed': result.processed,
|
||||
'success': result.success_count,
|
||||
'failures': result.failure_count,
|
||||
'skipped': result.skipped_count,
|
||||
'message': 'Cleaning up empty directories...',
|
||||
'operation_type': result.operation_type
|
||||
})
|
||||
|
||||
# Clean up empty directories - only in affected directories for bulk operations
|
||||
cleanup_paths = list(source_directories) if result.operation_type == 'bulk' else model_roots
|
||||
result.cleanup_counts = await self._cleanup_empty_directories(cleanup_paths)
|
||||
|
||||
# Send completion message
|
||||
if progress_callback:
|
||||
await progress_callback.on_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'completed',
|
||||
'total': result.total,
|
||||
'processed': result.processed,
|
||||
'success': result.success_count,
|
||||
'failures': result.failure_count,
|
||||
'skipped': result.skipped_count,
|
||||
'cleanup': result.cleanup_counts,
|
||||
'operation_type': result.operation_type
|
||||
})
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in auto_organize_models: {e}", exc_info=True)
|
||||
|
||||
# Send error message
|
||||
if progress_callback:
|
||||
await progress_callback.on_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'error',
|
||||
'error': str(e),
|
||||
'operation_type': result.operation_type
|
||||
})
|
||||
|
||||
raise e
|
||||
|
||||
async def _process_models_in_batches(
|
||||
self,
|
||||
all_models: List[Dict[str, Any]],
|
||||
model_roots: List[str],
|
||||
result: AutoOrganizeResult,
|
||||
progress_callback: Optional[ProgressCallback],
|
||||
source_directories: Optional[Set[str]] = None
|
||||
) -> None:
|
||||
"""Process models in batches to avoid overwhelming the system"""
|
||||
|
||||
for i in range(0, result.total, AUTO_ORGANIZE_BATCH_SIZE):
|
||||
batch = all_models[i:i + AUTO_ORGANIZE_BATCH_SIZE]
|
||||
|
||||
for model in batch:
|
||||
await self._process_single_model(model, model_roots, result, source_directories)
|
||||
result.processed += 1
|
||||
|
||||
# Send progress update after each batch
|
||||
if progress_callback:
|
||||
await progress_callback.on_progress({
|
||||
'type': 'auto_organize_progress',
|
||||
'status': 'processing',
|
||||
'total': result.total,
|
||||
'processed': result.processed,
|
||||
'success': result.success_count,
|
||||
'failures': result.failure_count,
|
||||
'skipped': result.skipped_count,
|
||||
'operation_type': result.operation_type
|
||||
})
|
||||
|
||||
# Small delay between batches
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
async def _process_single_model(
|
||||
self,
|
||||
model: Dict[str, Any],
|
||||
model_roots: List[str],
|
||||
result: AutoOrganizeResult,
|
||||
source_directories: Optional[Set[str]] = None
|
||||
) -> None:
|
||||
"""Process a single model for organization"""
|
||||
try:
|
||||
file_path = model.get('file_path')
|
||||
model_name = model.get('model_name', 'Unknown')
|
||||
|
||||
if not file_path:
|
||||
self._add_result(result, model_name, False, "No file path found")
|
||||
result.failure_count += 1
|
||||
return
|
||||
|
||||
# Find which model root this file belongs to
|
||||
current_root = self._find_model_root(file_path, model_roots)
|
||||
if not current_root:
|
||||
self._add_result(result, model_name, False,
|
||||
"Model file not found in any configured root directory")
|
||||
result.failure_count += 1
|
||||
return
|
||||
|
||||
# Determine target directory
|
||||
target_dir = await self._calculate_target_directory(
|
||||
model, current_root, result.is_flat_structure
|
||||
)
|
||||
|
||||
if target_dir is None:
|
||||
self._add_result(result, model_name, False,
|
||||
"Skipped - insufficient metadata for organization")
|
||||
result.skipped_count += 1
|
||||
return
|
||||
|
||||
current_dir = os.path.dirname(file_path)
|
||||
|
||||
# Skip if already in correct location
|
||||
if current_dir.replace(os.sep, '/') == target_dir.replace(os.sep, '/'):
|
||||
result.skipped_count += 1
|
||||
return
|
||||
|
||||
# Check for conflicts
|
||||
file_name = os.path.basename(file_path)
|
||||
target_file_path = os.path.join(target_dir, file_name)
|
||||
|
||||
if os.path.exists(target_file_path):
|
||||
self._add_result(result, model_name, False,
|
||||
f"Target file already exists: {target_file_path}")
|
||||
result.failure_count += 1
|
||||
return
|
||||
|
||||
# Store the source directory for potential cleanup
|
||||
if source_directories is not None:
|
||||
source_directories.add(current_dir)
|
||||
|
||||
# Perform the move
|
||||
success = await self.scanner.move_model(file_path, target_dir)
|
||||
|
||||
if success:
|
||||
result.success_count += 1
|
||||
else:
|
||||
self._add_result(result, model_name, False, "Failed to move model")
|
||||
result.failure_count += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing model {model.get('model_name', 'Unknown')}: {e}", exc_info=True)
|
||||
self._add_result(result, model.get('model_name', 'Unknown'), False, f"Error: {str(e)}")
|
||||
result.failure_count += 1
|
||||
|
||||
def _find_model_root(self, file_path: str, model_roots: List[str]) -> Optional[str]:
|
||||
"""Find which model root the file belongs to"""
|
||||
for root in model_roots:
|
||||
# Normalize paths for comparison
|
||||
normalized_root = os.path.normpath(root).replace(os.sep, '/')
|
||||
normalized_file = os.path.normpath(file_path).replace(os.sep, '/')
|
||||
|
||||
if normalized_file.startswith(normalized_root):
|
||||
return root
|
||||
return None
|
||||
|
||||
async def _calculate_target_directory(
|
||||
self,
|
||||
model: Dict[str, Any],
|
||||
current_root: str,
|
||||
is_flat_structure: bool
|
||||
) -> Optional[str]:
|
||||
"""Calculate the target directory for a model"""
|
||||
if is_flat_structure:
|
||||
file_path = model.get('file_path')
|
||||
current_dir = os.path.dirname(file_path)
|
||||
|
||||
# Check if already in root directory
|
||||
if os.path.normpath(current_dir) == os.path.normpath(current_root):
|
||||
return None # Signal to skip
|
||||
|
||||
return current_root
|
||||
else:
|
||||
# Calculate new relative path based on settings
|
||||
new_relative_path = calculate_relative_path_for_model(model, self.model_type)
|
||||
|
||||
if not new_relative_path:
|
||||
return None # Signal to skip
|
||||
|
||||
return os.path.join(current_root, new_relative_path).replace(os.sep, '/')
|
||||
|
||||
def _add_result(
|
||||
self,
|
||||
result: AutoOrganizeResult,
|
||||
model_name: str,
|
||||
success: bool,
|
||||
message: str
|
||||
) -> None:
|
||||
"""Add a result entry if under the limit"""
|
||||
if len(result.results) < 100: # Limit detailed results
|
||||
result.results.append({
|
||||
"model": model_name,
|
||||
"success": success,
|
||||
"message": message
|
||||
})
|
||||
elif len(result.results) == 100:
|
||||
# Mark as truncated and save sample
|
||||
result.results_truncated = True
|
||||
result.sample_results = result.results[:50]
|
||||
|
||||
async def _cleanup_empty_directories(self, paths: List[str]) -> Dict[str, int]:
|
||||
"""Clean up empty directories after organizing
|
||||
|
||||
Args:
|
||||
paths: List of paths to check for empty directories
|
||||
|
||||
Returns:
|
||||
Dictionary with counts of removed directories by root path
|
||||
"""
|
||||
cleanup_counts = {}
|
||||
for path in paths:
|
||||
removed = remove_empty_dirs(path)
|
||||
cleanup_counts[path] = removed
|
||||
return cleanup_counts
|
||||
|
||||
|
||||
class ModelMoveService:
|
||||
"""Service for handling individual model moves"""
|
||||
|
||||
def __init__(self, scanner):
|
||||
"""Initialize the service
|
||||
|
||||
Args:
|
||||
scanner: Model scanner instance
|
||||
"""
|
||||
self.scanner = scanner
|
||||
|
||||
async def move_model(self, file_path: str, target_path: str) -> Dict[str, Any]:
|
||||
"""Move a single model file
|
||||
|
||||
Args:
|
||||
file_path: Source file path
|
||||
target_path: Target directory path
|
||||
|
||||
Returns:
|
||||
Dictionary with move result
|
||||
"""
|
||||
try:
|
||||
source_dir = os.path.dirname(file_path)
|
||||
if os.path.normpath(source_dir) == os.path.normpath(target_path):
|
||||
logger.info(f"Source and target directories are the same: {source_dir}")
|
||||
return {
|
||||
'success': True,
|
||||
'message': 'Source and target directories are the same',
|
||||
'original_file_path': file_path,
|
||||
'new_file_path': file_path
|
||||
}
|
||||
|
||||
new_file_path = await self.scanner.move_model(file_path, target_path)
|
||||
if new_file_path:
|
||||
return {
|
||||
'success': True,
|
||||
'original_file_path': file_path,
|
||||
'new_file_path': new_file_path
|
||||
}
|
||||
else:
|
||||
return {
|
||||
'success': False,
|
||||
'error': 'Failed to move model',
|
||||
'original_file_path': file_path,
|
||||
'new_file_path': None
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error moving model: {e}", exc_info=True)
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e),
|
||||
'original_file_path': file_path,
|
||||
'new_file_path': None
|
||||
}
|
||||
|
||||
async def move_models_bulk(self, file_paths: List[str], target_path: str) -> Dict[str, Any]:
|
||||
"""Move multiple model files
|
||||
|
||||
Args:
|
||||
file_paths: List of source file paths
|
||||
target_path: Target directory path
|
||||
|
||||
Returns:
|
||||
Dictionary with bulk move results
|
||||
"""
|
||||
try:
|
||||
results = []
|
||||
|
||||
for file_path in file_paths:
|
||||
result = await self.move_model(file_path, target_path)
|
||||
results.append({
|
||||
"original_file_path": file_path,
|
||||
"new_file_path": result.get('new_file_path'),
|
||||
"success": result['success'],
|
||||
"message": result.get('message', result.get('error', 'Unknown'))
|
||||
})
|
||||
|
||||
success_count = sum(1 for r in results if r["success"])
|
||||
failure_count = len(results) - success_count
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'message': f'Moved {success_count} of {len(file_paths)} models',
|
||||
'results': results,
|
||||
'success_count': success_count,
|
||||
'failure_count': failure_count
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error moving models in bulk: {e}", exc_info=True)
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e),
|
||||
'results': [],
|
||||
'success_count': 0,
|
||||
'failure_count': len(file_paths)
|
||||
}
|
||||
463 py/services/model_metadata_provider.py Normal file
@@ -0,0 +1,463 @@
|
||||
from abc import ABC, abstractmethod
|
||||
import json
|
||||
import aiosqlite
|
||||
import logging
|
||||
from bs4 import BeautifulSoup
|
||||
from typing import Optional, Dict, Tuple
|
||||
from .downloader import get_downloader
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class ModelMetadataProvider(ABC):
|
||||
"""Base abstract class for all model metadata providers"""
|
||||
|
||||
@abstractmethod
|
||||
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
"""Find model by hash value"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
|
||||
"""Get all versions of a model with their details"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
|
||||
"""Get specific model version with additional metadata"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
"""Fetch model version metadata"""
|
||||
pass
|
||||
|
||||
class CivitaiModelMetadataProvider(ModelMetadataProvider):
|
||||
"""Provider that uses Civitai API for metadata"""
|
||||
|
||||
def __init__(self, civitai_client):
|
||||
self.client = civitai_client
|
||||
|
||||
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
return await self.client.get_model_by_hash(model_hash)
|
||||
|
||||
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
|
||||
return await self.client.get_model_versions(model_id)
|
||||
|
||||
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
|
||||
return await self.client.get_model_version(model_id, version_id)
|
||||
|
||||
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
return await self.client.get_model_version_info(version_id)
|
||||
|
||||
class CivArchiveModelMetadataProvider(ModelMetadataProvider):
|
||||
"""Provider that uses CivArchive HTML page parsing for metadata"""
|
||||
|
||||
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
"""Not supported by CivArchive provider"""
|
||||
return None, "CivArchive provider does not support hash lookup"
|
||||
|
||||
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
|
||||
"""Not supported by CivArchive provider"""
|
||||
return None
|
||||
|
||||
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
|
||||
"""Get specific model version by parsing CivArchive HTML page"""
|
||||
if model_id is None or version_id is None:
|
||||
return None
|
||||
|
||||
try:
|
||||
# Construct CivArchive URL
|
||||
url = f"https://civarchive.com/models/{model_id}?modelVersionId={version_id}"
|
||||
|
||||
downloader = await get_downloader()
|
||||
session = await downloader.session
|
||||
async with session.get(url) as response:
|
||||
if response.status != 200:
|
||||
return None
|
||||
|
||||
html_content = await response.text()
|
||||
|
||||
# Parse HTML to extract JSON data
|
||||
soup = BeautifulSoup(html_content, 'html.parser')
|
||||
script_tag = soup.find('script', {'id': '__NEXT_DATA__', 'type': 'application/json'})
|
||||
|
||||
if not script_tag:
|
||||
return None
|
||||
|
||||
# Parse JSON content
|
||||
json_data = json.loads(script_tag.string)
|
||||
model_data = json_data.get('props', {}).get('pageProps', {}).get('model')
|
||||
|
||||
if not model_data or 'version' not in model_data:
|
||||
return None
|
||||
|
||||
# Extract version data as base
|
||||
version = model_data['version'].copy()
|
||||
|
||||
# Restructure stats
|
||||
if 'downloadCount' in version and 'ratingCount' in version and 'rating' in version:
|
||||
version['stats'] = {
|
||||
'downloadCount': version.pop('downloadCount'),
|
||||
'ratingCount': version.pop('ratingCount'),
|
||||
'rating': version.pop('rating')
|
||||
}
|
||||
|
||||
# Rename trigger to trainedWords
|
||||
if 'trigger' in version:
|
||||
version['trainedWords'] = version.pop('trigger')
|
||||
|
||||
# Transform files data to expected format
|
||||
if 'files' in version:
|
||||
transformed_files = []
|
||||
for file_data in version['files']:
|
||||
# Find first available mirror (deletedAt is null)
|
||||
available_mirror = None
|
||||
for mirror in file_data.get('mirrors', []):
|
||||
if mirror.get('deletedAt') is None:
|
||||
available_mirror = mirror
|
||||
break
|
||||
|
||||
# Create transformed file entry
|
||||
transformed_file = {
|
||||
'id': file_data.get('id'),
|
||||
'sizeKB': file_data.get('sizeKB'),
|
||||
'name': available_mirror.get('filename', file_data.get('name')) if available_mirror else file_data.get('name'),
|
||||
'type': file_data.get('type'),
|
||||
'downloadUrl': available_mirror.get('url') if available_mirror else None,
|
||||
'primary': True,
|
||||
'mirrors': file_data.get('mirrors', [])
|
||||
}
|
||||
|
||||
# Transform hash format
|
||||
if 'sha256' in file_data:
|
||||
transformed_file['hashes'] = {
|
||||
'SHA256': file_data['sha256'].upper()
|
||||
}
|
||||
|
||||
transformed_files.append(transformed_file)
|
||||
|
||||
version['files'] = transformed_files
|
||||
|
||||
# Add model information
|
||||
version['model'] = {
|
||||
'name': model_data.get('name'),
|
||||
'type': model_data.get('type'),
|
||||
'nsfw': model_data.get('is_nsfw', False),
|
||||
'description': model_data.get('description'),
|
||||
'tags': model_data.get('tags', [])
|
||||
}
|
||||
|
||||
version['creator'] = {
|
||||
'username': model_data.get('username'),
|
||||
'image': ''
|
||||
}
|
||||
|
||||
# Add source identifier
|
||||
version['source'] = 'civarchive'
|
||||
version['is_deleted'] = json_data.get('query', {}).get('is_deleted', False)
|
||||
|
||||
return version
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching CivArchive model version {model_id}/{version_id}: {e}")
|
||||
return None
|
||||
|
||||
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
"""Not supported by CivArchive provider - requires both model_id and version_id"""
|
||||
return None, "CivArchive provider requires both model_id and version_id"
|
||||
|
||||
class SQLiteModelMetadataProvider(ModelMetadataProvider):
|
||||
"""Provider that uses SQLite database for metadata"""
|
||||
|
||||
def __init__(self, db_path: str):
|
||||
self.db_path = db_path
|
||||
|
||||
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
"""Find model by hash value from SQLite database"""
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
# Look up in model_files table to get model_id and version_id
|
||||
query = """
|
||||
SELECT model_id, version_id
|
||||
FROM model_files
|
||||
WHERE sha256 = ?
|
||||
LIMIT 1
|
||||
"""
|
||||
db.row_factory = aiosqlite.Row
|
||||
cursor = await db.execute(query, (model_hash.upper(),))
|
||||
file_row = await cursor.fetchone()
|
||||
|
||||
if not file_row:
|
||||
return None, "Model not found"
|
||||
|
||||
# Get version details
|
||||
model_id = file_row['model_id']
|
||||
version_id = file_row['version_id']
|
||||
|
||||
# Build response in the same format as Civitai API
|
||||
result = await self._get_version_with_model_data(db, model_id, version_id)
|
||||
return result, None if result else "Error retrieving model data"
|
||||
|
||||
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
|
||||
"""Get all versions of a model from SQLite database"""
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
db.row_factory = aiosqlite.Row
|
||||
|
||||
# First check if model exists
|
||||
model_query = "SELECT * FROM models WHERE id = ?"
|
||||
cursor = await db.execute(model_query, (model_id,))
|
||||
model_row = await cursor.fetchone()
|
||||
|
||||
if not model_row:
|
||||
return None
|
||||
|
||||
model_data = json.loads(model_row['data'])
|
||||
model_type = model_row['type']
|
||||
model_name = model_row['name']
|
||||
|
||||
# Get all versions for this model
|
||||
versions_query = """
|
||||
SELECT id, name, base_model, data, position, published_at
|
||||
FROM model_versions
|
||||
WHERE model_id = ?
|
||||
ORDER BY position ASC
|
||||
"""
|
||||
cursor = await db.execute(versions_query, (model_id,))
|
||||
version_rows = await cursor.fetchall()
|
||||
|
||||
if not version_rows:
|
||||
return {'modelVersions': [], 'type': model_type}
|
||||
|
||||
# Format versions similar to Civitai API
|
||||
model_versions = []
|
||||
for row in version_rows:
|
||||
version_data = json.loads(row['data'])
|
||||
# Add fields from the row to ensure we have the basic fields
|
||||
version_entry = {
|
||||
'id': row['id'],
|
||||
'modelId': int(model_id),
|
||||
'name': row['name'],
|
||||
'baseModel': row['base_model'],
|
||||
'model': {
|
||||
'name': model_row['name'],
|
||||
'type': model_type,
|
||||
},
|
||||
'source': 'archive_db'
|
||||
}
|
||||
# Update with any additional data
|
||||
version_entry.update(version_data)
|
||||
model_versions.append(version_entry)
|
||||
|
||||
return {
|
||||
'modelVersions': model_versions,
|
||||
'type': model_type,
|
||||
'name': model_name
|
||||
}
|
||||
|
||||
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
|
||||
"""Get specific model version with additional metadata from SQLite database"""
|
||||
if not model_id and not version_id:
|
||||
return None
|
||||
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
db.row_factory = aiosqlite.Row
|
||||
|
||||
# Case 1: Only version_id is provided
|
||||
if model_id is None and version_id is not None:
|
||||
# First get the version info to extract model_id
|
||||
version_query = "SELECT model_id FROM model_versions WHERE id = ?"
|
||||
cursor = await db.execute(version_query, (version_id,))
|
||||
version_row = await cursor.fetchone()
|
||||
|
||||
if not version_row:
|
||||
return None
|
||||
|
||||
model_id = version_row['model_id']
|
||||
|
||||
# Case 2: model_id is provided but version_id is not
|
||||
elif model_id is not None and version_id is None:
|
||||
# Find the latest version
|
||||
version_query = """
|
||||
SELECT id FROM model_versions
|
||||
WHERE model_id = ?
|
||||
ORDER BY position ASC
|
||||
LIMIT 1
|
||||
"""
|
||||
cursor = await db.execute(version_query, (model_id,))
|
||||
version_row = await cursor.fetchone()
|
||||
|
||||
if not version_row:
|
||||
return None
|
||||
|
||||
version_id = version_row['id']
|
||||
|
||||
# Now we have both model_id and version_id, get the full data
|
||||
return await self._get_version_with_model_data(db, model_id, version_id)
|
||||
|
||||
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
"""Fetch model version metadata from SQLite database"""
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
db.row_factory = aiosqlite.Row
|
||||
|
||||
# Get version details
|
||||
version_query = "SELECT model_id FROM model_versions WHERE id = ?"
|
||||
cursor = await db.execute(version_query, (version_id,))
|
||||
version_row = await cursor.fetchone()
|
||||
|
||||
if not version_row:
|
||||
return None, "Model version not found"
|
||||
|
||||
model_id = version_row['model_id']
|
||||
|
||||
# Build complete version data with model info
|
||||
version_data = await self._get_version_with_model_data(db, model_id, version_id)
|
||||
return version_data, None
|
||||
|
||||
async def _get_version_with_model_data(self, db, model_id, version_id) -> Optional[Dict]:
|
||||
"""Helper to build version data with model information"""
|
||||
# Get version details
|
||||
version_query = "SELECT name, base_model, data FROM model_versions WHERE id = ? AND model_id = ?"
|
||||
cursor = await db.execute(version_query, (version_id, model_id))
|
||||
version_row = await cursor.fetchone()
|
||||
|
||||
if not version_row:
|
||||
return None
|
||||
|
||||
# Get model details
|
||||
model_query = "SELECT name, type, data, username FROM models WHERE id = ?"
|
||||
cursor = await db.execute(model_query, (model_id,))
|
||||
model_row = await cursor.fetchone()
|
||||
|
||||
if not model_row:
|
||||
return None
|
||||
|
||||
# Parse JSON data
|
||||
try:
|
||||
version_data = json.loads(version_row['data'])
|
||||
model_data = json.loads(model_row['data'])
|
||||
|
||||
# Build response
|
||||
result = {
|
||||
"id": int(version_id),
|
||||
"modelId": int(model_id),
|
||||
"name": version_row['name'],
|
||||
"baseModel": version_row['base_model'],
|
||||
"model": {
|
||||
"name": model_row['name'],
|
||||
"description": model_data.get("description"),
|
||||
"type": model_row['type'],
|
||||
"tags": model_data.get("tags", [])
|
||||
},
|
||||
"creator": {
|
||||
"username": model_row['username'] or model_data.get("creator", {}).get("username"),
|
||||
"image": model_data.get("creator", {}).get("image")
|
||||
},
|
||||
"source": "archive_db"
|
||||
}
|
||||
|
||||
# Add any additional fields from version data
|
||||
result.update(version_data)
|
||||
|
||||
return result
|
||||
except json.JSONDecodeError:
|
||||
return None
|
||||
|
||||
class FallbackMetadataProvider(ModelMetadataProvider):
|
||||
"""Try providers in order, return first successful result."""
|
||||
def __init__(self, providers: list):
|
||||
self.providers = providers
|
||||
|
||||
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
for provider in self.providers:
|
||||
try:
|
||||
result, error = await provider.get_model_by_hash(model_hash)
|
||||
if result:
|
||||
return result, error
|
||||
except Exception as e:
|
||||
logger.debug(f"Provider failed for get_model_by_hash: {e}")
|
||||
continue
|
||||
return None, "Model not found"
|
||||
|
||||
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
|
||||
for provider in self.providers:
|
||||
try:
|
||||
result = await provider.get_model_versions(model_id)
|
||||
if result:
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.debug(f"Provider failed for get_model_versions: {e}")
|
||||
continue
|
||||
return None
|
||||
|
||||
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
|
||||
for provider in self.providers:
|
||||
try:
|
||||
result = await provider.get_model_version(model_id, version_id)
|
||||
if result:
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.debug(f"Provider failed for get_model_version: {e}")
|
||||
continue
|
||||
return None
|
||||
|
||||
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
for provider in self.providers:
|
||||
try:
|
||||
result, error = await provider.get_model_version_info(version_id)
|
||||
if result:
|
||||
return result, error
|
||||
except Exception as e:
|
||||
logger.debug(f"Provider failed for get_model_version_info: {e}")
|
||||
continue
|
||||
return None, "No provider could retrieve the data"
|
||||
|
||||
class ModelMetadataProviderManager:
|
||||
"""Manager for selecting and using model metadata providers"""
|
||||
|
||||
_instance = None
|
||||
|
||||
@classmethod
|
||||
async def get_instance(cls):
|
||||
"""Get singleton instance of ModelMetadataProviderManager"""
|
||||
if cls._instance is None:
|
||||
cls._instance = cls()
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
self.providers = {}
|
||||
self.default_provider = None
|
||||
|
||||
def register_provider(self, name: str, provider: ModelMetadataProvider, is_default: bool = False):
|
||||
"""Register a metadata provider"""
|
||||
self.providers[name] = provider
|
||||
if is_default or self.default_provider is None:
|
||||
self.default_provider = name
|
||||
|
||||
async def get_model_by_hash(self, model_hash: str, provider_name: str = None) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
"""Find model by hash using specified or default provider"""
|
||||
provider = self._get_provider(provider_name)
|
||||
return await provider.get_model_by_hash(model_hash)
|
||||
|
||||
async def get_model_versions(self, model_id: str, provider_name: str = None) -> Optional[Dict]:
|
||||
"""Get model versions using specified or default provider"""
|
||||
provider = self._get_provider(provider_name)
|
||||
return await provider.get_model_versions(model_id)
|
||||
|
||||
async def get_model_version(self, model_id: int = None, version_id: int = None, provider_name: str = None) -> Optional[Dict]:
|
||||
"""Get specific model version using specified or default provider"""
|
||||
provider = self._get_provider(provider_name)
|
||||
return await provider.get_model_version(model_id, version_id)
|
||||
|
||||
async def get_model_version_info(self, version_id: str, provider_name: str = None) -> Tuple[Optional[Dict], Optional[str]]:
|
||||
"""Fetch model version info using specified or default provider"""
|
||||
provider = self._get_provider(provider_name)
|
||||
return await provider.get_model_version_info(version_id)
|
||||
|
||||
def _get_provider(self, provider_name: str = None) -> ModelMetadataProvider:
|
||||
"""Get provider by name or default provider"""
|
||||
if provider_name and provider_name in self.providers:
|
||||
return self.providers[provider_name]
|
||||
|
||||
if self.default_provider is None:
|
||||
raise ValueError("No default provider set and no valid provider specified")
|
||||
|
||||
return self.providers[self.default_provider]
|
||||
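For completeness, the archive provider can also be queried directly; a sketch with a placeholder database path and hash:

```python
from py.services.model_metadata_provider import SQLiteModelMetadataProvider  # import path assumed

async def find_archived_version(db_path: str, sha256: str):
    provider = SQLiteModelMetadataProvider(db_path)
    # Returns a version dict shaped like the Civitai API response (with source="archive_db"),
    # or (None, error message) when the hash is not in the archive.
    return await provider.get_model_by_hash(sha256)
```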
@@ -698,65 +698,13 @@ class ModelScanner:
|
||||
existing_path = self._hash_index.get_path(existing_hash)
|
||||
if existing_path and existing_path != file_path:
|
||||
logger.warning(f"Duplicate filename detected: '{filename}' - files: '{existing_path}' and '{file_path}'")
|
||||
|
||||
await self._fetch_missing_metadata(file_path, model_data)
|
||||
|
||||
rel_path = os.path.relpath(file_path, root_path)
|
||||
folder = os.path.dirname(rel_path)
|
||||
model_data['folder'] = folder.replace(os.path.sep, '/')
|
||||
|
||||
return model_data
|
||||
|
||||
async def _fetch_missing_metadata(self, file_path: str, model_data: Dict) -> None:
|
||||
"""Fetch missing description and tags from Civitai if needed"""
|
||||
try:
|
||||
if model_data.get('civitai_deleted', False):
|
||||
logger.debug(f"Skipping metadata fetch for {file_path}: marked as deleted on Civitai")
|
||||
return
|
||||
|
||||
needs_metadata_update = False
|
||||
model_id = None
|
||||
|
||||
if model_data.get('civitai'):
|
||||
model_id = model_data['civitai'].get('modelId')
|
||||
|
||||
if model_id:
|
||||
model_id = str(model_id)
|
||||
tags_missing = not model_data.get('tags') or len(model_data.get('tags', [])) == 0
|
||||
desc_missing = not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")
|
||||
# TODO: not for now, but later we should check if the creator is missing
|
||||
# creator_missing = not model_data.get('civitai', {}).get('creator')
|
||||
creator_missing = False
|
||||
needs_metadata_update = tags_missing or desc_missing or creator_missing
|
||||
|
||||
if needs_metadata_update and model_id:
|
||||
logger.debug(f"Fetching missing metadata for {file_path} with model ID {model_id}")
|
||||
from ..services.civitai_client import CivitaiClient
|
||||
client = CivitaiClient()
|
||||
|
||||
model_metadata, status_code = await client.get_model_metadata(model_id)
|
||||
await client.close()
|
||||
|
||||
if status_code == 404:
|
||||
logger.warning(f"Model {model_id} appears to be deleted from Civitai (404 response)")
|
||||
model_data['civitai_deleted'] = True
|
||||
|
||||
await MetadataManager.save_metadata(file_path, model_data)
|
||||
|
||||
elif model_metadata:
|
||||
logger.debug(f"Updating metadata for {file_path} with model ID {model_id}")
|
||||
|
||||
if model_metadata.get('tags') and (not model_data.get('tags') or len(model_data.get('tags', [])) == 0):
|
||||
model_data['tags'] = model_metadata['tags']
|
||||
|
||||
if model_metadata.get('description') and (not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")):
|
||||
model_data['modelDescription'] = model_metadata['description']
|
||||
|
||||
model_data['civitai']['creator'] = model_metadata['creator']
|
||||
|
||||
await MetadataManager.save_metadata(file_path, model_data)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update metadata from Civitai for {file_path}: {e}")
|
||||
|
||||
async def add_model_to_cache(self, metadata_dict: Dict, folder: str = '') -> bool:
|
||||
"""Add a model to the cache
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ from ..config import config
 from .recipe_cache import RecipeCache
 from .service_registry import ServiceRegistry
 from .lora_scanner import LoraScanner
+from .metadata_service import get_default_metadata_provider
 from ..utils.utils import fuzzy_match
 from natsort import natsorted
 import sys
@@ -431,13 +432,13 @@ class RecipeScanner:
     async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]:
         """Get hash from Civitai API"""
         try:
-            # Get CivitaiClient from ServiceRegistry
-            civitai_client = await self._get_civitai_client()
-            if not civitai_client:
-                logger.error("Failed to get CivitaiClient from ServiceRegistry")
+            # Get metadata provider instead of civitai client directly
+            metadata_provider = await get_default_metadata_provider()
+            if not metadata_provider:
+                logger.error("Failed to get metadata provider")
                 return None
 
-            version_info, error_msg = await civitai_client.get_model_version_info(model_version_id)
+            version_info, error_msg = await metadata_provider.get_model_version_info(model_version_id)
 
             if not version_info:
                 if error_msg and "model not found" in error_msg.lower():
@@ -80,8 +80,15 @@
         """Return default settings"""
         return {
             "civitai_api_key": "",
-            "show_only_sfw": False,
-            "language": "en"  # default language setting
+            "language": "en",
+            "show_only_sfw": False,  # Show only SFW content
+            "enable_metadata_archive_db": False,  # Enable metadata archive database
+            "proxy_enabled": False,  # Enable app-level proxy
+            "proxy_host": "",  # Proxy host
+            "proxy_port": "",  # Proxy port
+            "proxy_username": "",  # Proxy username (optional)
+            "proxy_password": "",  # Proxy password (optional)
+            "proxy_type": "http"  # Proxy type: http, https, socks4, socks5
         }
 
     def get(self, key: str, default: Any = None) -> Any:
@@ -93,6 +100,13 @@
         self.settings[key] = value
         self._save_settings()
 
+    def delete(self, key: str) -> None:
+        """Delete setting key and save"""
+        if key in self.settings:
+            del self.settings[key]
+            self._save_settings()
+            logger.info(f"Deleted setting: {key}")
+
     def _save_settings(self) -> None:
         """Save settings to file"""
         try:
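The downloader reads a single proxy URL, so these settings presumably get composed into one; a hypothetical helper sketch (the function name and exact format are assumptions, not code from this diff). Plain aiohttp only handles HTTP proxies; socks4/socks5 would need an extra connector such as aiohttp-socks.

```python
from typing import Optional
from urllib.parse import quote

def build_proxy_url(settings) -> Optional[str]:
    """Hypothetical helper: compose an aiohttp-style proxy URL from the settings above."""
    if not settings.get('proxy_enabled', False):
        return None
    host = settings.get('proxy_host', '')
    port = settings.get('proxy_port', '')
    if not host or not port:
        return None
    scheme = settings.get('proxy_type', 'http')
    user = settings.get('proxy_username', '')
    password = settings.get('proxy_password', '')
    auth = f"{quote(user)}:{quote(password)}@" if user else ""
    return f"{scheme}://{auth}{host}:{port}"
```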
11 py/services/websocket_progress_callback.py Normal file
@@ -0,0 +1,11 @@
from typing import Dict, Any
from .model_file_service import ProgressCallback
from .websocket_manager import ws_manager


class WebSocketProgressCallback(ProgressCallback):
    """WebSocket implementation of progress callback"""

    async def on_progress(self, progress_data: Dict[str, Any]) -> None:
        """Send progress data via WebSocket"""
        await ws_manager.broadcast_auto_organize_progress(progress_data)
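A sketch of how this callback could be wired into the auto-organize flow from model_file_service.py above (the scanner is assumed to come from the existing service registry):

```python
from py.services.model_file_service import ModelFileService            # import paths assumed
from py.services.websocket_progress_callback import WebSocketProgressCallback

async def organize_all_loras(lora_scanner):
    service = ModelFileService(lora_scanner, 'lora')
    # Progress updates are broadcast to the frontend through ws_manager
    result = await service.auto_organize_models(progress_callback=WebSocketProgressCallback())
    return result.to_dict()
```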
@@ -53,8 +53,8 @@ AUTO_ORGANIZE_BATCH_SIZE = 50 # Process models in batches to avoid overwhelming
 
 # Civitai model tags in priority order for subfolder organization
 CIVITAI_MODEL_TAGS = [
-    'character', 'style', 'concept', 'clothing',
-    # 'base model', # exclude 'base model'
-    'poses', 'background', 'tool', 'vehicle', 'buildings',
+    'character', 'concept', 'clothing',
+    'realistic', 'anime', 'toon', 'furry', 'style',
+    'poses', 'background', 'tool', 'vehicle', 'buildings',
     'objects', 'assets', 'animal', 'action'
 ]
@@ -3,13 +3,14 @@ import os
|
||||
import asyncio
|
||||
import json
|
||||
import time
|
||||
import aiohttp
|
||||
from aiohttp import web
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
from ..utils.metadata_manager import MetadataManager
|
||||
from .example_images_processor import ExampleImagesProcessor
|
||||
from .example_images_metadata import MetadataUpdater
|
||||
from ..services.websocket_manager import ws_manager # Add this import at the top
|
||||
from ..services.downloader import get_downloader
|
||||
from ..services.settings_manager import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -40,10 +41,10 @@ class DownloadManager:
|
||||
|
||||
Expects a JSON body with:
|
||||
{
|
||||
"output_dir": "path/to/output", # Base directory to save example images
|
||||
"optimize": true, # Whether to optimize images (default: true)
|
||||
"model_types": ["lora", "checkpoint"], # Model types to process (default: both)
|
||||
"delay": 1.0 # Delay between downloads to avoid rate limiting (default: 1.0)
|
||||
"delay": 1.0, # Delay between downloads to avoid rate limiting (default: 1.0)
|
||||
"auto_mode": false # Flag to indicate automatic download (default: false)
|
||||
}
|
||||
"""
|
||||
global download_task, is_downloading, download_progress
|
||||
@@ -64,17 +65,28 @@ class DownloadManager:
|
||||
try:
|
||||
# Parse the request body
|
||||
data = await request.json()
|
||||
output_dir = data.get('output_dir')
|
||||
auto_mode = data.get('auto_mode', False)
|
||||
optimize = data.get('optimize', True)
|
||||
model_types = data.get('model_types', ['lora', 'checkpoint'])
|
||||
delay = float(data.get('delay', 0.2)) # Default to 0.2 seconds
|
||||
delay = 0 # Temporary: Disable delay to speed up downloads
|
||||
|
||||
# Get output directory from settings
|
||||
output_dir = settings.get('example_images_path')
|
||||
|
||||
if not output_dir:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Missing output_dir parameter'
|
||||
}, status=400)
|
||||
error_msg = 'Example images path not configured in settings'
|
||||
if auto_mode:
|
||||
# For auto mode, just log and return success to avoid showing error toasts
|
||||
logger.debug(error_msg)
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'message': 'Example images path not configured, skipping auto download'
|
||||
})
|
||||
else:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': error_msg
|
||||
}, status=400)
|
||||
|
||||
# Create the output directory
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
@@ -200,19 +212,8 @@ class DownloadManager:
|
||||
"""Download example images for all models"""
|
||||
global is_downloading, download_progress
|
||||
|
||||
# Create independent download session
|
||||
connector = aiohttp.TCPConnector(
|
||||
ssl=True,
|
||||
limit=3,
|
||||
force_close=False,
|
||||
enable_cleanup_closed=True
|
||||
)
|
||||
timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=60)
|
||||
independent_session = aiohttp.ClientSession(
|
||||
connector=connector,
|
||||
trust_env=True,
|
||||
timeout=timeout
|
||||
)
|
||||
# Get unified downloader
|
||||
downloader = await get_downloader()
|
||||
|
||||
try:
|
||||
# Get scanners
|
||||
@@ -247,7 +248,7 @@ class DownloadManager:
|
||||
# Main logic for processing model is here, but actual operations are delegated to other classes
|
||||
was_remote_download = await DownloadManager._process_model(
|
||||
scanner_type, model, scanner,
|
||||
output_dir, optimize, independent_session
|
||||
output_dir, optimize, downloader
|
||||
)
|
||||
|
||||
# Update progress
|
||||
@@ -271,12 +272,6 @@ class DownloadManager:
|
||||
download_progress['end_time'] = time.time()
|
||||
|
||||
finally:
|
||||
# Close the independent session
|
||||
try:
|
||||
await independent_session.close()
|
||||
except Exception as e:
|
||||
logger.error(f"Error closing download session: {e}")
|
||||
|
||||
# Save final progress to file
|
||||
try:
|
||||
DownloadManager._save_progress(output_dir)
|
||||
@@ -287,7 +282,7 @@ class DownloadManager:
|
||||
is_downloading = False
|
||||
|
||||
@staticmethod
|
||||
async def _process_model(scanner_type, model, scanner, output_dir, optimize, independent_session):
|
||||
async def _process_model(scanner_type, model, scanner, output_dir, optimize, downloader):
|
||||
"""Process a single model download"""
|
||||
global download_progress
|
||||
|
||||
@@ -348,7 +343,7 @@ class DownloadManager:
|
||||
images = model.get('civitai', {}).get('images', [])
|
||||
|
||||
success, is_stale = await ExampleImagesProcessor.download_model_images(
|
||||
model_hash, model_name, images, model_dir, optimize, independent_session
|
||||
model_hash, model_name, images, model_dir, optimize, downloader
|
||||
)
|
||||
|
||||
# If metadata is stale, try to refresh it
|
||||
@@ -366,7 +361,7 @@ class DownloadManager:
|
||||
# Retry download with updated metadata
|
||||
updated_images = updated_model.get('civitai', {}).get('images', [])
|
||||
success, _ = await ExampleImagesProcessor.download_model_images(
|
||||
model_hash, model_name, updated_images, model_dir, optimize, independent_session
|
||||
model_hash, model_name, updated_images, model_dir, optimize, downloader
|
||||
)
|
||||
|
||||
download_progress['refreshed_models'].add(model_hash)
|
||||
@@ -444,7 +439,6 @@ class DownloadManager:
|
||||
Expects a JSON body with:
|
||||
{
|
||||
"model_hashes": ["hash1", "hash2", ...], # List of model hashes to download
|
||||
"output_dir": "path/to/output", # Base directory to save example images
|
||||
"optimize": true, # Whether to optimize images (default: true)
|
||||
"model_types": ["lora", "checkpoint"], # Model types to process (default: both)
|
||||
"delay": 1.0 # Delay between downloads (default: 1.0)
|
||||
@@ -462,7 +456,6 @@ class DownloadManager:
|
||||
# Parse the request body
|
||||
data = await request.json()
|
||||
model_hashes = data.get('model_hashes', [])
|
||||
output_dir = data.get('output_dir')
|
||||
optimize = data.get('optimize', True)
|
||||
model_types = data.get('model_types', ['lora', 'checkpoint'])
|
||||
delay = float(data.get('delay', 0.2)) # Default to 0.2 seconds
|
||||
@@ -472,11 +465,14 @@ class DownloadManager:
|
||||
'success': False,
|
||||
'error': 'Missing model_hashes parameter'
|
||||
}, status=400)
|
||||
|
||||
|
||||
# Get output directory from settings
|
||||
output_dir = settings.get('example_images_path')
|
||||
|
||||
if not output_dir:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': 'Missing output_dir parameter'
|
||||
'error': 'Example images path not configured in settings'
|
||||
}, status=400)
|
||||
|
||||
# Create the output directory
|
||||
@@ -530,19 +526,8 @@ class DownloadManager:
|
||||
"""Download example images for specific models only - synchronous version"""
|
||||
global download_progress
|
||||
|
||||
# Create independent download session
|
||||
connector = aiohttp.TCPConnector(
|
||||
ssl=True,
|
||||
limit=3,
|
||||
force_close=False,
|
||||
enable_cleanup_closed=True
|
||||
)
|
||||
timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=60)
|
||||
independent_session = aiohttp.ClientSession(
|
||||
connector=connector,
|
||||
trust_env=True,
|
||||
timeout=timeout
|
||||
)
|
||||
# Get unified downloader
|
||||
downloader = await get_downloader()
|
||||
|
||||
try:
|
||||
# Get scanners
|
||||
@@ -587,7 +572,7 @@ class DownloadManager:
|
||||
# Force process this model regardless of previous status
|
||||
was_successful = await DownloadManager._process_specific_model(
|
||||
scanner_type, model, scanner,
|
||||
output_dir, optimize, independent_session
|
||||
output_dir, optimize, downloader
|
||||
)
|
||||
|
||||
if was_successful:
|
||||
@@ -651,14 +636,11 @@ class DownloadManager:
|
||||
raise
|
||||
|
||||
finally:
|
||||
# Close the independent session
|
||||
try:
|
||||
await independent_session.close()
|
||||
except Exception as e:
|
||||
logger.error(f"Error closing download session: {e}")
|
||||
# No need to close any sessions since we use the global downloader
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
async def _process_specific_model(scanner_type, model, scanner, output_dir, optimize, independent_session):
|
||||
async def _process_specific_model(scanner_type, model, scanner, output_dir, optimize, downloader):
|
||||
"""Process a specific model for forced download, ignoring previous download status"""
|
||||
global download_progress
|
||||
|
||||
@@ -702,7 +684,7 @@ class DownloadManager:
|
||||
images = model.get('civitai', {}).get('images', [])
|
||||
|
||||
success, is_stale, failed_images = await ExampleImagesProcessor.download_model_images_with_tracking(
|
||||
model_hash, model_name, images, model_dir, optimize, independent_session
|
||||
model_hash, model_name, images, model_dir, optimize, downloader
|
||||
)
|
||||
|
||||
# If metadata is stale, try to refresh it
|
||||
@@ -720,7 +702,7 @@ class DownloadManager:
|
||||
# Retry download with updated metadata
|
||||
updated_images = updated_model.get('civitai', {}).get('images', [])
|
||||
success, _, additional_failed_images = await ExampleImagesProcessor.download_model_images_with_tracking(
|
||||
model_hash, model_name, updated_images, model_dir, optimize, independent_session
|
||||
model_hash, model_name, updated_images, model_dir, optimize, downloader
|
||||
)
|
||||
|
||||
# Combine failed images from both attempts
|
||||
|
||||
@@ -1,6 +1,5 @@
import logging
import os
import re
import sys
import subprocess
from aiohttp import web

@@ -53,7 +53,7 @@ class MetadataUpdater:
            async def update_cache_func(old_path, new_path, metadata):
                return await scanner.update_single_model_cache(old_path, new_path, metadata)

            success = await ModelRouteUtils.fetch_and_update_model(
            success, error = await ModelRouteUtils.fetch_and_update_model(
                model_hash,
                file_path,
                model_data,
@@ -64,7 +64,7 @@ class MetadataUpdater:
                logger.info(f"Successfully refreshed metadata for {model_name}")
                return True
            else:
                logger.warning(f"Failed to refresh metadata for {model_name}")
                logger.warning(f"Failed to refresh metadata for {model_name}, {error}")
                return False

        except Exception as e:
@@ -23,19 +23,62 @@ class ExampleImagesProcessor:
|
||||
return ''.join(random.choice(chars) for _ in range(length))
|
||||
|
||||
@staticmethod
|
||||
def get_civitai_optimized_url(image_url):
|
||||
"""Convert Civitai image URL to its optimized WebP version"""
|
||||
def get_civitai_optimized_url(media_url):
|
||||
"""Convert Civitai media URL (image or video) to its optimized version"""
|
||||
base_pattern = r'(https://image\.civitai\.com/[^/]+/[^/]+)'
|
||||
match = re.match(base_pattern, image_url)
|
||||
match = re.match(base_pattern, media_url)
|
||||
|
||||
if match:
|
||||
base_url = match.group(1)
|
||||
return f"{base_url}/optimized=true/image.webp"
|
||||
return f"{base_url}/optimized=true"
|
||||
|
||||
return image_url
|
||||
return media_url
|
||||
|
||||
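For illustration, the regex keeps only the host plus the first two path segments of a Civitai media URL and appends the optimized flag. The URL below is made up but follows the usual shape:

    # Illustrative input/output for get_civitai_optimized_url
    url = "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/0a1b2c3d-0000-0000-0000-000000000000/width=450/example.jpeg"
    ExampleImagesProcessor.get_civitai_optimized_url(url)
    # -> "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/0a1b2c3d-0000-0000-0000-000000000000/optimized=true"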
@staticmethod
|
||||
async def download_model_images(model_hash, model_name, model_images, model_dir, optimize, independent_session):
|
||||
def _get_file_extension_from_content_or_headers(content, headers, fallback_url=None):
|
||||
"""Determine file extension from content magic bytes or headers"""
|
||||
# Check magic bytes for common formats
|
||||
if content:
|
||||
if content.startswith(b'\xFF\xD8\xFF'):
|
||||
return '.jpg'
|
||||
elif content.startswith(b'\x89PNG\r\n\x1A\n'):
|
||||
return '.png'
|
||||
elif content.startswith(b'GIF87a') or content.startswith(b'GIF89a'):
|
||||
return '.gif'
|
||||
elif content.startswith(b'RIFF') and b'WEBP' in content[:12]:
|
||||
return '.webp'
|
||||
elif content.startswith(b'\x00\x00\x00\x18ftypmp4') or content.startswith(b'\x00\x00\x00\x20ftypmp4'):
|
||||
return '.mp4'
|
||||
elif content.startswith(b'\x1A\x45\xDF\xA3'):
|
||||
return '.webm'
|
||||
|
||||
# Check Content-Type header
|
||||
if headers:
|
||||
content_type = headers.get('content-type', '').lower()
|
||||
type_map = {
|
||||
'image/jpeg': '.jpg',
|
||||
'image/png': '.png',
|
||||
'image/gif': '.gif',
|
||||
'image/webp': '.webp',
|
||||
'video/mp4': '.mp4',
|
||||
'video/webm': '.webm',
|
||||
'video/quicktime': '.mov'
|
||||
}
|
||||
if content_type in type_map:
|
||||
return type_map[content_type]
|
||||
|
||||
# Fallback to URL extension if available
|
||||
if fallback_url:
|
||||
filename = os.path.basename(fallback_url.split('?')[0])
|
||||
ext = os.path.splitext(filename)[1].lower()
|
||||
if ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or ext in SUPPORTED_MEDIA_EXTENSIONS['videos']:
|
||||
return ext
|
||||
|
||||
# Default fallback
|
||||
return '.jpg'
|
||||
|
||||
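A quick illustration of the detection order: magic bytes first, then the Content-Type header, then the URL suffix:

    # PNG magic bytes win even if the URL has no extension
    ExampleImagesProcessor._get_file_extension_from_content_or_headers(
        b'\x89PNG\r\n\x1a\n' + b'\x00' * 16, headers={}, fallback_url=None
    )  # -> '.png'

    # No recognizable content: fall back to the Content-Type header
    ExampleImagesProcessor._get_file_extension_from_content_or_headers(
        b'', headers={'content-type': 'video/mp4'}, fallback_url=None
    )  # -> '.mp4'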
@staticmethod
|
||||
async def download_model_images(model_hash, model_name, model_images, model_dir, optimize, downloader):
|
||||
"""Download images for a single model
|
||||
|
||||
Returns:
|
||||
@@ -48,53 +91,59 @@ class ExampleImagesProcessor:
|
||||
if not image_url:
|
||||
continue
|
||||
|
||||
# Get image filename from URL
|
||||
image_filename = os.path.basename(image_url.split('?')[0])
|
||||
image_ext = os.path.splitext(image_filename)[1].lower()
|
||||
|
||||
# Handle images and videos
|
||||
is_image = image_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
|
||||
is_video = image_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
|
||||
|
||||
if not (is_image or is_video):
|
||||
logger.debug(f"Skipping unsupported file type: {image_filename}")
|
||||
continue
|
||||
|
||||
# Use 0-based indexing instead of 1-based indexing
|
||||
save_filename = f"image_{i}{image_ext}"
|
||||
|
||||
# If optimizing images and this is a Civitai image, use their pre-optimized WebP version
|
||||
if is_image and optimize and 'civitai.com' in image_url:
|
||||
# Apply optimization for Civitai URLs if enabled
|
||||
original_url = image_url
|
||||
if optimize and 'civitai.com' in image_url:
|
||||
image_url = ExampleImagesProcessor.get_civitai_optimized_url(image_url)
|
||||
save_filename = f"image_{i}.webp"
|
||||
|
||||
# Check if already downloaded
|
||||
save_path = os.path.join(model_dir, save_filename)
|
||||
if os.path.exists(save_path):
|
||||
logger.debug(f"File already exists: {save_path}")
|
||||
continue
|
||||
|
||||
# Download the file
|
||||
# Download the file first to determine the actual file type
|
||||
try:
|
||||
logger.debug(f"Downloading {save_filename} for {model_name}")
|
||||
logger.debug(f"Downloading media file {i} for {model_name}")
|
||||
|
||||
# Download directly using the independent session
|
||||
async with independent_session.get(image_url, timeout=60) as response:
|
||||
if response.status == 200:
|
||||
with open(save_path, 'wb') as f:
|
||||
async for chunk in response.content.iter_chunked(8192):
|
||||
if chunk:
|
||||
f.write(chunk)
|
||||
elif response.status == 404:
|
||||
error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
|
||||
logger.warning(error_msg)
|
||||
model_success = False # Mark the model as failed due to 404 error
|
||||
# Return early to trigger metadata refresh attempt
|
||||
return False, True # (success, is_metadata_stale)
|
||||
else:
|
||||
error_msg = f"Failed to download file: {image_url}, status code: {response.status}"
|
||||
logger.warning(error_msg)
|
||||
model_success = False # Mark the model as failed
|
||||
# Download using the unified downloader with headers
|
||||
success, content, headers = await downloader.download_to_memory(
|
||||
image_url,
|
||||
use_auth=False, # Example images don't need auth
|
||||
return_headers=True
|
||||
)
|
||||
|
||||
if success:
|
||||
# Determine file extension from content or headers
|
||||
media_ext = ExampleImagesProcessor._get_file_extension_from_content_or_headers(
|
||||
content, headers, original_url
|
||||
)
|
||||
|
||||
# Check if the detected file type is supported
|
||||
is_image = media_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
|
||||
is_video = media_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
|
||||
|
||||
if not (is_image or is_video):
|
||||
logger.debug(f"Skipping unsupported file type: {media_ext}")
|
||||
continue
|
||||
|
||||
# Use 0-based indexing with the detected extension
|
||||
save_filename = f"image_{i}{media_ext}"
|
||||
save_path = os.path.join(model_dir, save_filename)
|
||||
|
||||
# Check if already downloaded
|
||||
if os.path.exists(save_path):
|
||||
logger.debug(f"File already exists: {save_path}")
|
||||
continue
|
||||
|
||||
# Save the file
|
||||
with open(save_path, 'wb') as f:
|
||||
f.write(content)
|
||||
|
||||
elif "404" in str(content):
|
||||
error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
|
||||
logger.warning(error_msg)
|
||||
model_success = False # Mark the model as failed due to 404 error
|
||||
# Return early to trigger metadata refresh attempt
|
||||
return False, True # (success, is_metadata_stale)
|
||||
else:
|
||||
error_msg = f"Failed to download file: {image_url}, error: {content}"
|
||||
logger.warning(error_msg)
|
||||
model_success = False # Mark the model as failed
|
||||
except Exception as e:
|
||||
error_msg = f"Error downloading file {image_url}: {str(e)}"
|
||||
logger.error(error_msg)
|
||||
@@ -103,7 +152,7 @@ class ExampleImagesProcessor:
|
||||
return model_success, False # (success, is_metadata_stale)
|
||||
|
||||
@staticmethod
|
||||
async def download_model_images_with_tracking(model_hash, model_name, model_images, model_dir, optimize, independent_session):
|
||||
async def download_model_images_with_tracking(model_hash, model_name, model_images, model_dir, optimize, downloader):
|
||||
"""Download images for a single model with tracking of failed image URLs
|
||||
|
||||
Returns:
|
||||
@@ -117,55 +166,61 @@ class ExampleImagesProcessor:
|
||||
if not image_url:
|
||||
continue
|
||||
|
||||
# Get image filename from URL
|
||||
image_filename = os.path.basename(image_url.split('?')[0])
|
||||
image_ext = os.path.splitext(image_filename)[1].lower()
|
||||
|
||||
# Handle images and videos
|
||||
is_image = image_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
|
||||
is_video = image_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
|
||||
|
||||
if not (is_image or is_video):
|
||||
logger.debug(f"Skipping unsupported file type: {image_filename}")
|
||||
continue
|
||||
|
||||
# Use 0-based indexing instead of 1-based indexing
|
||||
save_filename = f"image_{i}{image_ext}"
|
||||
|
||||
# If optimizing images and this is a Civitai image, use their pre-optimized WebP version
|
||||
if is_image and optimize and 'civitai.com' in image_url:
|
||||
# Apply optimization for Civitai URLs if enabled
|
||||
original_url = image_url
|
||||
if optimize and 'civitai.com' in image_url:
|
||||
image_url = ExampleImagesProcessor.get_civitai_optimized_url(image_url)
|
||||
save_filename = f"image_{i}.webp"
|
||||
|
||||
# Check if already downloaded
|
||||
save_path = os.path.join(model_dir, save_filename)
|
||||
if os.path.exists(save_path):
|
||||
logger.debug(f"File already exists: {save_path}")
|
||||
continue
|
||||
|
||||
# Download the file
|
||||
# Download the file first to determine the actual file type
|
||||
try:
|
||||
logger.debug(f"Downloading {save_filename} for {model_name}")
|
||||
logger.debug(f"Downloading media file {i} for {model_name}")
|
||||
|
||||
# Download directly using the independent session
|
||||
async with independent_session.get(image_url, timeout=60) as response:
|
||||
if response.status == 200:
|
||||
with open(save_path, 'wb') as f:
|
||||
async for chunk in response.content.iter_chunked(8192):
|
||||
if chunk:
|
||||
f.write(chunk)
|
||||
elif response.status == 404:
|
||||
error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
|
||||
logger.warning(error_msg)
|
||||
model_success = False # Mark the model as failed due to 404 error
|
||||
failed_images.append(image_url) # Track failed URL
|
||||
# Return early to trigger metadata refresh attempt
|
||||
return False, True, failed_images # (success, is_metadata_stale, failed_images)
|
||||
else:
|
||||
error_msg = f"Failed to download file: {image_url}, status code: {response.status}"
|
||||
logger.warning(error_msg)
|
||||
model_success = False # Mark the model as failed
|
||||
failed_images.append(image_url) # Track failed URL
|
||||
# Download using the unified downloader with headers
|
||||
success, content, headers = await downloader.download_to_memory(
|
||||
image_url,
|
||||
use_auth=False, # Example images don't need auth
|
||||
return_headers=True
|
||||
)
|
||||
|
||||
if success:
|
||||
# Determine file extension from content or headers
|
||||
media_ext = ExampleImagesProcessor._get_file_extension_from_content_or_headers(
|
||||
content, headers, original_url
|
||||
)
|
||||
|
||||
# Check if the detected file type is supported
|
||||
is_image = media_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
|
||||
is_video = media_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
|
||||
|
||||
if not (is_image or is_video):
|
||||
logger.debug(f"Skipping unsupported file type: {media_ext}")
|
||||
continue
|
||||
|
||||
# Use 0-based indexing with the detected extension
|
||||
save_filename = f"image_{i}{media_ext}"
|
||||
save_path = os.path.join(model_dir, save_filename)
|
||||
|
||||
# Check if already downloaded
|
||||
if os.path.exists(save_path):
|
||||
logger.debug(f"File already exists: {save_path}")
|
||||
continue
|
||||
|
||||
# Save the file
|
||||
with open(save_path, 'wb') as f:
|
||||
f.write(content)
|
||||
|
||||
elif "404" in str(content):
|
||||
error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
|
||||
logger.warning(error_msg)
|
||||
model_success = False # Mark the model as failed due to 404 error
|
||||
failed_images.append(image_url) # Track failed URL
|
||||
# Return early to trigger metadata refresh attempt
|
||||
return False, True, failed_images # (success, is_metadata_stale, failed_images)
|
||||
else:
|
||||
error_msg = f"Failed to download file: {image_url}, error: {content}"
|
||||
logger.warning(error_msg)
|
||||
model_success = False # Mark the model as failed
|
||||
failed_images.append(image_url) # Track failed URL
|
||||
except Exception as e:
|
||||
error_msg = f"Error downloading file {image_url}: {str(e)}"
|
||||
logger.error(error_msg)
|
||||
@@ -565,4 +620,7 @@ class ExampleImagesProcessor:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
}, status=500)
|
||||
|
||||
|
||||
|
||||
@@ -24,6 +24,8 @@ class BaseModelMetadata:
    civitai_deleted: bool = False  # Whether deleted from Civitai
    favorite: bool = False  # Whether the model is a favorite
    exclude: bool = False  # Whether to exclude this model from the cache
    db_checked: bool = False  # Whether checked in archive DB
    last_checked_at: float = 0  # Last checked timestamp
    _unknown_fields: Dict[str, Any] = field(default_factory=dict, repr=False, compare=False)  # Store unknown fields

    def __post_init__(self):
@@ -3,16 +3,18 @@ import json
import logging
from typing import Dict, List, Callable, Awaitable
from aiohttp import web
from datetime import datetime

from .model_utils import determine_base_model
from .constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
from ..config import config
from ..services.civitai_client import CivitaiClient
from ..services.service_registry import ServiceRegistry
from ..services.downloader import get_downloader
from ..utils.exif_utils import ExifUtils
from ..utils.metadata_manager import MetadataManager
from ..services.download_manager import DownloadManager
from ..services.websocket_manager import ws_manager
from ..services.metadata_service import get_default_metadata_provider, get_metadata_provider
from ..services.settings_manager import settings

logger = logging.getLogger(__name__)

@@ -37,59 +39,72 @@ class ModelRouteUtils:
|
||||
local_metadata['from_civitai'] = False
|
||||
await MetadataManager.save_metadata(metadata_path, local_metadata)
|
||||
|
||||
@staticmethod
|
||||
def is_civitai_api_metadata(meta: dict) -> bool:
|
||||
"""
|
||||
Determine if the given civitai metadata is from the civitai API.
|
||||
Returns True if both 'files' and 'images' exist and are non-empty,
|
||||
and the 'source' is not 'archive_db'.
|
||||
"""
|
||||
if not isinstance(meta, dict):
|
||||
return False
|
||||
files = meta.get('files')
|
||||
images = meta.get('images')
|
||||
source = meta.get('source')
|
||||
return bool(files) and bool(images) and source != 'archive_db'
|
||||
|
||||
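Concretely, metadata recovered from the archive database is never treated as full CivitAI API metadata, even when it carries files and images:

    ModelRouteUtils.is_civitai_api_metadata({'files': [{}], 'images': [{}], 'source': 'archive_db'})  # -> False
    ModelRouteUtils.is_civitai_api_metadata({'files': [{}], 'images': [{}]})                           # -> True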
@staticmethod
|
||||
async def update_model_metadata(metadata_path: str, local_metadata: Dict,
|
||||
civitai_metadata: Dict, client: CivitaiClient) -> None:
|
||||
civitai_metadata: Dict, metadata_provider=None) -> None:
|
||||
"""Update local metadata with CivitAI data"""
|
||||
# Save existing trainedWords and customImages if they exist
|
||||
existing_civitai = local_metadata.get('civitai') or {} # Use empty dict if None
|
||||
|
||||
# Create a new civitai metadata by updating existing with new
|
||||
merged_civitai = existing_civitai.copy()
|
||||
merged_civitai.update(civitai_metadata)
|
||||
# Check if we should skip the update to avoid overwriting richer data
|
||||
if civitai_metadata.get('source') == 'archive_db' and ModelRouteUtils.is_civitai_api_metadata(existing_civitai):
|
||||
logger.info(f"Skip civitai update for {local_metadata.get('model_name', '')} ({existing_civitai.get('name', '')})")
|
||||
else:
|
||||
# Create a new civitai metadata by updating existing with new
|
||||
merged_civitai = existing_civitai.copy()
|
||||
merged_civitai.update(civitai_metadata)
|
||||
|
||||
# Special handling for trainedWords - ensure we don't lose any existing trained words
|
||||
if 'trainedWords' in existing_civitai:
|
||||
existing_trained_words = existing_civitai.get('trainedWords', [])
|
||||
new_trained_words = civitai_metadata.get('trainedWords', [])
|
||||
# Use a set to combine words without duplicates, then convert back to list
|
||||
merged_trained_words = list(set(existing_trained_words + new_trained_words))
|
||||
merged_civitai['trainedWords'] = merged_trained_words
|
||||
if civitai_metadata.get('source') == 'archive_db':
|
||||
model_name = civitai_metadata.get('model', {}).get('name', '')
|
||||
version_name = civitai_metadata.get('name', '')
|
||||
logger.info(f"Recovered metadata from archive_db for deleted model: {model_name} ({version_name})")
|
||||
|
||||
# Update local metadata with merged civitai data
|
||||
local_metadata['civitai'] = merged_civitai
|
||||
local_metadata['from_civitai'] = True
|
||||
# Special handling for trainedWords - ensure we don't lose any existing trained words
|
||||
if 'trainedWords' in existing_civitai:
|
||||
existing_trained_words = existing_civitai.get('trainedWords', [])
|
||||
new_trained_words = civitai_metadata.get('trainedWords', [])
|
||||
# Use a set to combine words without duplicates, then convert back to list
|
||||
merged_trained_words = list(set(existing_trained_words + new_trained_words))
|
||||
merged_civitai['trainedWords'] = merged_trained_words
|
||||
|
||||
# Update local metadata with merged civitai data
|
||||
local_metadata['civitai'] = merged_civitai
|
||||
|
||||
# Update model name if available
|
||||
if 'model' in civitai_metadata:
|
||||
if civitai_metadata.get('model', {}).get('name'):
|
||||
local_metadata['model_name'] = civitai_metadata['model']['name']
|
||||
|
||||
# Extract model metadata directly from civitai_metadata if available
|
||||
model_metadata = None
|
||||
# Update model-related metadata from civitai_metadata.model
|
||||
if 'model' in civitai_metadata and civitai_metadata['model']:
|
||||
model_data = civitai_metadata['model']
|
||||
|
||||
if 'model' in civitai_metadata and civitai_metadata.get('model'):
|
||||
# Data is already available in the response from get_model_version
|
||||
model_metadata = {
|
||||
'description': civitai_metadata.get('model', {}).get('description', ''),
|
||||
'tags': civitai_metadata.get('model', {}).get('tags', []),
|
||||
'creator': civitai_metadata.get('creator', {})
|
||||
}
|
||||
# Update model name if available and not already set
|
||||
if model_data.get('name'):
|
||||
local_metadata['model_name'] = model_data['name']
|
||||
|
||||
# If we have modelId and don't have enough metadata, fetch additional data
|
||||
if not model_metadata or not model_metadata.get('description'):
|
||||
model_id = civitai_metadata.get('modelId')
|
||||
if model_id:
|
||||
fetched_metadata, _ = await client.get_model_metadata(str(model_id))
|
||||
if fetched_metadata:
|
||||
model_metadata = fetched_metadata
|
||||
# Update modelDescription if missing or empty in local_metadata
|
||||
if not local_metadata.get('modelDescription') and model_data.get('description'):
|
||||
local_metadata['modelDescription'] = model_data['description']
|
||||
|
||||
# Update local metadata with the model information
|
||||
if model_metadata:
|
||||
local_metadata['modelDescription'] = model_metadata.get('description', '')
|
||||
local_metadata['tags'] = model_metadata.get('tags', [])
|
||||
if 'creator' in model_metadata and model_metadata['creator']:
|
||||
local_metadata['civitai']['creator'] = model_metadata['creator']
|
||||
# Update tags if missing or empty in local_metadata
|
||||
if not local_metadata.get('tags') and model_data.get('tags'):
|
||||
local_metadata['tags'] = model_data['tags']
|
||||
|
||||
# Update creator in civitai metadata if missing
|
||||
if model_data.get('creator') and not local_metadata.get('civitai', {}).get('creator'):
|
||||
if 'civitai' not in local_metadata:
|
||||
local_metadata['civitai'] = {}
|
||||
local_metadata['civitai']['creator'] = model_data['creator']
|
||||
|
||||
# Update base model
|
||||
local_metadata['base_model'] = determine_base_model(civitai_metadata.get('baseModel'))
|
||||
@@ -113,22 +128,28 @@ class ModelRouteUtils:
|
||||
preview_path = os.path.join(os.path.dirname(metadata_path), preview_filename)
|
||||
|
||||
if is_video:
|
||||
# Download video as is
|
||||
if await client.download_preview_image(first_preview['url'], preview_path):
|
||||
# Download video as is using downloader
|
||||
downloader = await get_downloader()
|
||||
success, result = await downloader.download_file(
|
||||
first_preview['url'],
|
||||
preview_path,
|
||||
use_auth=False
|
||||
)
|
||||
if success:
|
||||
local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
|
||||
local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
|
||||
else:
|
||||
# For images, download and then optimize to WebP
|
||||
temp_path = preview_path + ".temp"
|
||||
if await client.download_preview_image(first_preview['url'], temp_path):
|
||||
# For images, download and then optimize to WebP using downloader
|
||||
downloader = await get_downloader()
|
||||
success, content, headers = await downloader.download_to_memory(
|
||||
first_preview['url'],
|
||||
use_auth=False
|
||||
)
|
||||
if success:
|
||||
try:
|
||||
# Read the downloaded image
|
||||
with open(temp_path, 'rb') as f:
|
||||
image_data = f.read()
|
||||
|
||||
# Optimize and convert to WebP
|
||||
optimized_data, _ = ExifUtils.optimize_image(
|
||||
image_data=image_data,
|
||||
image_data=content, # Use downloaded content directly
|
||||
target_width=CARD_PREVIEW_WIDTH,
|
||||
format='webp',
|
||||
quality=85,
|
||||
@@ -143,17 +164,16 @@ class ModelRouteUtils:
|
||||
local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
|
||||
local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
|
||||
|
||||
# Remove the temporary file
|
||||
if os.path.exists(temp_path):
|
||||
os.remove(temp_path)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error optimizing preview image: {e}")
|
||||
# If optimization fails, try to use the downloaded image directly
|
||||
if os.path.exists(temp_path):
|
||||
os.rename(temp_path, preview_path)
|
||||
# If optimization fails, save the original content
|
||||
try:
|
||||
with open(preview_path, 'wb') as f:
|
||||
f.write(content)
|
||||
local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
|
||||
local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
|
||||
except Exception as save_error:
|
||||
logger.error(f"Error saving preview image: {save_error}")
|
||||
|
||||
# Save updated metadata
|
||||
await MetadataManager.save_metadata(metadata_path, local_metadata)
|
||||
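For reference, the two unified-downloader entry points used throughout these hunks, condensed into one sketch; the keyword arguments shown are only those visible in this diff:

    # Sketch of the unified downloader calls replacing the per-task aiohttp sessions
    downloader = await get_downloader()

    # Stream straight to disk (used for video previews)
    success, result = await downloader.download_file(url, preview_path, use_auth=False)

    # Buffer in memory so the bytes can be optimized before saving (used for image previews)
    success, content, headers = await downloader.download_to_memory(url, use_auth=False, return_headers=True)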
@@ -164,7 +184,7 @@ class ModelRouteUtils:
|
||||
file_path: str,
|
||||
model_data: dict,
|
||||
update_cache_func: Callable[[str, str, Dict], Awaitable[bool]]
|
||||
) -> bool:
|
||||
) -> tuple[bool, str]:
|
||||
"""Fetch and update metadata for a single model
|
||||
|
||||
Args:
|
||||
@@ -174,59 +194,82 @@ class ModelRouteUtils:
|
||||
update_cache_func: Function to update the cache with new metadata
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False otherwise
|
||||
tuple[bool, str]: (success, error_message). When success is True, error_message is None.
|
||||
"""
|
||||
client = CivitaiClient()
|
||||
try:
|
||||
# Validate input parameters
|
||||
if not isinstance(model_data, dict):
|
||||
logger.error(f"Invalid model_data type: {type(model_data)}")
|
||||
return False
|
||||
error_msg = f"Invalid model_data type: {type(model_data)}"
|
||||
logger.error(error_msg)
|
||||
return False, error_msg
|
||||
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
|
||||
# Check if model metadata exists
|
||||
local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
enable_metadata_archive_db = settings.get('enable_metadata_archive_db', False)
|
||||
|
||||
# Fetch metadata from Civitai
|
||||
civitai_metadata = await client.get_model_by_hash(sha256)
|
||||
if model_data.get('civitai_deleted') is True:
|
||||
# If CivitAI deleted flag is set, skip CivitAI API provider
|
||||
if not enable_metadata_archive_db or model_data.get('db_checked') is True:
|
||||
return False, "CivitAI model is deleted and metadata archive DB is not enabled"
|
||||
# Likely deleted from CivitAI, use archive_db if available
|
||||
metadata_provider = await get_metadata_provider('sqlite')
|
||||
else:
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
|
||||
civitai_metadata, error = await metadata_provider.get_model_by_hash(sha256)
|
||||
if not civitai_metadata:
|
||||
# Mark as not from CivitAI if not found
|
||||
local_metadata['from_civitai'] = False
|
||||
model_data['from_civitai'] = False
|
||||
await MetadataManager.save_metadata(file_path, local_metadata)
|
||||
return False
|
||||
if error == "Model not found":
|
||||
model_data['from_civitai'] = False
|
||||
model_data['civitai_deleted'] = True
|
||||
model_data['db_checked'] = enable_metadata_archive_db
|
||||
model_data['last_checked_at'] = datetime.now().timestamp()
|
||||
|
||||
# Remove 'folder' key from model_data if present before saving
|
||||
data_to_save = model_data.copy()
|
||||
data_to_save.pop('folder', None)
|
||||
await MetadataManager.save_metadata(file_path, data_to_save)
|
||||
|
||||
# For other errors, log and return False with error message
|
||||
error_msg = f"Error fetching metadata: {error} (model_name={model_data.get('model_name', '')})"
|
||||
logger.error(error_msg)
|
||||
return False, error_msg
|
||||
|
||||
model_data['from_civitai'] = True
|
||||
model_data['civitai_deleted'] = civitai_metadata.get('source') == 'archive_db'
|
||||
model_data['db_checked'] = enable_metadata_archive_db
|
||||
model_data['last_checked_at'] = datetime.now().timestamp()
|
||||
|
||||
local_metadata = model_data.copy()
|
||||
local_metadata.pop('folder', None) # Remove 'folder' key if present
|
||||
|
||||
# Update metadata
|
||||
await ModelRouteUtils.update_model_metadata(
|
||||
metadata_path,
|
||||
local_metadata,
|
||||
civitai_metadata,
|
||||
client
|
||||
metadata_provider
|
||||
)
|
||||
|
||||
# Update cache object directly using safe .get() method
|
||||
update_dict = {
|
||||
'model_name': local_metadata.get('model_name'),
|
||||
'preview_url': local_metadata.get('preview_url'),
|
||||
'from_civitai': True,
|
||||
'civitai': civitai_metadata
|
||||
'civitai': local_metadata.get('civitai'),
|
||||
}
|
||||
model_data.update(update_dict)
|
||||
|
||||
# Update cache using the provided function
|
||||
await update_cache_func(file_path, file_path, local_metadata)
|
||||
|
||||
return True
|
||||
return True, None
|
||||
|
||||
except KeyError as e:
|
||||
logger.error(f"Error fetching CivitAI data - Missing key: {e} in model_data={model_data}")
|
||||
return False
|
||||
error_msg = f"Error fetching metadata - Missing key: {e} in model_data={model_data}"
|
||||
logger.error(error_msg)
|
||||
return False, error_msg
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching CivitAI data: {str(e)}", exc_info=True) # Include stack trace
|
||||
return False
|
||||
finally:
|
||||
await client.close()
|
||||
error_msg = f"Error fetching metadata: {str(e)}"
|
||||
logger.error(error_msg, exc_info=True) # Include stack trace
|
||||
return False, error_msg
|
||||
|
||||
@staticmethod
|
||||
def filter_civitai_data(data: Dict, minimal: bool = False) -> Dict:
|
||||
@@ -359,24 +402,22 @@ class ModelRouteUtils:
|
||||
if not local_metadata or not local_metadata.get('sha256'):
|
||||
return web.json_response({"success": False, "error": "No SHA256 hash found"}, status=400)
|
||||
|
||||
# Create a client for fetching from Civitai
|
||||
client = CivitaiClient()
|
||||
try:
|
||||
# Fetch and update metadata
|
||||
civitai_metadata = await client.get_model_by_hash(local_metadata["sha256"])
|
||||
if not civitai_metadata:
|
||||
await ModelRouteUtils.handle_not_found_on_civitai(metadata_path, local_metadata)
|
||||
return web.json_response({"success": False, "error": "Not found on CivitAI"}, status=404)
|
||||
# Get metadata provider and fetch from unified provider
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
|
||||
# Fetch and update metadata
|
||||
civitai_metadata, error = await metadata_provider.get_model_by_hash(local_metadata["sha256"])
|
||||
if not civitai_metadata:
|
||||
await ModelRouteUtils.handle_not_found_on_civitai(metadata_path, local_metadata)
|
||||
return web.json_response({"success": False, "error": error}, status=404)
|
||||
|
||||
await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, client)
|
||||
|
||||
# Update the cache
|
||||
await scanner.update_single_model_cache(data['file_path'], data['file_path'], local_metadata)
|
||||
|
||||
# Return the updated metadata along with success status
|
||||
return web.json_response({"success": True, "metadata": local_metadata})
|
||||
finally:
|
||||
await client.close()
|
||||
await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, metadata_provider)
|
||||
|
||||
# Update the cache
|
||||
await scanner.update_single_model_cache(data['file_path'], data['file_path'], local_metadata)
|
||||
|
||||
# Return the updated metadata along with success status
|
||||
return web.json_response({"success": True, "metadata": local_metadata})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching from CivitAI: {e}", exc_info=True)
|
||||
@@ -610,6 +651,7 @@ class ModelRouteUtils:
|
||||
}, status=400)
|
||||
|
||||
use_default_paths = data.get('use_default_paths', False)
|
||||
source = data.get('source') # Optional source parameter
|
||||
|
||||
# Pass the download_id to download_from_civitai
|
||||
result = await download_manager.download_from_civitai(
|
||||
@@ -619,7 +661,8 @@ class ModelRouteUtils:
|
||||
relative_path=data.get('relative_path', ''),
|
||||
use_default_paths=use_default_paths,
|
||||
progress_callback=progress_callback,
|
||||
download_id=download_id # Pass download_id explicitly
|
||||
download_id=download_id, # Pass download_id explicitly
|
||||
source=source # Pass source parameter
|
||||
)
|
||||
|
||||
# Include download_id in the response
|
||||
@@ -777,43 +820,38 @@ class ModelRouteUtils:
|
||||
# Check if model metadata exists
|
||||
local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
|
||||
# Create a client for fetching from Civitai
|
||||
client = await CivitaiClient.get_instance()
|
||||
try:
|
||||
# Fetch metadata using get_model_version which includes more comprehensive data
|
||||
civitai_metadata = await client.get_model_version(model_id, model_version_id)
|
||||
if not civitai_metadata:
|
||||
error_msg = f"Model version not found on CivitAI for ID: {model_id}"
|
||||
if model_version_id:
|
||||
error_msg += f" with version: {model_version_id}"
|
||||
return web.json_response({"success": False, "error": error_msg}, status=404)
|
||||
|
||||
# Try to find the primary model file to get the SHA256 hash
|
||||
primary_model_file = None
|
||||
for file in civitai_metadata.get('files', []):
|
||||
if file.get('primary', False) and file.get('type') == 'Model':
|
||||
primary_model_file = file
|
||||
break
|
||||
|
||||
# Update the SHA256 hash in local metadata if available
|
||||
if primary_model_file and primary_model_file.get('hashes', {}).get('SHA256'):
|
||||
local_metadata['sha256'] = primary_model_file['hashes']['SHA256'].lower()
|
||||
|
||||
# Update metadata with CivitAI information
|
||||
await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, client)
|
||||
|
||||
# Update the cache
|
||||
await scanner.update_single_model_cache(file_path, file_path, local_metadata)
|
||||
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"message": f"Model successfully re-linked to Civitai model {model_id}" +
|
||||
(f" version {model_version_id}" if model_version_id else ""),
|
||||
"hash": local_metadata.get('sha256', '')
|
||||
})
|
||||
|
||||
finally:
|
||||
await client.close()
|
||||
# Get metadata provider and fetch metadata using get_model_version which includes more comprehensive data
|
||||
metadata_provider = await get_default_metadata_provider()
|
||||
civitai_metadata = await metadata_provider.get_model_version(model_id, model_version_id)
|
||||
if not civitai_metadata:
|
||||
error_msg = f"Model version not found on CivitAI for ID: {model_id}"
|
||||
if model_version_id:
|
||||
error_msg += f" with version: {model_version_id}"
|
||||
return web.json_response({"success": False, "error": error_msg}, status=404)
|
||||
|
||||
# Try to find the primary model file to get the SHA256 hash
|
||||
primary_model_file = None
|
||||
for file in civitai_metadata.get('files', []):
|
||||
if file.get('primary', False) and file.get('type') == 'Model':
|
||||
primary_model_file = file
|
||||
break
|
||||
|
||||
# Update the SHA256 hash in local metadata if available
|
||||
if primary_model_file and primary_model_file.get('hashes', {}).get('SHA256'):
|
||||
local_metadata['sha256'] = primary_model_file['hashes']['SHA256'].lower()
|
||||
|
||||
# Update metadata with CivitAI information
|
||||
await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, metadata_provider)
|
||||
|
||||
# Update the cache
|
||||
await scanner.update_single_model_cache(file_path, file_path, local_metadata)
|
||||
|
||||
return web.json_response({
|
||||
"success": True,
|
||||
"message": f"Model successfully re-linked to Civitai model {model_id}" +
|
||||
(f" version {model_version_id}" if model_version_id else ""),
|
||||
"hash": local_metadata.get('sha256', '')
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error re-linking to CivitAI: {e}", exc_info=True)
|
||||
@@ -870,11 +908,11 @@ class ModelRouteUtils:
|
||||
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
|
||||
# Compare hashes
|
||||
stored_hash = metadata.get('sha256', '').lower()
|
||||
stored_hash = metadata.get('sha256', '').lower();
|
||||
|
||||
# Set expected hash from first file if not yet set
|
||||
if not expected_hash:
|
||||
expected_hash = stored_hash
|
||||
expected_hash = stored_hash;
|
||||
|
||||
# Check if hash matches expected hash
|
||||
if actual_hash != expected_hash:
|
||||
@@ -978,7 +1016,7 @@ class ModelRouteUtils:
|
||||
if os.path.exists(metadata_path):
|
||||
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
hash_value = metadata.get('sha256')
|
||||
|
||||
logger.info(f"hash_value: {hash_value}, metadata_path: {metadata_path}, metadata: {metadata}")
|
||||
# Rename all files
|
||||
renamed_files = []
|
||||
new_metadata_path = None
|
||||
@@ -1093,3 +1131,63 @@ class ModelRouteUtils:
|
||||
except Exception as e:
|
||||
logger.error(f"Error saving metadata: {e}", exc_info=True)
|
||||
return web.Response(text=str(e), status=500)
|
||||
|
||||
@staticmethod
|
||||
async def handle_add_tags(request: web.Request, scanner) -> web.Response:
|
||||
"""Handle adding tags to model metadata
|
||||
|
||||
Args:
|
||||
request: The aiohttp request
|
||||
scanner: The model scanner instance
|
||||
|
||||
Returns:
|
||||
web.Response: The HTTP response
|
||||
"""
|
||||
try:
|
||||
data = await request.json()
|
||||
file_path = data.get('file_path')
|
||||
new_tags = data.get('tags', [])
|
||||
|
||||
if not file_path:
|
||||
return web.Response(text='File path is required', status=400)
|
||||
|
||||
if not isinstance(new_tags, list):
|
||||
return web.Response(text='Tags must be a list', status=400)
|
||||
|
||||
# Get metadata file path
|
||||
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
|
||||
|
||||
# Load existing metadata
|
||||
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
|
||||
|
||||
# Get existing tags (case insensitive)
|
||||
existing_tags = metadata.get('tags', [])
|
||||
existing_tags_lower = [tag.lower() for tag in existing_tags]
|
||||
|
||||
# Add new tags that don't already exist (case insensitive check)
|
||||
tags_added = []
|
||||
for tag in new_tags:
|
||||
if isinstance(tag, str) and tag.strip():
|
||||
tag_stripped = tag.strip()
|
||||
if tag_stripped.lower() not in existing_tags_lower:
|
||||
existing_tags.append(tag_stripped)
|
||||
existing_tags_lower.append(tag_stripped.lower())
|
||||
tags_added.append(tag_stripped)
|
||||
|
||||
# Update metadata with combined tags
|
||||
metadata['tags'] = existing_tags
|
||||
|
||||
# Save updated metadata
|
||||
await MetadataManager.save_metadata(file_path, metadata)
|
||||
|
||||
# Update cache
|
||||
await scanner.update_single_model_cache(file_path, file_path, metadata)
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'tags': existing_tags
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error adding tags: {e}", exc_info=True)
|
||||
return web.Response(text=str(e), status=500)
|
||||
|
||||
@@ -62,13 +62,14 @@ class UsageStats:
            self._bg_task = asyncio.create_task(self._background_processor())

            self._initialized = True
            logger.info("Usage statistics tracker initialized")
            logger.debug("Usage statistics tracker initialized")

    def _get_stats_file_path(self) -> str:
        """Get the path to the stats JSON file"""
        if not config.loras_roots or len(config.loras_roots) == 0:
            # Fallback to temporary directory if no lora roots
            return os.path.join(config.temp_directory, self.STATS_FILENAME)
            # If no lora roots are available, we can't save stats
            # This will be handled by the caller
            raise RuntimeError("No LoRA root directories configured. Cannot initialize usage statistics.")

        # Use the first lora root
        return os.path.join(config.loras_roots[0], self.STATS_FILENAME)
@@ -163,7 +164,7 @@ class UsageStats:
            if "last_save_time" in loaded_stats:
                self.stats["last_save_time"] = loaded_stats["last_save_time"]

            logger.info(f"Loaded usage statistics from {self._stats_file_path}")
            logger.debug(f"Loaded usage statistics from {self._stats_file_path}")
        except Exception as e:
            logger.error(f"Error loading usage statistics: {e}")

@@ -154,7 +154,7 @@ def calculate_relative_path_for_model(model_data: Dict, model_type: str = 'lora'

    # For CivitAI models, prefer civitai data only if 'id' exists; for non-CivitAI models, use model_data directly
    if civitai_data and civitai_data.get('id') is not None:
        base_model = civitai_data.get('baseModel', '')
        base_model = model_data.get('base_model', '')
        # Get author from civitai creator data
        creator_info = civitai_data.get('creator') or {}
        author = creator_info.get('username') or 'Anonymous'

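A heavily simplified sketch of what this change affects when a path is built; the placeholder names are illustrative, since the real template syntax comes from the path template settings rather than this diff:

    # Illustrative only: how fields like base_model and author might fill a path template
    template = "{base_model}/{author}"  # hypothetical template string
    path = template.format(
        base_model=model_data.get('base_model', ''),  # now read from model_data, not civitai 'baseModel'
        author=(civitai_data.get('creator') or {}).get('username') or 'Anonymous',
    )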
@@ -1,7 +1,7 @@
[project]
name = "comfyui-lora-manager"
description = "Revolutionize your workflow with the ultimate LoRA companion for ComfyUI!"
version = "0.9.0"
version = "0.9.4"
license = {file = "LICENSE"}
dependencies = [
    "aiohttp",
@@ -12,7 +12,8 @@ dependencies = [
    "olefile", # for getting rid of warning message
    "toml",
    "natsort",
    "GitPython"
    "GitPython",
    "aiosqlite"
]

[project.urls]
38 refs/civitai.sql Normal file
@@ -0,0 +1,38 @@
CREATE TABLE models (
    id INTEGER PRIMARY KEY,
    name TEXT NOT NULL,
    type TEXT NOT NULL,
    username TEXT,
    data TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
) STRICT;
CREATE TABLE model_versions (
    id INTEGER PRIMARY KEY,
    model_id INTEGER NOT NULL,
    position INTEGER NOT NULL,
    name TEXT NOT NULL,
    base_model TEXT NOT NULL,
    published_at INTEGER,
    data TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
) STRICT;
CREATE INDEX model_versions_model_id_idx ON model_versions (model_id);
CREATE TABLE model_files (
    id INTEGER PRIMARY KEY,
    model_id INTEGER NOT NULL,
    version_id INTEGER NOT NULL,
    type TEXT NOT NULL,
    sha256 TEXT,
    data TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
) STRICT;
CREATE INDEX model_files_model_id_idx ON model_files (model_id);
CREATE INDEX model_files_version_id_idx ON model_files (version_id);
CREATE TABLE archived_model_files (
    file_id INTEGER PRIMARY KEY,
    model_id INTEGER NOT NULL,
    version_id INTEGER NOT NULL
) STRICT;
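Given this schema, a lookup by file hash presumably joins model_files to model_versions on version_id. A minimal aiosqlite sketch; the database path, the hash casing, and the assumption that the data columns hold raw CivitAI JSON are mine rather than part of this diff:

    import json
    import aiosqlite

    async def find_version_by_sha256(db_path: str, sha256: str):
        """Return the archived version JSON for a file hash, or None if not present."""
        async with aiosqlite.connect(db_path) as db:
            async with db.execute(
                "SELECT v.data FROM model_files f "
                "JOIN model_versions v ON v.id = f.version_id "
                "WHERE f.sha256 = ?",
                (sha256.upper(),),  # hash casing in the archive DB is an assumption
            ) as cursor:
                row = await cursor.fetchone()
        return json.loads(row[0]) if row else None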
@@ -8,3 +8,5 @@ toml
numpy
natsort
GitPython
aiosqlite
beautifulsoup4

305 scripts/sync_translation_keys.py Normal file
@@ -0,0 +1,305 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Translation Key Synchronization Script
|
||||
|
||||
This script synchronizes new translation keys from en.json to all other locale files
|
||||
while maintaining exact formatting consistency to pass test_i18n.py validation.
|
||||
|
||||
Features:
|
||||
- Preserves exact line-by-line formatting
|
||||
- Maintains proper indentation and structure
|
||||
- Adds missing keys with placeholder translations
|
||||
- Handles nested objects correctly
|
||||
- Ensures all locale files have identical structure
|
||||
|
||||
Usage:
|
||||
python scripts/sync_translation_keys.py [--dry-run] [--verbose]
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import re
|
||||
import argparse
|
||||
from typing import Dict, List, Set, Tuple, Any, Optional
|
||||
from collections import OrderedDict
|
||||
|
||||
# Add the parent directory to the path so we can import modules if needed
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
|
||||
|
||||
class TranslationKeySynchronizer:
|
||||
"""Synchronizes translation keys across locale files while maintaining formatting."""
|
||||
|
||||
def __init__(self, locales_dir: str, verbose: bool = False):
|
||||
self.locales_dir = locales_dir
|
||||
self.verbose = verbose
|
||||
self.reference_locale = 'en'
|
||||
self.target_locales = ['zh-CN', 'zh-TW', 'ja', 'ru', 'de', 'fr', 'es', 'ko']
|
||||
|
||||
def log(self, message: str, level: str = 'INFO'):
|
||||
"""Log a message if verbose mode is enabled."""
|
||||
if self.verbose or level == 'ERROR':
|
||||
print(f"[{level}] {message}")
|
||||
|
||||
def load_json_preserve_order(self, file_path: str) -> Tuple[Dict[str, Any], List[str]]:
|
||||
"""
|
||||
Load a JSON file preserving the exact order and formatting.
|
||||
Returns both the parsed data and the original lines.
|
||||
"""
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
lines = f.readlines()
|
||||
content = ''.join(lines)
|
||||
|
||||
# Parse JSON while preserving order
|
||||
data = json.loads(content, object_pairs_hook=OrderedDict)
|
||||
return data, lines
|
||||
|
||||
def get_all_leaf_keys(self, data: Any, prefix: str = '') -> Dict[str, Any]:
|
||||
"""
|
||||
Extract all leaf keys (non-object values) with their full paths.
|
||||
Returns a dictionary mapping full key paths to their values.
|
||||
"""
|
||||
keys = {}
|
||||
|
||||
if isinstance(data, (dict, OrderedDict)):
|
||||
for key, value in data.items():
|
||||
full_key = f"{prefix}.{key}" if prefix else key
|
||||
|
||||
if isinstance(value, (dict, OrderedDict)):
|
||||
# Recursively get nested keys
|
||||
keys.update(self.get_all_leaf_keys(value, full_key))
|
||||
else:
|
||||
# Leaf node - actual translatable value
|
||||
keys[full_key] = value
|
||||
|
||||
return keys
|
||||
|
||||
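For example, nested locale objects flatten into dotted key paths (the key names here are made up):

    sync = TranslationKeySynchronizer(locales_dir='locales')
    sync.get_all_leaf_keys({'modals': {'download': {'title': 'Download'}}})
    # -> {'modals.download.title': 'Download'}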
def merge_json_structures(self, reference_data: Dict[str, Any], target_data: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Merge the reference JSON structure with existing target translations.
|
||||
This creates a new structure that matches the reference exactly but preserves
|
||||
existing translations where available. Keys not in reference are removed.
|
||||
"""
|
||||
def merge_recursive(ref_obj, target_obj):
|
||||
if isinstance(ref_obj, (dict, OrderedDict)):
|
||||
result = OrderedDict()
|
||||
# Only include keys that exist in the reference
|
||||
for key, ref_value in ref_obj.items():
|
||||
if key in target_obj and isinstance(target_obj[key], type(ref_value)):
|
||||
# Key exists in target with same type
|
||||
if isinstance(ref_value, (dict, OrderedDict)):
|
||||
# Recursively merge nested objects
|
||||
result[key] = merge_recursive(ref_value, target_obj[key])
|
||||
else:
|
||||
# Use existing translation
|
||||
result[key] = target_obj[key]
|
||||
else:
|
||||
# Key missing in target or type mismatch
|
||||
if isinstance(ref_value, (dict, OrderedDict)):
|
||||
# Recursively handle nested objects
|
||||
result[key] = merge_recursive(ref_value, {})
|
||||
else:
|
||||
# Create placeholder translation
|
||||
result[key] = f"[TODO: Translate] {ref_value}"
|
||||
return result
|
||||
else:
|
||||
# For non-dict values, use reference (this shouldn't happen at root level)
|
||||
return ref_obj
|
||||
|
||||
return merge_recursive(reference_data, target_data)
|
||||
|
||||
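And the merge keeps existing translations, stamps a TODO placeholder on anything new, and drops keys that no longer exist in en.json (again with made-up keys):

    sync = TranslationKeySynchronizer(locales_dir='locales')
    ref    = {'greeting': 'Hello', 'farewell': 'Goodbye'}
    target = {'greeting': 'Bonjour', 'obsolete': 'Vieux'}
    sync.merge_json_structures(ref, target)
    # -> {'greeting': 'Bonjour', 'farewell': '[TODO: Translate] Goodbye'}  ('obsolete' is dropped)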
def format_json_like_reference(self, data: Dict[str, Any], reference_lines: List[str]) -> List[str]:
|
||||
"""
|
||||
Format the merged JSON data to match the reference file's formatting exactly.
|
||||
"""
|
||||
# Use json.dumps with proper formatting to match the reference style
|
||||
formatted_json = json.dumps(data, indent=4, ensure_ascii=False, separators=(',', ': '))
|
||||
|
||||
# Split into lines and ensure consistent line endings
|
||||
formatted_lines = [line + '\n' for line in formatted_json.split('\n')]
|
||||
|
||||
# Make sure the last line doesn't have extra newlines
|
||||
if formatted_lines and formatted_lines[-1].strip() == '':
|
||||
formatted_lines = formatted_lines[:-1]
|
||||
|
||||
# Ensure the last line ends with just a newline
|
||||
if formatted_lines and not formatted_lines[-1].endswith('\n'):
|
||||
formatted_lines[-1] += '\n'
|
||||
|
||||
return formatted_lines
|
||||
|
||||
def synchronize_locale_simple(self, locale: str, reference_data: Dict[str, Any],
|
||||
reference_lines: List[str], dry_run: bool = False) -> bool:
|
||||
"""
|
||||
Synchronize a locale file using JSON structure merging.
|
||||
Handles both addition of missing keys and removal of obsolete keys.
|
||||
"""
|
||||
locale_file = os.path.join(self.locales_dir, f'{locale}.json')
|
||||
|
||||
if not os.path.exists(locale_file):
|
||||
self.log(f"Locale file {locale_file} does not exist!", 'ERROR')
|
||||
return False
|
||||
|
||||
try:
|
||||
target_data, _ = self.load_json_preserve_order(locale_file)
|
||||
except Exception as e:
|
||||
self.log(f"Error loading {locale_file}: {e}", 'ERROR')
|
||||
return False
|
||||
|
||||
# Get keys to check for differences
|
||||
ref_keys = self.get_all_leaf_keys(reference_data)
|
||||
target_keys = self.get_all_leaf_keys(target_data)
|
||||
missing_keys = set(ref_keys.keys()) - set(target_keys.keys())
|
||||
obsolete_keys = set(target_keys.keys()) - set(ref_keys.keys())
|
||||
|
||||
if not missing_keys and not obsolete_keys:
|
||||
self.log(f"Locale {locale} is already up to date")
|
||||
return False
|
||||
|
||||
# Report changes
|
||||
if missing_keys:
|
||||
self.log(f"Found {len(missing_keys)} missing keys in {locale}:")
|
||||
for key in sorted(missing_keys):
|
||||
self.log(f" + {key}")
|
||||
|
||||
if obsolete_keys:
|
||||
self.log(f"Found {len(obsolete_keys)} obsolete keys in {locale}:")
|
||||
for key in sorted(obsolete_keys):
|
||||
self.log(f" - {key}")
|
||||
|
||||
if dry_run:
|
||||
total_changes = len(missing_keys) + len(obsolete_keys)
|
||||
self.log(f"DRY RUN: Would update {locale} with {len(missing_keys)} additions and {len(obsolete_keys)} deletions ({total_changes} total changes)")
|
||||
return True
|
||||
|
||||
# Merge the structures (this will both add missing keys and remove obsolete ones)
|
||||
try:
|
||||
merged_data = self.merge_json_structures(reference_data, target_data)
|
||||
|
||||
# Format to match reference style
|
||||
new_lines = self.format_json_like_reference(merged_data, reference_lines)
|
||||
|
||||
# Validate that the result is valid JSON
|
||||
reconstructed_content = ''.join(new_lines)
|
||||
json.loads(reconstructed_content) # This will raise an exception if invalid
|
||||
|
||||
# Write the updated file
|
||||
with open(locale_file, 'w', encoding='utf-8') as f:
|
||||
f.writelines(new_lines)
|
||||
|
||||
total_changes = len(missing_keys) + len(obsolete_keys)
|
||||
self.log(f"Successfully updated {locale} with {len(missing_keys)} additions and {len(obsolete_keys)} deletions ({total_changes} total changes)")
|
||||
return True
|
||||
|
||||
except json.JSONDecodeError as e:
|
||||
self.log(f"Generated invalid JSON for {locale}: {e}", 'ERROR')
|
||||
return False
|
||||
except Exception as e:
|
||||
self.log(f"Error updating {locale_file}: {e}", 'ERROR')
|
||||
return False
|
||||
|
||||
def synchronize_all(self, dry_run: bool = False) -> bool:
|
||||
"""
|
||||
Synchronize all locale files with the reference.
|
||||
Returns True if all operations were successful.
|
||||
"""
|
||||
# Load reference file
|
||||
reference_file = os.path.join(self.locales_dir, f'{self.reference_locale}.json')
|
||||
|
||||
if not os.path.exists(reference_file):
|
||||
self.log(f"Reference file {reference_file} does not exist!", 'ERROR')
|
||||
return False
|
||||
|
||||
try:
|
||||
reference_data, reference_lines = self.load_json_preserve_order(reference_file)
|
||||
reference_keys = self.get_all_leaf_keys(reference_data)
|
||||
except Exception as e:
|
||||
self.log(f"Error loading reference file: {e}", 'ERROR')
|
||||
return False
|
||||
|
||||
self.log(f"Loaded reference file with {len(reference_keys)} keys")
|
||||
|
||||
success = True
|
||||
changes_made = False
|
||||
|
||||
# Synchronize each target locale
|
||||
for locale in self.target_locales:
|
||||
try:
|
||||
if self.synchronize_locale_simple(locale, reference_data, reference_lines, dry_run):
|
||||
changes_made = True
|
||||
except Exception as e:
|
||||
self.log(f"Error synchronizing {locale}: {e}", 'ERROR')
|
||||
success = False
|
||||
|
||||
if changes_made:
|
||||
self.log("Synchronization completed with changes")
|
||||
else:
|
||||
self.log("All locale files are already up to date")
|
||||
|
||||
return success
|
||||
|
||||
def main():
|
||||
"""Main entry point for the script."""
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Synchronize translation keys from en.json to all other locale files'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--dry-run',
|
||||
action='store_true',
|
||||
help='Show what would be changed without making actual changes'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--verbose', '-v',
|
||||
action='store_true',
|
||||
help='Enable verbose output'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--locales-dir',
|
||||
default=None,
|
||||
help='Path to locales directory (default: auto-detect from script location)'
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Determine locales directory
|
||||
if args.locales_dir:
|
||||
locales_dir = args.locales_dir
|
||||
else:
|
||||
# Auto-detect based on script location
|
||||
script_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
locales_dir = os.path.join(os.path.dirname(script_dir), 'locales')
|
||||
|
||||
if not os.path.exists(locales_dir):
|
||||
print(f"ERROR: Locales directory not found: {locales_dir}")
|
||||
sys.exit(1)
|
||||
|
||||
print(f"Translation Key Synchronization")
|
||||
print(f"Locales directory: {locales_dir}")
|
||||
print(f"Mode: {'DRY RUN' if args.dry_run else 'LIVE UPDATE'}")
|
||||
print("-" * 50)
|
||||
|
||||
# Create synchronizer and run
|
||||
synchronizer = TranslationKeySynchronizer(locales_dir, args.verbose)
|
||||
|
||||
try:
|
||||
success = synchronizer.synchronize_all(args.dry_run)
|
||||
|
||||
if success:
|
||||
print("\n✅ Synchronization completed successfully!")
|
||||
if not args.dry_run:
|
||||
print("💡 Run 'python test_i18n.py' to verify formatting consistency")
|
||||
else:
|
||||
print("\n❌ Synchronization completed with errors!")
|
||||
sys.exit(1)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\n⚠️ Operation cancelled by user")
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
print(f"\n❌ Unexpected error: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
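For a concrete sense of what the merge does, here is a small illustrative sketch; the locale keys are made up, and it assumes the constructor simply records the locales directory and verbosity flag:

# Illustrative only - toy data; assumes the constructor just stores its arguments.
from collections import OrderedDict

sync = TranslationKeySynchronizer('locales', True)

reference = OrderedDict([('menu', OrderedDict([('open', 'Open'), ('close', 'Close')]))])
target = OrderedDict([('menu', OrderedDict([('open', 'Ouvrir'), ('legacy', 'Ancien')]))])

merged = sync.merge_json_structures(reference, target)
# Expected shape: the existing translation for 'open' is kept, the missing 'close'
# key gets a "[TODO: Translate] Close" placeholder, and the obsolete 'legacy' key is dropped.

Running the script itself with --dry-run reports the same additions and deletions without touching any files.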
@@ -1,6 +1,5 @@
{
    "civitai_api_key": "your_civitai_api_key_here",
    "show_only_sfw": false,
    "folder_paths": {
        "loras": [
            "C:/path/to/your/loras_folder",
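Before the standalone.py changes below, it may help to see the configuration shape they expect. A rough sketch of writing a minimal settings.json that the new validate_settings() check would accept (the folder path is a placeholder, not a real default):

# Illustrative sketch - replace the placeholder path with a folder that actually exists.
import json

minimal_settings = {
    "civitai_api_key": "your_civitai_api_key_here",
    "show_only_sfw": False,
    "folder_paths": {
        "loras": ["C:/path/to/your/loras_folder"],
        "checkpoints": [],
        "embeddings": []
    }
}

with open("settings.json", "w", encoding="utf-8") as f:
    json.dump(minimal_settings, f, indent=4)

validate_settings() only passes when at least one of the loras, checkpoints, or embeddings lists points to an existing directory.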
@@ -2,6 +2,7 @@ from pathlib import Path
import os
import sys
import json
from py.middleware.cache_middleware import cache_control

# Create mock modules for py/nodes directory - add this before any other imports
def mock_nodes_directory():
@@ -129,7 +130,7 @@ class StandaloneServer:
    """Server implementation for standalone mode"""

    def __init__(self):
        self.app = web.Application(logger=logger)
        self.app = web.Application(logger=logger, middlewares=[cache_control])
        self.instance = self  # Make it compatible with PromptServer.instance pattern

        # Ensure the app's access logger is configured to reduce verbosity
@@ -213,6 +214,54 @@ class StandaloneServer:
# After all mocks are in place, import LoraManager
from py.lora_manager import LoraManager

def validate_settings():
    """Validate that settings.json exists and has required configuration"""
    settings_path = os.path.join(os.path.dirname(__file__), 'settings.json')
    if not os.path.exists(settings_path):
        logger.error("=" * 80)
        logger.error("CONFIGURATION ERROR: settings.json file not found!")
        logger.error("")
        logger.error("To run in standalone mode, you need to create a settings.json file.")
        logger.error("Please follow these steps:")
        logger.error("")
        logger.error("1. Copy the provided settings.json.example file to create a new file")
        logger.error("   named settings.json in the comfyui-lora-manager folder")
        logger.error("")
        logger.error("2. Edit settings.json to include your correct model folder paths")
        logger.error("   and CivitAI API key")
        logger.error("=" * 80)
        return False

    # Check if settings.json has valid folder paths
    try:
        with open(settings_path, 'r', encoding='utf-8') as f:
            settings = json.load(f)

        folder_paths = settings.get('folder_paths', {})
        has_valid_paths = False

        for path_type in ['loras', 'checkpoints', 'embeddings']:
            paths = folder_paths.get(path_type, [])
            if paths and any(os.path.exists(p) for p in paths):
                has_valid_paths = True
                break

        if not has_valid_paths:
            logger.warning("=" * 80)
            logger.warning("CONFIGURATION WARNING: No valid model folder paths found!")
            logger.warning("")
            logger.warning("Your settings.json exists but doesn't contain valid folder paths.")
            logger.warning("Please check and update the folder_paths section in settings.json")
            logger.warning("to include existing directories for your models.")
            logger.warning("=" * 80)
            return False

    except Exception as e:
        logger.error(f"Error reading settings.json: {e}")
        return False

    return True

class StandaloneLoraManager(LoraManager):
    """Extended LoraManager for standalone mode"""

@@ -405,6 +454,12 @@ async def main():
    # Set log level
    logging.getLogger().setLevel(getattr(logging, args.log_level))

    # Validate settings before proceeding
    if not validate_settings():
        logger.error("Cannot start server due to configuration issues.")
        logger.error("Please fix the settings.json file and try again.")
        return

    # Create the server instance
    server = StandaloneServer()
|
||||
|
||||
@@ -1,81 +1,3 @@
|
||||
/* Bulk Operations Styles */
|
||||
.bulk-operations-panel {
|
||||
position: fixed;
|
||||
bottom: 20px;
|
||||
left: 50%;
|
||||
transform: translateY(100px) translateX(-50%);
|
||||
background: var(--card-bg);
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: var(--border-radius-base);
|
||||
padding: 12px 16px;
|
||||
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
|
||||
z-index: var(--z-overlay);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
min-width: 420px;
|
||||
max-width: 900px;
|
||||
width: auto;
|
||||
transition: all 0.4s cubic-bezier(0.175, 0.885, 0.32, 1.275);
|
||||
opacity: 0;
|
||||
}
|
||||
|
||||
.bulk-operations-header {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
margin-bottom: 12px;
|
||||
gap: 20px; /* Increase space between count and buttons */
|
||||
}
|
||||
|
||||
#selectedCount {
|
||||
font-weight: 500;
|
||||
background: var(--bg-color);
|
||||
padding: 6px 12px;
|
||||
border-radius: var(--border-radius-xs);
|
||||
border: 1px solid var(--border-color);
|
||||
min-width: 80px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.bulk-operations-actions {
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
}
|
||||
|
||||
.bulk-operations-actions button {
|
||||
padding: 6px 12px;
|
||||
border-radius: var(--border-radius-xs);
|
||||
background: var(--bg-color);
|
||||
border: 1px solid var(--border-color);
|
||||
color: var(--text-color);
|
||||
cursor: pointer;
|
||||
font-size: 14px;
|
||||
white-space: nowrap;
|
||||
min-height: 36px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 6px;
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
|
||||
.bulk-operations-actions button:hover {
|
||||
background: var(--lora-accent);
|
||||
color: white;
|
||||
border-color: var(--lora-accent);
|
||||
}
|
||||
|
||||
/* Danger button style - updated to use proper theme variables */
|
||||
.bulk-operations-actions button.danger-btn {
|
||||
background: oklch(70% 0.2 29); /* Light red background that works in both themes */
|
||||
color: oklch(98% 0.01 0); /* Almost white text for good contrast */
|
||||
border-color: var(--lora-error);
|
||||
}
|
||||
|
||||
.bulk-operations-actions button.danger-btn:hover {
|
||||
background: var(--lora-error);
|
||||
color: oklch(100% 0 0); /* Pure white text on hover for maximum contrast */
|
||||
}
|
||||
|
||||
/* Style for selected cards */
|
||||
.model-card.selected {
|
||||
box-shadow: 0 0 0 2px var(--lora-accent);
|
||||
@@ -99,203 +21,61 @@
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
/* Update bulk operations button to match others when active */
|
||||
#bulkOperationsBtn.active {
|
||||
background: var(--lora-accent);
|
||||
color: white;
|
||||
border-color: var(--lora-accent);
|
||||
}
|
||||
|
||||
@media (max-width: 768px) {
|
||||
.bulk-operations-panel {
|
||||
width: calc(100% - 40px);
|
||||
min-width: unset;
|
||||
max-width: unset;
|
||||
left: 20px;
|
||||
transform: none;
|
||||
border-radius: var(--border-radius-sm);
|
||||
}
|
||||
|
||||
.bulk-operations-actions {
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
}
|
||||
|
||||
.bulk-operations-panel.visible {
|
||||
transform: translateY(0) translateX(-50%);
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
/* Thumbnail Strip Styles */
|
||||
.selected-thumbnails-strip {
|
||||
/* Marquee selection styles */
|
||||
.marquee-selection {
|
||||
position: fixed;
|
||||
bottom: 80px; /* Position above the bulk operations panel */
|
||||
left: 50%;
|
||||
transform: translateX(-50%) translateY(20px);
|
||||
background: var(--card-bg);
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: var(--border-radius-base);
|
||||
box-shadow: 0 4px 16px rgba(0, 0, 0, 0.15);
|
||||
z-index: calc(var(--z-overlay) - 1); /* Just below the bulk panel z-index */
|
||||
padding: 16px;
|
||||
max-width: 80%;
|
||||
width: auto;
|
||||
transition: all 0.3s ease;
|
||||
opacity: 0;
|
||||
overflow: hidden;
|
||||
border: 2px dashed var(--lora-accent, #007bff);
|
||||
background: rgba(0, 123, 255, 0.1);
|
||||
pointer-events: none;
|
||||
z-index: 9999;
|
||||
border-radius: 2px;
|
||||
}
|
||||
|
||||
.selected-thumbnails-strip.visible {
|
||||
opacity: 1;
|
||||
transform: translateX(-50%) translateY(0);
|
||||
/* Visual feedback when marquee selecting */
|
||||
.marquee-selecting {
|
||||
cursor: crosshair;
|
||||
user-select: none;
|
||||
-webkit-user-select: none;
|
||||
-moz-user-select: none;
|
||||
-ms-user-select: none;
|
||||
}
|
||||
|
||||
.thumbnails-container {
|
||||
display: flex;
|
||||
gap: 12px;
|
||||
overflow-x: auto;
|
||||
padding-bottom: 8px; /* Space for scrollbar */
|
||||
/* Prevent text selection during marquee */
|
||||
.marquee-selecting * {
|
||||
user-select: none;
|
||||
-webkit-user-select: none;
|
||||
-moz-user-select: none;
|
||||
-ms-user-select: none;
|
||||
}
|
||||
|
||||
/* Remove bulk base model modal specific styles - now using shared components */
|
||||
/* Use shared metadata editing styles instead */
|
||||
|
||||
/* Override for bulk base model select to ensure proper width */
|
||||
.bulk-base-model-select {
|
||||
width: 100%;
|
||||
max-width: 100%;
|
||||
align-items: flex-start;
|
||||
}
|
||||
|
||||
.selected-thumbnail {
|
||||
position: relative;
|
||||
width: 80px;
|
||||
min-width: 80px; /* Prevent shrinking */
|
||||
padding: 6px 10px;
|
||||
border-radius: var(--border-radius-xs);
|
||||
border: 1px solid var(--border-color);
|
||||
overflow: hidden;
|
||||
cursor: pointer;
|
||||
background: var(--bg-color);
|
||||
transition: transform 0.2s ease, box-shadow 0.2s ease;
|
||||
}
|
||||
|
||||
.selected-thumbnail:hover {
|
||||
transform: translateY(-2px);
|
||||
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
.selected-thumbnail img,
|
||||
.selected-thumbnail video {
|
||||
width: 100%;
|
||||
aspect-ratio: 1 / 1;
|
||||
object-fit: cover;
|
||||
display: block;
|
||||
}
|
||||
|
||||
.thumbnail-name {
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
background: rgba(0, 0, 0, 0.6);
|
||||
color: white;
|
||||
font-size: 10px;
|
||||
padding: 3px 5px;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
.thumbnail-remove {
|
||||
position: absolute;
|
||||
top: 3px;
|
||||
right: 3px;
|
||||
width: 18px;
|
||||
height: 18px;
|
||||
border-radius: 50%;
|
||||
background: rgba(0, 0, 0, 0.5);
|
||||
color: white;
|
||||
border: none;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
cursor: pointer;
|
||||
font-size: 10px;
|
||||
opacity: 0.7;
|
||||
transition: opacity 0.2s ease, background-color 0.2s ease;
|
||||
}
|
||||
|
||||
.thumbnail-remove:hover {
|
||||
opacity: 1;
|
||||
background: var(--lora-error);
|
||||
}
|
||||
|
||||
.strip-close-btn {
|
||||
position: absolute;
|
||||
top: 5px;
|
||||
right: 5px;
|
||||
width: 20px;
|
||||
height: 20px;
|
||||
background: none;
|
||||
border: none;
|
||||
background-color: var(--lora-surface);
|
||||
color: var(--text-color);
|
||||
cursor: pointer;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
opacity: 0.7;
|
||||
transition: opacity 0.2s ease;
|
||||
font-size: 0.95em;
|
||||
height: 32px;
|
||||
}
|
||||
|
||||
.strip-close-btn:hover {
|
||||
opacity: 1;
|
||||
.bulk-base-model-select:focus {
|
||||
border-color: var(--lora-accent);
|
||||
outline: none;
|
||||
}
|
||||
|
||||
/* Style the selectedCount to indicate it's clickable */
|
||||
.selectable-count {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 5px;
|
||||
cursor: pointer;
|
||||
transition: background-color 0.2s ease;
|
||||
/* Dark theme support for bulk base model select */
|
||||
[data-theme="dark"] .bulk-base-model-select {
|
||||
background-color: rgba(30, 30, 30, 0.9);
|
||||
color: var(--text-color);
|
||||
}
|
||||
|
||||
.selectable-count:hover {
|
||||
background: var(--lora-border);
|
||||
}
|
||||
|
||||
.dropdown-caret {
|
||||
font-size: 12px;
|
||||
visibility: hidden; /* Will be shown via JS when items are selected */
|
||||
}
|
||||
|
||||
/* Scrollbar styling for the thumbnails container */
|
||||
.thumbnails-container::-webkit-scrollbar {
|
||||
height: 6px;
|
||||
}
|
||||
|
||||
.thumbnails-container::-webkit-scrollbar-track {
|
||||
background: var(--bg-color);
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
.thumbnails-container::-webkit-scrollbar-thumb {
|
||||
background: var(--border-color);
|
||||
border-radius: 3px;
|
||||
}
|
||||
|
||||
.thumbnails-container::-webkit-scrollbar-thumb:hover {
|
||||
background: var(--lora-accent);
|
||||
}
|
||||
|
||||
/* Mobile optimizations */
|
||||
@media (max-width: 768px) {
|
||||
.selected-thumbnails-strip {
|
||||
width: calc(100% - 40px);
|
||||
max-width: none;
|
||||
left: 20px;
|
||||
transform: translateY(20px);
|
||||
border-radius: var(--border-radius-sm);
|
||||
}
|
||||
|
||||
.selected-thumbnails-strip.visible {
|
||||
transform: translateY(0);
|
||||
}
|
||||
|
||||
.selected-thumbnail {
|
||||
width: 70px;
|
||||
min-width: 70px;
|
||||
}
|
||||
[data-theme="dark"] .bulk-base-model-select option {
|
||||
background-color: #2d2d2d;
|
||||
color: var(--text-color);
|
||||
}
|
||||
@@ -67,6 +67,14 @@
|
||||
font-size: 0.9em;
|
||||
}
|
||||
|
||||
.file-path[data-action="open-file-location"] {
|
||||
cursor: pointer;
|
||||
text-decoration: underline;
|
||||
}
|
||||
.file-path[data-action="open-file-location"]:hover {
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
.description-text {
|
||||
line-height: 1.5;
|
||||
max-height: 100px;
|
||||
|
||||
@@ -176,11 +176,6 @@
|
||||
background: linear-gradient(45deg, #4a90e2, #357abd);
|
||||
}
|
||||
|
||||
/* Remove old node-color-indicator styles */
|
||||
.node-color-indicator {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.send-all-item {
|
||||
border-top: 1px solid var(--border-color);
|
||||
font-weight: 500;
|
||||
@@ -217,4 +212,24 @@
|
||||
font-size: 12px;
|
||||
color: var(--text-muted);
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
/* Bulk Context Menu Header */
|
||||
.bulk-context-header {
|
||||
padding: 10px 12px;
|
||||
background: var(--card-bg); /* Use card background for subtlety */
|
||||
color: var(--text-color); /* Use standard text color */
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
font-weight: 500;
|
||||
font-size: 14px;
|
||||
border-radius: var(--border-radius-xs) var(--border-radius-xs) 0 0;
|
||||
border-bottom: 1px solid var(--border-color); /* Add subtle separator */
|
||||
}
|
||||
|
||||
.bulk-context-header i {
|
||||
width: 16px;
|
||||
text-align: center;
|
||||
color: var(--lora-accent); /* Accent only the icon for a hint of color */
|
||||
}
|
||||
@@ -37,6 +37,10 @@ body.modal-open {
|
||||
overflow-x: hidden; /* 防止水平滚动条 */
|
||||
}
|
||||
|
||||
.modal-content-large {
|
||||
min-height: 480px;
|
||||
}
|
||||
|
||||
/* 当 modal 打开时锁定 body */
|
||||
body.modal-open {
|
||||
overflow: hidden !important; /* 覆盖 base.css 中的 scroll */
|
||||
@@ -204,6 +208,14 @@ body.modal-open {
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
button:disabled,
|
||||
.primary-btn:disabled,
|
||||
.danger-btn:disabled {
|
||||
opacity: 0.5;
|
||||
cursor: not-allowed;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
.restart-required-icon {
|
||||
color: var(--lora-warning);
|
||||
margin-left: 5px;
|
||||
@@ -224,14 +236,76 @@ body.modal-open {
|
||||
background-color: oklch(35% 0.02 256 / 0.98);
|
||||
}
|
||||
|
||||
.primary-btn.disabled {
|
||||
opacity: 0.5;
|
||||
cursor: not-allowed;
|
||||
/* Danger button styles */
|
||||
.danger-btn {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
padding: 8px 16px;
|
||||
background-color: var(--lora-error);
|
||||
color: white;
|
||||
border: none;
|
||||
border-radius: var(--border-radius-sm);
|
||||
cursor: pointer;
|
||||
transition: background-color 0.2s;
|
||||
font-size: 0.95em;
|
||||
}
|
||||
|
||||
.primary-btn.disabled {
|
||||
opacity: 0.5;
|
||||
cursor: not-allowed;
|
||||
.danger-btn:hover {
|
||||
background-color: oklch(from var(--lora-error) l c h / 85%);
|
||||
color: white;
|
||||
}
|
||||
|
||||
/* Metadata archive status styles */
|
||||
.metadata-archive-status {
|
||||
background: rgba(0, 0, 0, 0.03);
|
||||
border: 1px solid rgba(0, 0, 0, 0.1);
|
||||
border-radius: var(--border-radius-sm);
|
||||
padding: var(--space-2);
|
||||
margin-bottom: var(--space-2);
|
||||
}
|
||||
|
||||
[data-theme="dark"] .metadata-archive-status {
|
||||
background: rgba(255, 255, 255, 0.03);
|
||||
border: 1px solid var(--lora-border);
|
||||
}
|
||||
|
||||
.archive-status-item {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
margin-bottom: 8px;
|
||||
font-size: 0.95em;
|
||||
}
|
||||
|
||||
.archive-status-item:last-child {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
.archive-status-label {
|
||||
font-weight: 500;
|
||||
color: var(--text-color);
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
.archive-status-value {
|
||||
color: var(--text-color);
|
||||
}
|
||||
|
||||
.archive-status-value.status-available {
|
||||
color: var(--lora-success, #10b981);
|
||||
}
|
||||
|
||||
.archive-status-value.status-unavailable {
|
||||
color: var(--lora-warning, #f59e0b);
|
||||
}
|
||||
|
||||
.archive-status-value.status-enabled {
|
||||
color: var(--lora-success, #10b981);
|
||||
}
|
||||
|
||||
.archive-status-value.status-disabled {
|
||||
color: var(--lora-error, #ef4444);
|
||||
}
|
||||
|
||||
/* Add styles for delete preview image */
|
||||
|
||||
@@ -101,7 +101,7 @@
|
||||
.api-key-input input {
|
||||
width: 100%;
|
||||
padding: 6px 40px 6px 10px; /* Add left padding */
|
||||
height: 32px;
|
||||
height: 20px;
|
||||
border-radius: var(--border-radius-xs);
|
||||
border: 1px solid var(--border-color);
|
||||
background-color: var(--lora-surface);
|
||||
@@ -123,6 +123,36 @@
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
/* Text input wrapper styles for consistent input styling */
|
||||
.text-input-wrapper {
|
||||
width: 100%;
|
||||
position: relative;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.text-input-wrapper input {
|
||||
width: 100%;
|
||||
padding: 6px 10px;
|
||||
height: 20px;
|
||||
border-radius: var(--border-radius-xs);
|
||||
border: 1px solid var(--border-color);
|
||||
background-color: var(--lora-surface);
|
||||
color: var(--text-color);
|
||||
font-size: 0.95em;
|
||||
}
|
||||
|
||||
.text-input-wrapper input:focus {
|
||||
border-color: var(--lora-accent);
|
||||
outline: none;
|
||||
box-shadow: 0 0 0 2px rgba(var(--lora-accent-rgb, 79, 70, 229), 0.1);
|
||||
}
|
||||
|
||||
/* Dark theme specific adjustments */
|
||||
[data-theme="dark"] .text-input-wrapper input {
|
||||
background-color: rgba(30, 30, 30, 0.9);
|
||||
}
|
||||
|
||||
.input-help {
|
||||
font-size: 0.85em;
|
||||
color: var(--text-color);
|
||||
@@ -312,7 +342,7 @@ input:checked + .toggle-slider:before {
|
||||
border-radius: var(--border-radius-xs);
|
||||
border: 1px solid var(--border-color);
|
||||
background-color: var(--lora-surface);
|
||||
color: var (--text-color);
|
||||
color: var(--text-color);
|
||||
font-size: 0.95em;
|
||||
height: 32px;
|
||||
}
|
||||
@@ -346,7 +376,7 @@ input:checked + .toggle-slider:before {
|
||||
padding: var(--space-1);
|
||||
margin-top: 8px;
|
||||
font-family: monospace;
|
||||
font-size: 1.1em;
|
||||
font-size: 0.9em;
|
||||
color: var(--lora-accent);
|
||||
display: none;
|
||||
}
|
||||
@@ -571,10 +601,31 @@ input:checked + .toggle-slider:before {
|
||||
background-color: rgba(30, 30, 30, 0.9);
|
||||
}
|
||||
|
||||
/* Proxy Settings Styles */
|
||||
.proxy-settings-group {
|
||||
margin-left: var(--space-1);
|
||||
padding-left: var(--space-1);
|
||||
border-left: 2px solid var(--lora-border);
|
||||
animation: slideDown 0.3s ease-out;
|
||||
}
|
||||
|
||||
.proxy-settings-group .setting-item {
|
||||
margin-bottom: var(--space-2);
|
||||
}
|
||||
|
||||
/* Responsive adjustments */
|
||||
@media (max-width: 768px) {
|
||||
.placeholder-info {
|
||||
flex-direction: column;
|
||||
align-items: flex-start;
|
||||
}
|
||||
|
||||
.proxy-settings-group {
|
||||
margin-left: 0;
|
||||
padding-left: var(--space-1);
|
||||
border-left: none;
|
||||
border-top: 1px solid var(--lora-border);
|
||||
padding-top: var(--space-2);
|
||||
margin-top: var(--space-2);
|
||||
}
|
||||
}
|
||||
@@ -80,6 +80,7 @@
|
||||
align-items: flex-start;
|
||||
margin-bottom: var(--space-2);
|
||||
width: 100%;
|
||||
min-height: 30px; /* Ensure some height even if empty to prevent layout shifts */
|
||||
}
|
||||
|
||||
/* Individual Item */
|
||||
@@ -153,17 +154,42 @@
|
||||
}
|
||||
|
||||
.metadata-save-btn,
|
||||
.save-tags-btn {
|
||||
.save-tags-btn,
|
||||
.append-tags-btn,
|
||||
.replace-tags-btn {
|
||||
background: var(--lora-accent) !important;
|
||||
color: white !important;
|
||||
border-color: var(--lora-accent) !important;
|
||||
}
|
||||
|
||||
.metadata-save-btn:hover,
|
||||
.save-tags-btn:hover {
|
||||
.save-tags-btn:hover,
|
||||
.append-tags-btn:hover,
|
||||
.replace-tags-btn:hover {
|
||||
opacity: 0.9;
|
||||
}
|
||||
|
||||
/* Specific styling for bulk tag action buttons */
|
||||
.bulk-append-tags-btn {
|
||||
background: var(--lora-accent) !important;
|
||||
color: white !important;
|
||||
border-color: var(--lora-accent) !important;
|
||||
}
|
||||
|
||||
.bulk-replace-tags-btn {
|
||||
background: var(--lora-warning, #f59e0b) !important;
|
||||
color: white !important;
|
||||
border-color: var(--lora-warning, #f59e0b) !important;
|
||||
}
|
||||
|
||||
.bulk-append-tags-btn:hover {
|
||||
opacity: 0.9;
|
||||
}
|
||||
|
||||
.bulk-replace-tags-btn:hover {
|
||||
background: var(--lora-warning-dark, #d97706) !important;
|
||||
}
|
||||
|
||||
/* Add Form */
|
||||
.metadata-add-form {
|
||||
display: flex;
|
||||
|
||||
@@ -233,7 +233,7 @@
|
||||
}
|
||||
|
||||
.sidebar-tree-children.expanded {
|
||||
max-height: 1000px;
|
||||
max-height: 50000px;
|
||||
}
|
||||
|
||||
.sidebar-tree-children .sidebar-tree-node-content {
|
||||
|
||||
@@ -55,41 +55,48 @@ export function getApiEndpoints(modelType) {

    return {
        // Base CRUD operations
        list: `/api/${modelType}/list`,
        delete: `/api/${modelType}/delete`,
        exclude: `/api/${modelType}/exclude`,
        rename: `/api/${modelType}/rename`,
        save: `/api/${modelType}/save-metadata`,
        list: `/api/lm/${modelType}/list`,
        delete: `/api/lm/${modelType}/delete`,
        exclude: `/api/lm/${modelType}/exclude`,
        rename: `/api/lm/${modelType}/rename`,
        save: `/api/lm/${modelType}/save-metadata`,

        // Bulk operations
        bulkDelete: `/api/${modelType}/bulk-delete`,
        bulkDelete: `/api/lm/${modelType}/bulk-delete`,

        // Tag operations
        addTags: `/api/lm/${modelType}/add-tags`,

        // Move operations (now common for all model types that support move)
        moveModel: `/api/${modelType}/move_model`,
        moveBulk: `/api/${modelType}/move_models_bulk`,
        moveModel: `/api/lm/${modelType}/move_model`,
        moveBulk: `/api/lm/${modelType}/move_models_bulk`,

        // CivitAI integration
        fetchCivitai: `/api/${modelType}/fetch-civitai`,
        fetchAllCivitai: `/api/${modelType}/fetch-all-civitai`,
        relinkCivitai: `/api/${modelType}/relink-civitai`,
        civitaiVersions: `/api/${modelType}/civitai/versions`,
        fetchCivitai: `/api/lm/${modelType}/fetch-civitai`,
        fetchAllCivitai: `/api/lm/${modelType}/fetch-all-civitai`,
        relinkCivitai: `/api/lm/${modelType}/relink-civitai`,
        civitaiVersions: `/api/lm/${modelType}/civitai/versions`,

        // Preview management
        replacePreview: `/api/${modelType}/replace-preview`,
        replacePreview: `/api/lm/${modelType}/replace-preview`,

        // Query operations
        scan: `/api/${modelType}/scan`,
        topTags: `/api/${modelType}/top-tags`,
        baseModels: `/api/${modelType}/base-models`,
        roots: `/api/${modelType}/roots`,
        folders: `/api/${modelType}/folders`,
        folderTree: `/api/${modelType}/folder-tree`,
        unifiedFolderTree: `/api/${modelType}/unified-folder-tree`,
        duplicates: `/api/${modelType}/find-duplicates`,
        conflicts: `/api/${modelType}/find-filename-conflicts`,
        verify: `/api/${modelType}/verify-duplicates`,
        metadata: `/api/${modelType}/metadata`,
        modelDescription: `/api/${modelType}/model-description`,
        scan: `/api/lm/${modelType}/scan`,
        topTags: `/api/lm/${modelType}/top-tags`,
        baseModels: `/api/lm/${modelType}/base-models`,
        roots: `/api/lm/${modelType}/roots`,
        folders: `/api/lm/${modelType}/folders`,
        folderTree: `/api/lm/${modelType}/folder-tree`,
        unifiedFolderTree: `/api/lm/${modelType}/unified-folder-tree`,
        duplicates: `/api/lm/${modelType}/find-duplicates`,
        conflicts: `/api/lm/${modelType}/find-filename-conflicts`,
        verify: `/api/lm/${modelType}/verify-duplicates`,
        metadata: `/api/lm/${modelType}/metadata`,
        modelDescription: `/api/lm/${modelType}/model-description`,

        // Auto-organize operations
        autoOrganize: `/api/lm/${modelType}/auto-organize`,
        autoOrganizeProgress: `/api/lm/${modelType}/auto-organize-progress`,

        // Model-specific endpoints (will be merged with specific configs)
        specific: {}
@@ -101,24 +108,24 @@ export function getApiEndpoints(modelType) {
 */
export const MODEL_SPECIFIC_ENDPOINTS = {
    [MODEL_TYPES.LORA]: {
        letterCounts: `/api/${MODEL_TYPES.LORA}/letter-counts`,
        notes: `/api/${MODEL_TYPES.LORA}/get-notes`,
        triggerWords: `/api/${MODEL_TYPES.LORA}/get-trigger-words`,
        previewUrl: `/api/${MODEL_TYPES.LORA}/preview-url`,
        civitaiUrl: `/api/${MODEL_TYPES.LORA}/civitai-url`,
        metadata: `/api/${MODEL_TYPES.LORA}/metadata`,
        getTriggerWordsPost: `/api/${MODEL_TYPES.LORA}/get_trigger_words`,
        civitaiModelByVersion: `/api/${MODEL_TYPES.LORA}/civitai/model/version`,
        civitaiModelByHash: `/api/${MODEL_TYPES.LORA}/civitai/model/hash`,
        letterCounts: `/api/lm/${MODEL_TYPES.LORA}/letter-counts`,
        notes: `/api/lm/${MODEL_TYPES.LORA}/get-notes`,
        triggerWords: `/api/lm/${MODEL_TYPES.LORA}/get-trigger-words`,
        previewUrl: `/api/lm/${MODEL_TYPES.LORA}/preview-url`,
        civitaiUrl: `/api/lm/${MODEL_TYPES.LORA}/civitai-url`,
        metadata: `/api/lm/${MODEL_TYPES.LORA}/metadata`,
        getTriggerWordsPost: `/api/lm/${MODEL_TYPES.LORA}/get_trigger_words`,
        civitaiModelByVersion: `/api/lm/${MODEL_TYPES.LORA}/civitai/model/version`,
        civitaiModelByHash: `/api/lm/${MODEL_TYPES.LORA}/civitai/model/hash`,
    },
    [MODEL_TYPES.CHECKPOINT]: {
        info: `/api/${MODEL_TYPES.CHECKPOINT}/info`,
        checkpoints_roots: `/api/${MODEL_TYPES.CHECKPOINT}/checkpoints_roots`,
        unet_roots: `/api/${MODEL_TYPES.CHECKPOINT}/unet_roots`,
        metadata: `/api/${MODEL_TYPES.CHECKPOINT}/metadata`,
        info: `/api/lm/${MODEL_TYPES.CHECKPOINT}/info`,
        checkpoints_roots: `/api/lm/${MODEL_TYPES.CHECKPOINT}/checkpoints_roots`,
        unet_roots: `/api/lm/${MODEL_TYPES.CHECKPOINT}/unet_roots`,
        metadata: `/api/lm/${MODEL_TYPES.CHECKPOINT}/metadata`,
    },
    [MODEL_TYPES.EMBEDDING]: {
        metadata: `/api/${MODEL_TYPES.EMBEDDING}/metadata`,
        metadata: `/api/lm/${MODEL_TYPES.EMBEDDING}/metadata`,
    }
};
@@ -166,11 +173,11 @@ export function getCurrentModelType(explicitType = null) {

// Download API endpoints (shared across all model types)
export const DOWNLOAD_ENDPOINTS = {
    download: '/api/download-model',
    downloadGet: '/api/download-model-get',
    cancelGet: '/api/cancel-download-get',
    progress: '/api/download-progress',
    exampleImages: '/api/force-download-example-images' // New endpoint for downloading example images
    download: '/api/lm/download-model',
    downloadGet: '/api/lm/download-model-get',
    cancelGet: '/api/lm/cancel-download-get',
    progress: '/api/lm/download-progress',
    exampleImages: '/api/lm/force-download-example-images' // New endpoint for downloading example images
};

// WebSocket endpoints
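All routes in the map above now carry the /api/lm/ prefix. A quick way to confirm a prefixed route from outside the browser is a plain GET against one of the read-only endpoints; the host and port below are assumptions for a local instance, not values taken from this repo:

# Illustrative only - adjust BASE to wherever your server is listening.
import requests

BASE = "http://127.0.0.1:8188"  # assumed local address

resp = requests.get(f"{BASE}/api/lm/loras/letter-counts")
print(resp.status_code, resp.json())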
||||
|
||||
@@ -306,6 +306,34 @@ export class BaseModelApiClient {
|
||||
}
|
||||
}
|
||||
|
||||
async addTags(filePath, data) {
|
||||
try {
|
||||
const response = await fetch(this.apiConfig.endpoints.addTags, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
file_path: filePath,
|
||||
...data
|
||||
})
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error('Failed to add tags');
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
|
||||
if (result.success && result.tags) {
|
||||
state.virtualScroller.updateSingleItem(filePath, { tags: result.tags });
|
||||
}
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
console.error('Error adding tags:', error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async refreshModels(fullRebuild = false) {
|
||||
try {
|
||||
state.loadingManager.showSimpleLoading(
|
||||
@@ -510,13 +538,13 @@ export class BaseModelApiClient {
|
||||
completionMessage = translate('toast.api.bulkMetadataCompletePartial', { success: successCount, total: totalItems, type: this.apiConfig.config.displayName }, `Refreshed ${successCount} of ${totalItems} ${this.apiConfig.config.displayName}s`);
|
||||
showToast('toast.api.bulkMetadataCompletePartial', { success: successCount, total: totalItems, type: this.apiConfig.config.displayName }, 'warning');
|
||||
|
||||
if (failedItems.length > 0) {
|
||||
const failureMessage = failedItems.length <= 3
|
||||
? failedItems.map(item => `${item.fileName}: ${item.error}`).join('\n')
|
||||
: failedItems.slice(0, 3).map(item => `${item.fileName}: ${item.error}`).join('\n') +
|
||||
`\n(and ${failedItems.length - 3} more)`;
|
||||
showToast('toast.api.bulkMetadataFailureDetails', { failures: failureMessage }, 'warning', 6000);
|
||||
}
|
||||
// if (failedItems.length > 0) {
|
||||
// const failureMessage = failedItems.length <= 3
|
||||
// ? failedItems.map(item => `${item.fileName}: ${item.error}`).join('\n')
|
||||
// : failedItems.slice(0, 3).map(item => `${item.fileName}: ${item.error}`).join('\n') +
|
||||
// `\n(and ${failedItems.length - 3} more)`;
|
||||
// showToast('toast.api.bulkMetadataFailureDetails', { failures: failureMessage }, 'warning', 6000);
|
||||
// }
|
||||
} else {
|
||||
completionMessage = translate('toast.api.bulkMetadataCompleteNone', { type: this.apiConfig.config.displayName }, `Failed to refresh metadata for any ${this.apiConfig.config.displayName}s`);
|
||||
showToast('toast.api.bulkMetadataCompleteNone', { type: this.apiConfig.config.displayName }, 'error');
|
||||
@@ -910,8 +938,8 @@ export class BaseModelApiClient {
|
||||
ws.onerror = reject;
|
||||
});
|
||||
|
||||
// Get the output directory from storage
|
||||
const outputDir = getStorageItem('example_images_path', '');
|
||||
// Get the output directory from state
|
||||
const outputDir = state.global?.settings?.example_images_path || '';
|
||||
if (!outputDir) {
|
||||
throw new Error('Please set the example images path in the settings first.');
|
||||
}
|
||||
@@ -1002,4 +1030,129 @@ export class BaseModelApiClient {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Auto-organize models based on current path template settings
|
||||
* @param {Array} filePaths - Optional array of file paths to organize. If not provided, organizes all models.
|
||||
* @returns {Promise} - Promise that resolves when the operation is complete
|
||||
*/
|
||||
async autoOrganizeModels(filePaths = null) {
|
||||
let ws = null;
|
||||
|
||||
await state.loadingManager.showWithProgress(async (loading) => {
|
||||
try {
|
||||
// Connect to WebSocket for progress updates
|
||||
const wsProtocol = window.location.protocol === 'https:' ? 'wss://' : 'ws://';
|
||||
ws = new WebSocket(`${wsProtocol}${window.location.host}${WS_ENDPOINTS.fetchProgress}`);
|
||||
|
||||
const operationComplete = new Promise((resolve, reject) => {
|
||||
ws.onmessage = (event) => {
|
||||
const data = JSON.parse(event.data);
|
||||
|
||||
if (data.type !== 'auto_organize_progress') return;
|
||||
|
||||
switch(data.status) {
|
||||
case 'started':
|
||||
loading.setProgress(0);
|
||||
const operationType = data.operation_type === 'bulk' ? 'selected models' : 'all models';
|
||||
loading.setStatus(translate('loras.bulkOperations.autoOrganizeProgress.starting', { type: operationType }, `Starting auto-organize for ${operationType}...`));
|
||||
break;
|
||||
|
||||
case 'processing':
|
||||
const percent = data.total > 0 ? ((data.processed / data.total) * 90).toFixed(1) : 0;
|
||||
loading.setProgress(percent);
|
||||
loading.setStatus(
|
||||
translate('loras.bulkOperations.autoOrganizeProgress.processing', {
|
||||
processed: data.processed,
|
||||
total: data.total,
|
||||
success: data.success,
|
||||
failures: data.failures,
|
||||
skipped: data.skipped
|
||||
}, `Processing (${data.processed}/${data.total}) - ${data.success} moved, ${data.skipped} skipped, ${data.failures} failed`)
|
||||
);
|
||||
break;
|
||||
|
||||
case 'cleaning':
|
||||
loading.setProgress(95);
|
||||
loading.setStatus(translate('loras.bulkOperations.autoOrganizeProgress.cleaning', {}, 'Cleaning up empty directories...'));
|
||||
break;
|
||||
|
||||
case 'completed':
|
||||
loading.setProgress(100);
|
||||
loading.setStatus(
|
||||
translate('loras.bulkOperations.autoOrganizeProgress.completed', {
|
||||
success: data.success,
|
||||
skipped: data.skipped,
|
||||
failures: data.failures,
|
||||
total: data.total
|
||||
}, `Completed: ${data.success} moved, ${data.skipped} skipped, ${data.failures} failed`)
|
||||
);
|
||||
|
||||
setTimeout(() => {
|
||||
resolve(data);
|
||||
}, 1500);
|
||||
break;
|
||||
|
||||
case 'error':
|
||||
loading.setStatus(translate('loras.bulkOperations.autoOrganizeProgress.error', { error: data.error }, `Error: ${data.error}`));
|
||||
reject(new Error(data.error));
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
ws.onerror = (error) => {
|
||||
console.error('WebSocket error during auto-organize:', error);
|
||||
reject(new Error('Connection error'));
|
||||
};
|
||||
});
|
||||
|
||||
// Start the auto-organize operation
|
||||
const endpoint = this.apiConfig.endpoints.autoOrganize;
|
||||
const requestOptions = {
|
||||
method: filePaths ? 'POST' : 'GET',
|
||||
headers: filePaths ? { 'Content-Type': 'application/json' } : {}
|
||||
};
|
||||
|
||||
if (filePaths) {
|
||||
requestOptions.body = JSON.stringify({ file_paths: filePaths });
|
||||
}
|
||||
|
||||
const response = await fetch(endpoint, requestOptions);
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.json().catch(() => ({}));
|
||||
throw new Error(errorData.error || 'Failed to start auto-organize operation');
|
||||
}
|
||||
|
||||
// Wait for the operation to complete via WebSocket
|
||||
const result = await operationComplete;
|
||||
|
||||
// Show appropriate success message based on results
|
||||
if (result.failures === 0) {
|
||||
showToast('toast.loras.autoOrganizeSuccess', {
|
||||
count: result.success,
|
||||
type: result.operation_type === 'bulk' ? 'selected models' : 'all models'
|
||||
}, 'success');
|
||||
} else {
|
||||
showToast('toast.loras.autoOrganizePartialSuccess', {
|
||||
success: result.success,
|
||||
failures: result.failures,
|
||||
total: result.total
|
||||
}, 'warning');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
console.error('Error during auto-organize:', error);
|
||||
showToast('toast.loras.autoOrganizeFailed', { error: error.message }, 'error');
|
||||
throw error;
|
||||
} finally {
|
||||
if (ws && ws.readyState === WebSocket.OPEN) {
|
||||
ws.close();
|
||||
}
|
||||
}
|
||||
}, {
|
||||
initialMessage: translate('loras.bulkOperations.autoOrganizeProgress.initializing', {}, 'Initializing auto-organize...'),
|
||||
completionMessage: translate('loras.bulkOperations.autoOrganizeProgress.complete', {}, 'Auto-organize complete')
|
||||
});
|
||||
}
|
||||
}
|
||||
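The auto-organize flow above can also be triggered without the UI. Per the request options in the client code, a plain GET organizes everything while a POST with a file_paths body organizes only a selection; the host, port, and sample path below are assumptions for illustration:

# Illustrative sketch - BASE and the file path are placeholders.
import requests

BASE = "http://127.0.0.1:8188"

# Organize all models according to the current path template settings:
requests.get(f"{BASE}/api/lm/loras/auto-organize")

# Or organize just a selection:
requests.post(
    f"{BASE}/api/lm/loras/auto-organize",
    json={"file_paths": ["/models/loras/example.safetensors"]},
)

Progress is streamed over the fetch-progress WebSocket rather than returned in the HTTP response, so these calls come back before the moves finish.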
@@ -21,7 +21,7 @@ export async function fetchRecipesPage(page = 1, pageSize = 100) {
|
||||
// If we have a specific recipe ID to load
|
||||
if (pageState.customFilter?.active && pageState.customFilter?.recipeId) {
|
||||
// Special case: load specific recipe
|
||||
const response = await fetch(`/api/recipe/${pageState.customFilter.recipeId}`);
|
||||
const response = await fetch(`/api/lm/recipe/${pageState.customFilter.recipeId}`);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to load recipe: ${response.statusText}`);
|
||||
@@ -72,7 +72,7 @@ export async function fetchRecipesPage(page = 1, pageSize = 100) {
|
||||
}
|
||||
|
||||
// Fetch recipes
|
||||
const response = await fetch(`/api/recipes?${params.toString()}`);
|
||||
const response = await fetch(`/api/lm/recipes?${params.toString()}`);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to load recipes: ${response.statusText}`);
|
||||
@@ -207,7 +207,7 @@ export async function refreshRecipes() {
|
||||
state.loadingManager.showSimpleLoading('Refreshing recipes...');
|
||||
|
||||
// Call the API endpoint to rebuild the recipe cache
|
||||
const response = await fetch('/api/recipes/scan');
|
||||
const response = await fetch('/api/lm/recipes/scan');
|
||||
|
||||
if (!response.ok) {
|
||||
const data = await response.json();
|
||||
@@ -274,7 +274,7 @@ export async function updateRecipeMetadata(filePath, updates) {
|
||||
const basename = filePath.split('/').pop().split('\\').pop();
|
||||
const recipeId = basename.substring(0, basename.lastIndexOf('.'));
|
||||
|
||||
const response = await fetch(`/api/recipe/${recipeId}/update`, {
|
||||
const response = await fetch(`/api/lm/recipe/${recipeId}/update`, {
|
||||
method: 'PUT',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { appCore } from './core.js';
|
||||
import { confirmDelete, closeDeleteModal, confirmExclude, closeExcludeModal } from './utils/modalUtils.js';
|
||||
import { createPageControls } from './components/controls/index.js';
|
||||
import { CheckpointContextMenu } from './components/ContextMenu/index.js';
|
||||
import { ModelDuplicatesManager } from './components/ModelDuplicatesManager.js';
|
||||
import { MODEL_TYPES } from './api/apiConfig.js';
|
||||
|
||||
@@ -30,10 +29,7 @@ class CheckpointsPageManager {
|
||||
}
|
||||
|
||||
async initialize() {
|
||||
// Initialize context menu
|
||||
new CheckpointContextMenu();
|
||||
|
||||
// Initialize common page features
|
||||
// Initialize common page features (including context menus)
|
||||
appCore.initializePageFeatures();
|
||||
|
||||
console.log('Checkpoints Manager initialized');
|
||||
@@ -48,4 +44,4 @@ document.addEventListener('DOMContentLoaded', async () => {
|
||||
// Initialize checkpoints page
|
||||
const checkpointsPage = new CheckpointsPageManager();
|
||||
await checkpointsPage.initialize();
|
||||
});
|
||||
});
|
||||
@@ -15,17 +15,6 @@ export class BaseContextMenu {
|
||||
init() {
|
||||
// Hide menu on regular clicks
|
||||
document.addEventListener('click', () => this.hideMenu());
|
||||
|
||||
// Show menu on right-click on cards
|
||||
document.addEventListener('contextmenu', (e) => {
|
||||
const card = e.target.closest(this.cardSelector);
|
||||
if (!card) {
|
||||
this.hideMenu();
|
||||
return;
|
||||
}
|
||||
e.preventDefault();
|
||||
this.showMenu(e.clientX, e.clientY, card);
|
||||
});
|
||||
|
||||
// Handle menu item clicks
|
||||
this.menu.addEventListener('click', (e) => {
|
||||
|
||||
117
static/js/components/ContextMenu/BulkContextMenu.js
Normal file
@@ -0,0 +1,117 @@
|
||||
import { BaseContextMenu } from './BaseContextMenu.js';
|
||||
import { state } from '../../state/index.js';
|
||||
import { bulkManager } from '../../managers/BulkManager.js';
|
||||
import { updateElementText } from '../../utils/i18nHelpers.js';
|
||||
|
||||
export class BulkContextMenu extends BaseContextMenu {
|
||||
constructor() {
|
||||
super('bulkContextMenu', '.model-card.selected');
|
||||
this.setupBulkMenuItems();
|
||||
}
|
||||
|
||||
setupBulkMenuItems() {
|
||||
if (!this.menu) return;
|
||||
|
||||
// Update menu items visibility based on current model type
|
||||
this.updateMenuItemsForModelType();
|
||||
|
||||
// Update selected count in header
|
||||
this.updateSelectedCountHeader();
|
||||
}
|
||||
|
||||
updateMenuItemsForModelType() {
|
||||
const currentModelType = state.currentPageType;
|
||||
const config = bulkManager.actionConfig[currentModelType];
|
||||
|
||||
if (!config) return;
|
||||
|
||||
// Update button visibility based on model type
|
||||
const addTagsItem = this.menu.querySelector('[data-action="add-tags"]');
|
||||
const setBaseModelItem = this.menu.querySelector('[data-action="set-base-model"]');
|
||||
const sendToWorkflowAppendItem = this.menu.querySelector('[data-action="send-to-workflow-append"]');
|
||||
const sendToWorkflowReplaceItem = this.menu.querySelector('[data-action="send-to-workflow-replace"]');
|
||||
const copyAllItem = this.menu.querySelector('[data-action="copy-all"]');
|
||||
const refreshAllItem = this.menu.querySelector('[data-action="refresh-all"]');
|
||||
const moveAllItem = this.menu.querySelector('[data-action="move-all"]');
|
||||
const autoOrganizeItem = this.menu.querySelector('[data-action="auto-organize"]');
|
||||
const deleteAllItem = this.menu.querySelector('[data-action="delete-all"]');
|
||||
|
||||
if (sendToWorkflowAppendItem) {
|
||||
sendToWorkflowAppendItem.style.display = config.sendToWorkflow ? 'flex' : 'none';
|
||||
}
|
||||
if (sendToWorkflowReplaceItem) {
|
||||
sendToWorkflowReplaceItem.style.display = config.sendToWorkflow ? 'flex' : 'none';
|
||||
}
|
||||
if (copyAllItem) {
|
||||
copyAllItem.style.display = config.copyAll ? 'flex' : 'none';
|
||||
}
|
||||
if (refreshAllItem) {
|
||||
refreshAllItem.style.display = config.refreshAll ? 'flex' : 'none';
|
||||
}
|
||||
if (moveAllItem) {
|
||||
moveAllItem.style.display = config.moveAll ? 'flex' : 'none';
|
||||
}
|
||||
if (autoOrganizeItem) {
|
||||
autoOrganizeItem.style.display = config.autoOrganize ? 'flex' : 'none';
|
||||
}
|
||||
if (deleteAllItem) {
|
||||
deleteAllItem.style.display = config.deleteAll ? 'flex' : 'none';
|
||||
}
|
||||
if (addTagsItem) {
|
||||
addTagsItem.style.display = config.addTags ? 'flex' : 'none';
|
||||
}
|
||||
if (setBaseModelItem) {
|
||||
setBaseModelItem.style.display = 'flex'; // Base model editing is available for all model types
|
||||
}
|
||||
}
|
||||
|
||||
updateSelectedCountHeader() {
|
||||
const headerElement = this.menu.querySelector('.bulk-context-header');
|
||||
if (headerElement) {
|
||||
updateElementText(headerElement, 'loras.bulkOperations.selected', { count: state.selectedModels.size });
|
||||
}
|
||||
}
|
||||
|
||||
showMenu(x, y, card) {
|
||||
this.updateMenuItemsForModelType();
|
||||
this.updateSelectedCountHeader();
|
||||
super.showMenu(x, y, card);
|
||||
}
|
||||
|
||||
handleMenuAction(action, menuItem) {
|
||||
switch (action) {
|
||||
case 'add-tags':
|
||||
bulkManager.showBulkAddTagsModal();
|
||||
break;
|
||||
case 'set-base-model':
|
||||
bulkManager.showBulkBaseModelModal();
|
||||
break;
|
||||
case 'send-to-workflow-append':
|
||||
bulkManager.sendAllModelsToWorkflow(false);
|
||||
break;
|
||||
case 'send-to-workflow-replace':
|
||||
bulkManager.sendAllModelsToWorkflow(true);
|
||||
break;
|
||||
case 'copy-all':
|
||||
bulkManager.copyAllModelsSyntax();
|
||||
break;
|
||||
case 'refresh-all':
|
||||
bulkManager.refreshAllMetadata();
|
||||
break;
|
||||
case 'move-all':
|
||||
window.moveManager.showMoveModal('bulk');
|
||||
break;
|
||||
case 'auto-organize':
|
||||
bulkManager.autoOrganizeSelectedModels();
|
||||
break;
|
||||
case 'delete-all':
|
||||
bulkManager.showBulkDeleteModal();
|
||||
break;
|
||||
case 'clear':
|
||||
bulkManager.clearSelection();
|
||||
break;
|
||||
default:
|
||||
console.warn(`Unknown bulk action: ${action}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -125,8 +125,8 @@ export const ModelContextMenuMixin = {
|
||||
state.loadingManager.showSimpleLoading('Re-linking to Civitai...');
|
||||
|
||||
const endpoint = this.modelType === 'checkpoint' ?
|
||||
'/api/checkpoints/relink-civitai' :
|
||||
'/api/loras/relink-civitai';
|
||||
'/api/lm/checkpoints/relink-civitai' :
|
||||
'/api/lm/loras/relink-civitai';
|
||||
|
||||
const response = await fetch(endpoint, {
|
||||
method: 'POST',
|
||||
|
||||
@@ -103,7 +103,7 @@ export class RecipeContextMenu extends BaseContextMenu {
|
||||
return;
|
||||
}
|
||||
|
||||
fetch(`/api/recipe/${recipeId}/syntax`)
|
||||
fetch(`/api/lm/recipe/${recipeId}/syntax`)
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
if (data.success && data.syntax) {
|
||||
@@ -126,7 +126,7 @@ export class RecipeContextMenu extends BaseContextMenu {
|
||||
return;
|
||||
}
|
||||
|
||||
fetch(`/api/recipe/${recipeId}/syntax`)
|
||||
fetch(`/api/lm/recipe/${recipeId}/syntax`)
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
if (data.success && data.syntax) {
|
||||
@@ -149,7 +149,7 @@ export class RecipeContextMenu extends BaseContextMenu {
|
||||
}
|
||||
|
||||
// First get the recipe details to access its LoRAs
|
||||
fetch(`/api/recipe/${recipeId}`)
|
||||
fetch(`/api/lm/recipe/${recipeId}`)
|
||||
.then(response => response.json())
|
||||
.then(recipe => {
|
||||
// Clear any previous filters first
|
||||
@@ -189,7 +189,7 @@ export class RecipeContextMenu extends BaseContextMenu {
|
||||
|
||||
try {
|
||||
// First get the recipe details
|
||||
const response = await fetch(`/api/recipe/${recipeId}`);
|
||||
const response = await fetch(`/api/lm/recipe/${recipeId}`);
|
||||
const recipe = await response.json();
|
||||
|
||||
// Get missing LoRAs
|
||||
@@ -209,9 +209,9 @@ export class RecipeContextMenu extends BaseContextMenu {
|
||||
|
||||
// Determine which endpoint to use based on available data
|
||||
if (lora.modelVersionId) {
|
||||
endpoint = `/api/loras/civitai/model/version/${lora.modelVersionId}`;
|
||||
endpoint = `/api/lm/loras/civitai/model/version/${lora.modelVersionId}`;
|
||||
} else if (lora.hash) {
|
||||
endpoint = `/api/loras/civitai/model/hash/${lora.hash}`;
|
||||
endpoint = `/api/lm/loras/civitai/model/hash/${lora.hash}`;
|
||||
} else {
|
||||
console.error("Missing both hash and modelVersionId for lora:", lora);
|
||||
return null;
|
||||
|
||||
@@ -2,4 +2,25 @@ export { LoraContextMenu } from './LoraContextMenu.js';
|
||||
export { RecipeContextMenu } from './RecipeContextMenu.js';
|
||||
export { CheckpointContextMenu } from './CheckpointContextMenu.js';
|
||||
export { EmbeddingContextMenu } from './EmbeddingContextMenu.js';
|
||||
export { ModelContextMenuMixin } from './ModelContextMenuMixin.js';
|
||||
export { ModelContextMenuMixin } from './ModelContextMenuMixin.js';
|
||||
|
||||
import { LoraContextMenu } from './LoraContextMenu.js';
|
||||
import { RecipeContextMenu } from './RecipeContextMenu.js';
|
||||
import { CheckpointContextMenu } from './CheckpointContextMenu.js';
|
||||
import { EmbeddingContextMenu } from './EmbeddingContextMenu.js';
|
||||
|
||||
// Factory method to create page-specific context menu instances
|
||||
export function createPageContextMenu(pageType) {
|
||||
switch (pageType) {
|
||||
case 'loras':
|
||||
return new LoraContextMenu();
|
||||
case 'recipes':
|
||||
return new RecipeContextMenu();
|
||||
case 'checkpoints':
|
||||
return new CheckpointContextMenu();
|
||||
case 'embeddings':
|
||||
return new EmbeddingContextMenu();
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -13,7 +13,7 @@ export class DuplicatesManager {
|
||||
|
||||
async findDuplicates() {
|
||||
try {
|
||||
const response = await fetch('/api/recipes/find-duplicates');
|
||||
const response = await fetch('/api/lm/recipes/find-duplicates');
|
||||
if (!response.ok) {
|
||||
throw new Error('Failed to find duplicates');
|
||||
}
|
||||
@@ -354,7 +354,7 @@ export class DuplicatesManager {
|
||||
const recipeIds = Array.from(this.selectedForDeletion);
|
||||
|
||||
// Call API to bulk delete
|
||||
const response = await fetch('/api/recipes/bulk-delete', {
|
||||
const response = await fetch('/api/lm/recipes/bulk-delete', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
|
||||
@@ -48,7 +48,7 @@ export class ModelDuplicatesManager {
|
||||
// Method to check for duplicates count using existing endpoint
|
||||
async checkDuplicatesCount() {
|
||||
try {
|
||||
const endpoint = `/api/${this.modelType}/find-duplicates`;
|
||||
const endpoint = `/api/lm/${this.modelType}/find-duplicates`;
|
||||
const response = await fetch(endpoint);
|
||||
|
||||
if (!response.ok) {
|
||||
@@ -104,7 +104,7 @@ export class ModelDuplicatesManager {
|
||||
async findDuplicates() {
|
||||
try {
|
||||
// Determine API endpoint based on model type
|
||||
const endpoint = `/api/${this.modelType}/find-duplicates`;
|
||||
const endpoint = `/api/lm/${this.modelType}/find-duplicates`;
|
||||
|
||||
const response = await fetch(endpoint);
|
||||
if (!response.ok) {
|
||||
@@ -623,7 +623,7 @@ export class ModelDuplicatesManager {
|
||||
const filePaths = Array.from(this.selectedForDeletion);
|
||||
|
||||
// Call API to bulk delete
|
||||
const response = await fetch(`/api/${this.modelType}/bulk-delete`, {
|
||||
const response = await fetch(`/api/lm/${this.modelType}/bulk-delete`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
@@ -648,7 +648,7 @@ export class ModelDuplicatesManager {
|
||||
|
||||
// Check if there are still duplicates
|
||||
try {
|
||||
const endpoint = `/api/${this.modelType}/find-duplicates`;
|
||||
const endpoint = `/api/lm/${this.modelType}/find-duplicates`;
|
||||
const dupResponse = await fetch(endpoint);
|
||||
|
||||
if (!dupResponse.ok) {
|
||||
@@ -756,7 +756,7 @@ export class ModelDuplicatesManager {
|
||||
const filePaths = group.models.map(model => model.file_path);
|
||||
|
||||
// Make API request to verify hashes
|
||||
const response = await fetch(`/api/${this.modelType}/verify-duplicates`, {
|
||||
const response = await fetch(`/api/lm/${this.modelType}/verify-duplicates`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
|
||||
@@ -203,7 +203,7 @@ class RecipeCard {
return;
}

fetch(`/api/recipe/${recipeId}/syntax`)
fetch(`/api/lm/recipe/${recipeId}/syntax`)
.then(response => response.json())
.then(data => {
if (data.success && data.syntax) {

@@ -299,7 +299,7 @@ class RecipeCard {
deleteBtn.disabled = true;

// Call API to delete the recipe
fetch(`/api/recipe/${recipeId}`, {
fetch(`/api/lm/recipe/${recipeId}`, {
method: 'DELETE',
headers: {
'Content-Type': 'application/json'

@@ -341,7 +341,7 @@ class RecipeCard {
showToast('toast.recipes.preparingForSharing', {}, 'info');

// Call the API to process the image with metadata
fetch(`/api/recipe/${recipeId}/share`)
fetch(`/api/lm/recipe/${recipeId}/share`)
.then(response => {
if (!response.ok) {
throw new Error('Failed to prepare recipe for sharing');

@@ -784,7 +784,7 @@ class RecipeModal {

try {
// Fetch recipe syntax from backend
const response = await fetch(`/api/recipe/${this.recipeId}/syntax`);
const response = await fetch(`/api/lm/recipe/${this.recipeId}/syntax`);

if (!response.ok) {
throw new Error(`Failed to get recipe syntax: ${response.statusText}`);

@@ -830,9 +830,9 @@ class RecipeModal {

// Determine which endpoint to use based on available data
if (lora.modelVersionId) {
endpoint = `/api/loras/civitai/model/version/${lora.modelVersionId}`;
endpoint = `/api/lm/loras/civitai/model/version/${lora.modelVersionId}`;
} else if (lora.hash) {
endpoint = `/api/loras/civitai/model/hash/${lora.hash}`;
endpoint = `/api/lm/loras/civitai/model/hash/${lora.hash}`;
} else {
console.error("Missing both hash and modelVersionId for lora:", lora);
return null;

@@ -1003,7 +1003,7 @@ class RecipeModal {
state.loadingManager.showSimpleLoading('Reconnecting LoRA...');

// Call API to reconnect the LoRA
const response = await fetch('/api/recipe/lora/reconnect', {
const response = await fetch('/api/lm/recipe/lora/reconnect', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
@@ -46,7 +46,7 @@ export class AlphabetBar {
*/
async fetchLetterCounts() {
try {
const response = await fetch('/api/loras/letter-counts');
const response = await fetch('/api/lm/loras/letter-counts');

if (!response.ok) {
throw new Error(`Failed to fetch letter counts: ${response.statusText}`);

@@ -43,6 +43,14 @@ export class CheckpointsControls extends PageControls {
showDownloadModal: () => {
downloadManager.showDownloadModal();
},

toggleBulkMode: () => {
if (window.bulkManager) {
window.bulkManager.toggleBulkMode();
} else {
console.error('Bulk manager not available');
}
},

// No clearCustomFilter implementation is needed for checkpoints
// as custom filters are currently only used for LoRAs

@@ -43,6 +43,14 @@ export class EmbeddingsControls extends PageControls {
showDownloadModal: () => {
downloadManager.showDownloadModal();
},

toggleBulkMode: () => {
if (window.bulkManager) {
window.bulkManager.toggleBulkMode();
} else {
console.error('Bulk manager not available');
}
},

// No clearCustomFilter implementation is needed for embeddings
// as custom filters are currently only used for LoRAs

@@ -70,7 +70,6 @@ export class PageControls {
async initSidebarManager() {
try {
await this.sidebarManager.initialize(this);
console.log('SidebarManager initialized');
} catch (error) {
console.error('Failed to initialize SidebarManager:', error);
}

@@ -186,12 +185,9 @@ export class PageControls {
duplicatesButton.addEventListener('click', () => this.findDuplicates());
}

if (this.pageType === 'loras') {
// Bulk operations button - LoRAs only
const bulkButton = document.querySelector('[data-action="bulk"]');
if (bulkButton) {
bulkButton.addEventListener('click', () => this.toggleBulkMode());
}
const bulkButton = document.querySelector('[data-action="bulk"]');
if (bulkButton) {
bulkButton.addEventListener('click', () => this.toggleBulkMode());
}

// Favorites filter button handler

@@ -350,14 +346,9 @@ export class PageControls {
}

/**
* Toggle bulk mode (LoRAs only)
* Toggle bulk mode
*/
toggleBulkMode() {
if (this.pageType !== 'loras' || !this.api) {
console.error('Bulk mode is only available for LoRAs');
return;
}

this.api.toggleBulkMode();
}
@@ -169,7 +169,7 @@ class InitializationManager {
*/
pollProgress() {
const checkProgress = () => {
fetch('/api/init-status')
fetch('/api/lm/init-status')
.then(response => response.json())
.then(data => {
this.handleProgressUpdate(data);

@@ -9,48 +9,46 @@ import { MODEL_TYPES } from '../../api/apiConfig.js';
import { getModelApiClient } from '../../api/modelApiFactory.js';
import { showDeleteModal } from '../../utils/modalUtils.js';
import { translate } from '../../utils/i18nHelpers.js';
import { eventManager } from '../../utils/EventManager.js';

// Add global event delegation handlers
// Add global event delegation handlers using event manager
export function setupModelCardEventDelegation(modelType) {
const gridElement = document.getElementById('modelGrid');
if (!gridElement) return;
// Remove any existing handler first
eventManager.removeHandler('click', 'modelCard-delegation');

// Remove any existing event listener to prevent duplication
gridElement.removeEventListener('click', gridElement._handleModelCardEvent);

// Create event handler with modelType context
const handleModelCardEvent = (event) => handleModelCardEvent_internal(event, modelType);

// Add the event delegation handler
gridElement.addEventListener('click', handleModelCardEvent);

// Store reference to the handler for cleanup
gridElement._handleModelCardEvent = handleModelCardEvent;
// Register model card event delegation with event manager
eventManager.addHandler('click', 'modelCard-delegation', (event) => {
return handleModelCardEvent_internal(event, modelType);
}, {
priority: 60, // Medium priority for model card interactions
targetSelector: '#modelGrid',
skipWhenModalOpen: false // Allow model card interactions even when modals are open (for some actions)
});
}

// Event delegation handler for all model card events
function handleModelCardEvent_internal(event, modelType) {
// Find the closest card element
const card = event.target.closest('.model-card');
if (!card) return;
if (!card) return false; // Continue with other handlers

// Handle specific elements within the card
if (event.target.closest('.toggle-blur-btn')) {
event.stopPropagation();
toggleBlurContent(card);
return;
return true; // Stop propagation
}

if (event.target.closest('.show-content-btn')) {
event.stopPropagation();
showBlurredContent(card);
return;
return true; // Stop propagation
}

if (event.target.closest('.fa-star')) {
event.stopPropagation();
toggleFavorite(card);
return;
return true; // Stop propagation
}

if (event.target.closest('.fa-globe')) {

@@ -58,41 +56,42 @@ function handleModelCardEvent_internal(event, modelType) {
if (card.dataset.from_civitai === 'true') {
openCivitai(card.dataset.filepath);
}
return;
return true; // Stop propagation
}

if (event.target.closest('.fa-paper-plane')) {
event.stopPropagation();
handleSendToWorkflow(card, event.shiftKey, modelType);
return;
return true; // Stop propagation
}

if (event.target.closest('.fa-copy')) {
event.stopPropagation();
handleCopyAction(card, modelType);
return;
return true; // Stop propagation
}

if (event.target.closest('.fa-trash')) {
event.stopPropagation();
showDeleteModal(card.dataset.filepath);
return;
return true; // Stop propagation
}

if (event.target.closest('.fa-image')) {
event.stopPropagation();
getModelApiClient().replaceModelPreview(card.dataset.filepath);
return;
return true; // Stop propagation
}

if (event.target.closest('.fa-folder-open')) {
event.stopPropagation();
handleExampleImagesAccess(card, modelType);
return;
return true; // Stop propagation
}

// If no specific element was clicked, handle the card click (show modal or toggle selection)
handleCardClick(card, modelType);
return false; // Continue with other handlers (e.g., bulk selection)
}

// Helper functions for event handling

@@ -187,7 +186,7 @@ async function handleExampleImagesAccess(card, modelType) {
const modelHash = card.dataset.sha256;

try {
const response = await fetch(`/api/has-example-images?model_hash=${modelHash}`);
const response = await fetch(`/api/lm/has-example-images?model_hash=${modelHash}`);
const data = await response.json();

if (data.has_images) {

@@ -217,13 +216,6 @@ function handleCardClick(card, modelType) {
}

async function showModelModalFromCard(card, modelType) {
// Get the appropriate preview versions map
const previewVersionsKey = modelType;
const previewVersions = state.pages[previewVersionsKey]?.previewVersions || new Map();
const version = previewVersions.get(card.dataset.filepath);
const previewUrl = card.dataset.preview_url || '/loras_static/images/no-preview.png';
const versionedPreviewUrl = version ? `${previewUrl}?t=${version}` : previewUrl;

// Create model metadata object
const modelMeta = {
sha256: card.dataset.sha256,

@@ -236,7 +228,6 @@ async function showModelModalFromCard(card, modelType) {
from_civitai: card.dataset.from_civitai === 'true',
base_model: card.dataset.base_model,
notes: card.dataset.notes || '',
preview_url: versionedPreviewUrl,
favorite: card.dataset.favorite === 'true',
// Parse civitai metadata from the card's dataset
civitai: JSON.parse(card.dataset.meta || '{}'),
@@ -2,10 +2,10 @@
* ModelMetadata.js
* Handles model metadata editing functionality - General version
*/

import { BASE_MODEL_CATEGORIES } from '../../utils/constants.js';
import { showToast } from '../../utils/uiHelpers.js';
import { BASE_MODELS } from '../../utils/constants.js';
import { getModelApiClient } from '../../api/modelApiFactory.js';
import { translate } from '../../utils/i18nHelpers.js';

/**
* Set up model name editing functionality

@@ -172,28 +172,8 @@ export function setupBaseModelEditing(filePath) {
// Flag to track if a change was made
let valueChanged = false;

// Add options from BASE_MODELS constants
const baseModelCategories = {
'Stable Diffusion 1.x': [BASE_MODELS.SD_1_4, BASE_MODELS.SD_1_5, BASE_MODELS.SD_1_5_LCM, BASE_MODELS.SD_1_5_HYPER],
'Stable Diffusion 2.x': [BASE_MODELS.SD_2_0, BASE_MODELS.SD_2_1],
'Stable Diffusion 3.x': [BASE_MODELS.SD_3, BASE_MODELS.SD_3_5, BASE_MODELS.SD_3_5_MEDIUM, BASE_MODELS.SD_3_5_LARGE, BASE_MODELS.SD_3_5_LARGE_TURBO],
'SDXL': [BASE_MODELS.SDXL, BASE_MODELS.SDXL_LIGHTNING, BASE_MODELS.SDXL_HYPER],
'Video Models': [
BASE_MODELS.SVD, BASE_MODELS.LTXV, BASE_MODELS.HUNYUAN_VIDEO, BASE_MODELS.WAN_VIDEO,
BASE_MODELS.WAN_VIDEO_1_3B_T2V, BASE_MODELS.WAN_VIDEO_14B_T2V,
BASE_MODELS.WAN_VIDEO_14B_I2V_480P, BASE_MODELS.WAN_VIDEO_14B_I2V_720P,
BASE_MODELS.WAN_VIDEO_2_2_TI2V_5B, BASE_MODELS.WAN_VIDEO_2_2_T2V_A14B,
BASE_MODELS.WAN_VIDEO_2_2_I2V_A14B
],
'Flux Models': [BASE_MODELS.FLUX_1_D, BASE_MODELS.FLUX_1_S, BASE_MODELS.FLUX_1_KONTEXT, BASE_MODELS.FLUX_1_KREA],
'Other Models': [
BASE_MODELS.ILLUSTRIOUS, BASE_MODELS.PONY, BASE_MODELS.HIDREAM,
BASE_MODELS.QWEN, BASE_MODELS.AURAFLOW,
BASE_MODELS.PIXART_A, BASE_MODELS.PIXART_E, BASE_MODELS.HUNYUAN_1,
BASE_MODELS.LUMINA, BASE_MODELS.KOLORS, BASE_MODELS.NOOBAI,
BASE_MODELS.UNKNOWN
]
};
// Add options from BASE_MODEL_CATEGORIES constants
const baseModelCategories = BASE_MODEL_CATEGORIES;

// Create option groups for better organization
Object.entries(baseModelCategories).forEach(([category, models]) => {
@@ -166,10 +166,14 @@ export async function showModelModal(model, modelType) {
</button>
</div>
</div>
<div class="info-item location-size">
<div class="info-item">
<div class="location-wrapper">
<label>${translate('modals.model.metadata.location', {}, 'Location')}</label>
<span class="file-path">${modelWithFullData.file_path.replace(/[^/]+$/, '') || 'N/A'}</span>
<span class="file-path" title="${translate('modals.model.actions.openFileLocation', {}, 'Open file location')}"
data-action="open-file-location"
data-filepath="${modelWithFullData.file_path}">
${modelWithFullData.file_path.replace(/[^/]+$/, '') || 'N/A'}
</span>
</div>
</div>
<div class="info-item base-size">

@@ -318,6 +322,12 @@ function setupEventHandlers(filePath) {
window.open(`https://civitai.com/user/${username}`, '_blank');
}
break;
case 'open-file-location':
const filePath = target.dataset.filepath;
if (filePath) {
openFileLocation(filePath);
}
break;
}
}

@@ -444,6 +454,24 @@ async function saveNotes(filePath) {
}
}

/**
* Call backend to open file location and select the file
* @param {string} filePath
*/
async function openFileLocation(filePath) {
try {
const resp = await fetch('/api/lm/open-file-location', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ 'file_path': filePath })
});
if (!resp.ok) throw new Error('Failed to open file location');
showToast('modals.model.openFileLocation.success', {}, 'success');
} catch (err) {
showToast('modals.model.openFileLocation.failed', {}, 'error');
}
}

// Export the model modal API
const modelModal = {
show: showModelModal,
@@ -4,14 +4,7 @@
*/
import { showToast } from '../../utils/uiHelpers.js';
import { getModelApiClient } from '../../api/modelApiFactory.js';
import { translate } from '../../utils/i18nHelpers.js';

// Preset tag suggestions
const PRESET_TAGS = [
'character', 'style', 'concept', 'clothing',
'poses', 'background', 'vehicle', 'buildings',
'objects', 'animal'
];
import { PRESET_TAGS } from '../../utils/constants.js';

// Create a named function so we can remove it later
let saveTagsHandler = null;

@@ -139,7 +132,7 @@ export function setupTagEditMode() {
// ...existing helper functions...

/**
* Save tags - 支持LoRA和Checkpoint
* Save tags
*/
async function saveTags() {
const editBtn = document.querySelector('.edit-tags-btn');
@@ -22,7 +22,7 @@ export function loadRecipesForLora(loraName, sha256) {
`;

// Fetch recipes that use this Lora by hash
fetch(`/api/recipes/for-lora?hash=${encodeURIComponent(sha256.toLowerCase())}`)
fetch(`/api/lm/recipes/for-lora?hash=${encodeURIComponent(sha256.toLowerCase())}`)
.then(response => response.json())
.then(data => {
if (!data.success) {

@@ -166,7 +166,7 @@ function copyRecipeSyntax(recipeId) {
return;
}

fetch(`/api/recipe/${recipeId}/syntax`)
fetch(`/api/lm/recipe/${recipeId}/syntax`)
.then(response => response.json())
.then(data => {
if (data.success && data.syntax) {

@@ -14,7 +14,7 @@ import { getModelApiClient } from '../../api/modelApiFactory.js';
*/
async function fetchTrainedWords(filePath) {
try {
const response = await fetch(`/api/trained-words?file_path=${encodeURIComponent(filePath)}`);
const response = await fetch(`/api/lm/trained-words?file_path=${encodeURIComponent(filePath)}`);
const data = await response.json();

if (data.success) {

@@ -75,8 +75,6 @@ export function generateImageWrapper(media, heightPercent, shouldBlur, nsfwText,
data-remote-src="${remoteUrl}"
data-nsfw-level="${nsfwLevel}"
alt="Preview"
crossorigin="anonymous"
referrerpolicy="no-referrer"
width="${media.width}"
height="${media.height}"
class="lazy ${shouldBlur ? 'blurred' : ''}">

@@ -408,7 +408,7 @@ export function initMediaControlHandlers(container) {

try {
// Call the API to delete the custom example
const response = await fetch('/api/delete-example-image', {
const response = await fetch('/api/lm/delete-example-image', {
method: 'POST',
headers: {
'Content-Type': 'application/json'

@@ -29,7 +29,7 @@ export async function loadExampleImages(images, modelHash) {
let localFiles = [];

try {
const endpoint = '/api/example-image-files';
const endpoint = '/api/lm/example-image-files';
const params = `model_hash=${modelHash}`;

const response = await fetch(`${endpoint}?${params}`);

@@ -191,7 +191,7 @@ function renderMediaItem(img, index, exampleFiles) {
);

// Determine if this is a custom image (has id property)
const isCustomImage = Boolean(img.id);
const isCustomImage = Boolean(typeof img.id === 'string' && img.id);

// Create the media control buttons HTML
const mediaControlsHtml = `

@@ -235,7 +235,7 @@ function findLocalFile(img, index, exampleFiles) {

let localFile = null;

if (img.id) {
if (typeof img.id === 'string' && img.id) {
// This is a custom image, find by custom_<id>
const customPrefix = `custom_${img.id}`;
localFile = exampleFiles.find(file => file.name.startsWith(customPrefix));

@@ -374,7 +374,7 @@ async function handleImportFiles(files, modelHash, importContainer) {
});

// Call API to import files
const response = await fetch('/api/import-example-images', {
const response = await fetch('/api/lm/import-example-images', {
method: 'POST',
body: formData
});

@@ -386,7 +386,7 @@ async function handleImportFiles(files, modelHash, importContainer) {
}

// Get updated local files
const updatedFilesResponse = await fetch(`/api/example-image-files?model_hash=${modelHash}`);
const updatedFilesResponse = await fetch(`/api/lm/example-image-files?model_hash=${modelHash}`);
const updatedFilesResult = await updatedFilesResponse.json();

if (!updatedFilesResult.success) {
@@ -7,7 +7,7 @@ import { HeaderManager } from './components/Header.js';
import { settingsManager } from './managers/SettingsManager.js';
import { moveManager } from './managers/MoveManager.js';
import { bulkManager } from './managers/BulkManager.js';
import { exampleImagesManager } from './managers/ExampleImagesManager.js';
import { ExampleImagesManager } from './managers/ExampleImagesManager.js';
import { helpManager } from './managers/HelpManager.js';
import { bannerService } from './managers/BannerService.js';
import { initTheme, initBackToTop } from './utils/uiHelpers.js';

@@ -15,6 +15,9 @@ import { initializeInfiniteScroll } from './utils/infiniteScroll.js';
import { migrateStorageItems } from './utils/storageHelpers.js';
import { i18n } from './i18n/index.js';
import { onboardingManager } from './managers/OnboardingManager.js';
import { BulkContextMenu } from './components/ContextMenu/BulkContextMenu.js';
import { createPageContextMenu } from './components/ContextMenu/index.js';
import { initializeEventManagement } from './utils/eventManagementInit.js';

// Core application class
export class AppCore {

@@ -35,6 +38,11 @@ export class AppCore {

console.log(`AppCore: Language set: ${i18n.getCurrentLocale()}`);

// Initialize settings manager and wait for it to sync from backend
console.log('AppCore: Initializing settings...');
await settingsManager.waitForInitialization();
console.log('AppCore: Settings initialized');

// Initialize managers
state.loadingManager = new LoadingManager();
modalManager.initialize();

@@ -42,7 +50,7 @@ export class AppCore {
bannerService.initialize();
window.modalManager = modalManager;
window.settingsManager = settingsManager;
window.exampleImagesManager = exampleImagesManager;
window.exampleImagesManager = new ExampleImagesManager();
window.helpManager = helpManager;
window.moveManager = moveManager;
window.bulkManager = bulkManager;

@@ -51,9 +59,15 @@ export class AppCore {
window.headerManager = new HeaderManager();
initTheme();
initBackToTop();

// Initialize the bulk manager and context menu only if not on recipes page
if (state.currentPageType !== 'recipes') {
bulkManager.initialize();

// Initialize the bulk manager
bulkManager.initialize();
// Initialize bulk context menu
const bulkContextMenu = new BulkContextMenu();
bulkManager.setBulkContextMenu(bulkContextMenu);
}

// Initialize the example images manager
exampleImagesManager.initialize();

@@ -62,6 +76,8 @@ export class AppCore {

const cardInfoDisplay = state.global.settings.cardInfoDisplay || 'always';
document.body.classList.toggle('hover-reveal', cardInfoDisplay === 'hover');

initializeEventManagement();

// Mark as initialized
this.initialized = true;

@@ -88,13 +104,19 @@ export class AppCore {
initializePageFeatures() {
const pageType = this.getPageType();

// Initialize virtual scroll for pages that need it
if (['loras', 'recipes', 'checkpoints', 'embeddings'].includes(pageType)) {
this.initializeContextMenus(pageType);
initializeInfiniteScroll(pageType);
}

return this;
}

// Initialize context menus for the current page
initializeContextMenus(pageType) {
// Create page-specific context menu
window.pageContextMenu = createPageContextMenu(pageType);
}
}

document.addEventListener('DOMContentLoaded', () => {

@@ -1,7 +1,6 @@
import { appCore } from './core.js';
import { confirmDelete, closeDeleteModal, confirmExclude, closeExcludeModal } from './utils/modalUtils.js';
import { createPageControls } from './components/controls/index.js';
import { EmbeddingContextMenu } from './components/ContextMenu/index.js';
import { ModelDuplicatesManager } from './components/ModelDuplicatesManager.js';
import { MODEL_TYPES } from './api/apiConfig.js';

@@ -30,10 +29,7 @@ class EmbeddingsPageManager {
}

async initialize() {
// Initialize context menu
new EmbeddingContextMenu();

// Initialize common page features
// Initialize common page features (including context menus)
appCore.initializePageFeatures();

console.log('Embeddings Manager initialized');

@@ -1,7 +1,6 @@
import { appCore } from './core.js';
import { state } from './state/index.js';
import { updateCardsForBulkMode } from './components/shared/ModelCard.js';
import { LoraContextMenu } from './components/ContextMenu/index.js';
import { createPageControls } from './components/controls/index.js';
import { confirmDelete, closeDeleteModal, confirmExclude, closeExcludeModal } from './utils/modalUtils.js';
import { ModelDuplicatesManager } from './components/ModelDuplicatesManager.js';

@@ -37,13 +36,10 @@ class LoraPageManager {
}

async initialize() {
// Initialize page-specific components
new LoraContextMenu();

// Initialize cards for current bulk mode state (should be false initially)
updateCardsForBulkMode(state.bulkMode);

// Initialize common page features (virtual scroll)
// Initialize common page features (including context menus and virtual scroll)
appCore.initializePageFeatures();
}
}
Some files were not shown because too many files have changed in this diff.