Mirror of https://github.com/willmiao/ComfyUI-Lora-Manager.git (synced 2026-03-22 05:32:12 -03:00)
Compare commits: 291 commits. The author and date columns of the commit table were not captured in this view; the commit SHAs, newest first, are:

adf7b6d4b2, 0566d50346, 4275dc3003, 30956aeefc, 64e1dd3dd6, 0dc4b6f728, 86074c87d7, 6f9245df01, 4540e47055, 4bb8981e78,
c49be91aa0, 2b847039d4, 1147725fd7, 26891e12a4, 2f7e44a76f, 9366d3d2d0, 6b606a5cc8, e5339c178a, 1a76f74482, 13f13eb095,
125fdecd61, d05076d258, 00b77581fc, 897787d17c, d5a280cf2b, a0c2d9b5ad, e713bd1ca2, beb8ff1dd1, 6a8f0867d9, 51ad1c9a33,
34872eb612, 8b4e3128ff, c66cbc800b, 21941521a0, 0d33884052, 415df49377, f5f45002c7, 1edf7126bb, a1a55a1002, 45f5cb46bd,
1b5e608a27, a7df8ae15c, 47ce0d0fe2, b220e288d0, 1fc8b45b68, 62f06302f0, 3e5cb223f3, 4ee5b7481c, e104b78c01, ba1ac58721,
a4fbeb6295, 68f8871403, 6fd74952b7, 1ea468cfc4, 14721c265f, 821827a375, 9ba3e2c204, d287883671, ead34818db, a060010b96,
76a92ac847, 74bc490383, 510d476323, 1e7257fd53, 4ff1f51b1c, 74507cef05, c23ab04d90, d50dde6cf6, fcb1fb39be, b0ef74f802,
f332aef41d, 1f91a3da8e, 16840c321d, c109e392ad, 5e69671366, 52d23d9b75, 4c4e6d7a7b, 03b6e78705, 24c01141d7, 6dc2811af4,
e6425dce32, 95e2ff5f1e, 92ac487128, 3250fa89cb, 7475de366b, affb507b37, 3320b80150, fb2b69b787, 29a05f6533, 9fa3fac973,
904b0d104a, 1d31dae110, 476ecb7423, 4eb67cf6da, a5a9f7ed83, c0b029e228, 9bebcc9a4b, ac7d23011c, 491e09b7b5, 192bc237bf,
f041f4a114, 2546580377, 8fbf2ab56d, ea727aad2e, 5520aecbba, 6b738a4769, 903a8050b3, 31b032429d, 2bcf341f04, ca6f45b359,
2a67cec16b, 1800afe31b, 8c6311355d, 91801dff85, be594133f0, 8a538d117e, 8d9118cbee, b67464ea13, 33334da0bb, 40ce2baa7b,
1134466cc0, 92341111ad, 4956d6781f, 63562240c4, 84d801cf14, b56fe4ca68, 6c83c65e02, a83f020fcc, 7f9a3bf272, f80e266d02,
7bef562541, b2428f607c, 8303196b57, 987b8c8742, e60a579b85, be8edafed0, a258a18fa4, 59010ca431, 75f3764e6c, 867ffd1163,
6acccbbb94, b2c4efab45, 408a435b71, 36d3cd93d5, b36fea002e, 52acbd954a, f6709a55c3, 7b374d747b, fd480a9360, ec8b228867,
401200050b, 29160bd6e5, 3c9e402bc0, ff4d0f0208, f82908221c, 4246908f2e, f64597afd2, 975ff2672d, e90ba31784, a4074c93bc,
7a8b7598c7, cd0d832f14, 5b0becaaf2, 9817bac2fe, f6bd48cfcd, 01843b8f2b, 94ed81de5e, 0700b8f399, d62cff9841, 083f4805b2,
8e5bfd379e, 2366f143d8, e997f5bc1b, 842beec7cc, d2268fc9e0, a98e26139f, 522a3ea88b, d7949fbc30, 6df083a1d5, 4dc80e7f6e,
c2a8508513, 159193ef43, 1f37ffb105, 919fed05c5, 1814f83bee, 1823840456, 623c28bfc3, 3079131337, a34ade0120, e9ada70088,
597cc48248, ec3f857ef1, 383b4de539, 1bf9326604, d9f5459d46, e45a1b1e19, 331ad8f644, 52fa88b04c, 8895a64d24, fdec535559,
6c5559ae2d, 9f54622b17, 03b6f4b378, af4cbe2332, 141f72963a, 3d3c66e12f, ee84571bdb, 6500936aad, 32d2b6c013, 05df40977d,
5d7a1dcde5, 9c45d9db6c, ca692ed0f2, af499565d3, fe2d7e3a9e, 9f69822221, bb43f047c2, 2356662492, 1624a45093, dcb9983786,
83d1828905, 6a281cf3ee, ed1cd39a6c, dda19b3920, 25139ca922, 3cd57a582c, d3903ac655, 199e374318, 8375c1413d, 9e268cf016,
112b3abc26, a8331a2357, 52e3ad08c1, 8d01d04ef0, a141384907, b8aa7184bd, e4195f874d, d04deff5ca, 20ce0778a0, 5a0b3470f1,
a920921570, 286f4ff384, 71ddfafa98, b7e3e53697, 16df548b77, 425c33ae00, c9289ed2dc, 96517cbdef, b03420faac, 65a1aa7ca2,
3a92e8eaf9, a8dc50d64a, 3397cc7d8d, c3e8131b24, f8ca8584ae, 3050bbe260, e1dda2795a, 6d8408e626, 0906271aa9, 4c33c9d256,
fa9c78209f, 6678ec8a60, 854e467c12, e6b94c7b21, 2c6f9d8602, c74033b9c0, d2b21d27bb, 215272469f, f7d05ab0f1, 6f2ad2be77,
66575c719a, 677a239d53, 3b96bfe5af, 83be5cfa64, 6b834c2362, 7abfc49e08, 65d5f50088, 4f1f4ffe3d, b0c2027a1c, 33c83358b0,
31223f0526, 92daadb92c, fae2e274fd, 342a722991, 65ec6aacb7, 9387470c69, 31f6edf8f0, 487b062175, d8e13de096, e8a30088ef,
bf7b07ba74
.github/copilot-instructions.md (vendored, new file, 1 line)
@@ -0,0 +1 @@
Always use English for comments.
.gitignore (vendored, 1 line changed)
@@ -5,3 +5,4 @@ output/*
py/run_test.py
.vscode/
cache/
civitai/
README.md (118 lines changed)

@@ -34,87 +34,53 @@ Enhance your Civitai browsing experience with our companion browser extension! S

## Release Notes

### v0.8.22
* **Embeddings Management** - Added Embeddings page for comprehensive embedding model management.
* **Advanced Sorting Options** - Introduced flexible sorting controls, allowing sorting by name, added date, or file size in both ascending and descending order.
* **Custom Download Path Templates & Base Model Mapping** - Implemented UI settings for configuring download path templates and base model path mappings, allowing customized model organization and storage location when downloading models via LM Civitai Extension.
* **LM Civitai Extension Enhancements** - Improved concurrent download performance and stability, with new support for canceling active downloads directly from the extension interface.
* **Update Feature** - Added update functionality, allowing users to update LoRA Manager to the latest release version directly from the LoRA Manager UI.
* **Bulk Operations: Refresh All** - Added bulk refresh functionality, allowing users to update Civitai metadata across multiple LoRAs.

### v0.9.3
* **Metadata Archive Database Support** - Added the ability to download and utilize a metadata archive database, enabling access to metadata for models that have been deleted from CivitAI.
* **App-Level Proxy Settings** - Introduced support for configuring a global proxy within the application, making it easier to use the manager behind network restrictions.
* **Bug Fixes** - Various bug fixes for improved stability and reliability.

### v0.8.20
* **LM Civitai Extension** - Released [browser extension through Chrome Web Store](https://chromewebstore.google.com/detail/lm-civitai-extension/capigligggeijgmocnaflanlbghnamgm?utm_source=item-share-cb) that works seamlessly with LoRA Manager to enhance Civitai browsing experience, showing which models are already in your local library, enabling one-click downloads, and providing queue and parallel download support
* **Enhanced Lora Loader** - Added support for nunchaku, improving convenience when working with ComfyUI-nunchaku workflows, plus new template workflows for quick onboarding
* **WanVideo Integration** - Introduced WanVideo Lora Select (LoraManager) node compatible with ComfyUI-WanVideoWrapper for streamlined lora usage in video workflows, including a template workflow to help you get started quickly

### v0.9.2
* **Bulk Auto-Organization Action** - Added a new bulk auto-organization feature. You can now select multiple models and automatically organize them according to your current path template settings for streamlined management.
* **Bug Fixes** - Addressed several bugs to improve stability and reliability.

### v0.8.19
* **Analytics Dashboard** - Added new Statistics page providing comprehensive visual analysis of model collection and usage patterns for better library insights
* **Target Node Selection** - Enhanced workflow integration with intelligent target choosing when sending LoRAs/recipes to workflows with multiple loader/stacker nodes; a visual selector now appears showing node color, type, ID, and title for precise targeting
* **Enhanced NSFW Controls** - Added support for setting NSFW levels on recipes with automatic content blurring based on user preferences
* **Customizable Card Display** - New display settings allowing users to choose whether card information and action buttons are always visible or only revealed on hover
* **Expanded Compatibility** - Added support for efficiency-nodes-comfyui in Save Recipe and Save Image nodes, plus fixed compatibility with ComfyUI_Custom_Nodes_AlekPet

### v0.9.1
* **Enhanced Bulk Operations** - Improved bulk operations with Marquee Selection and a bulk operation context menu, providing a more intuitive, desktop-application-like user experience.
* **New Bulk Actions** - Added bulk operations for adding tags and setting base models to multiple models simultaneously.

### v0.8.18
* **Custom Example Images** - Added ability to import your own example images for LoRAs and checkpoints with automatic metadata extraction from embedded information
* **Enhanced Example Management** - New action buttons to set specific examples as previews or delete custom examples
* **Improved Duplicate Detection** - Enhanced "Find Duplicates" with hash verification feature to eliminate false positives when identifying duplicate models
* **Tag Management** - Added tag editing functionality allowing users to customize and manage model tags
* **Advanced Selection Controls** - Implemented Ctrl+A shortcut for quickly selecting all filtered LoRAs, automatically entering bulk mode when needed
* **Note**: Cache file functionality temporarily disabled pending rework

### v0.9.0
* **UI Overhaul for Enhanced Navigation** - Replaced the top flat folder tags with a new folder sidebar and breadcrumb navigation system for a more intuitive folder browsing and selection experience.
* **Dual-Mode Folder Sidebar** - The new folder sidebar offers two display modes: 'List Mode,' which mirrors the classic folder view, and 'Tree Mode,' which presents a hierarchical folder structure for effortless navigation through nested directories.
* **Internationalization Support** - Introduced multi-language support, now available in English, Simplified Chinese, Traditional Chinese, Spanish, Japanese, Korean, French, Russian, and German. Feedback from native speakers is welcome to improve the translations.
* **Automatic Filename Conflict Resolution** - Implemented automatic file renaming (`original name + short hash`) to prevent conflicts when downloading or moving models.
* **Performance Optimizations & Bug Fixes** - Various performance improvements and bug fixes for a more stable and responsive experience.

### v0.8.17
* **Duplicate Model Detection** - Added "Find Duplicates" functionality for LoRAs and checkpoints using model file hash detection, enabling convenient viewing and batch deletion of duplicate models
* **Enhanced URL Recipe Imports** - Optimized import recipe via URL functionality using CivitAI API calls instead of web scraping, now supporting all rated images (including NSFW) for recipe imports
* **Improved TriggerWord Control** - Enhanced TriggerWord Toggle node with new default_active switch to set the initial state (active/inactive) when trigger words are added
* **Centralized Example Management** - Added "Migrate Existing Example Images" feature to consolidate downloaded example images from model folders into central storage with customizable naming patterns
* **Intelligent Word Suggestions** - Implemented smart trigger word suggestions by reading class tokens and tag frequency from safetensors files, displaying recommendations when editing trigger words
* **Model Version Management** - Added "Re-link to CivitAI" context menu option for connecting models to different CivitAI versions when needed

### v0.8.30
* **Automatic Model Path Correction** - Added auto-correction for model paths in built-in nodes such as Load Checkpoint, Load Diffusion Model, Load LoRA, and other custom nodes with similar functionality. Workflows containing outdated or incorrect model paths will now be automatically updated to reflect the current location of your models.
* **Node UI Enhancements** - Improved node interface for a smoother and more intuitive user experience.
* **Bug Fixes** - Addressed various bugs to enhance stability and reliability.

### v0.8.16
* **Dramatic Startup Speed Improvement** - Added cache serialization mechanism for significantly faster loading times, especially beneficial for large model collections
* **Enhanced Refresh Options** - Extended functionality with "Full Rebuild (complete)" option alongside "Quick Refresh (incremental)" to fix potential memory cache issues without requiring application restart
* **Customizable Display Density** - Replaced compact mode with adjustable display density settings for personalized layout customization
* **Model Creator Information** - Added creator details to model information panels for better attribution
* **Improved WebP Support** - Enhanced Save Image node with workflow embedding capability for WebP format images
* **Direct Example Access** - Added "Open Example Images Folder" button to card interfaces for convenient browsing of downloaded model examples
* **Enhanced Compatibility** - Full ComfyUI Desktop support for "Send lora or recipe to workflow" functionality
* **Cache Management** - Added settings to clear existing cache files when needed
* **Bug Fixes & Stability** - Various improvements for overall reliability and performance

### v0.8.29
* **Enhanced Recipe Imports** - Improved recipe importing with new target folder selection, featuring path input autocomplete and interactive folder tree navigation. Added a "Use Default Path" option when downloading missing LoRAs.
* **WanVideo Lora Select Node Update** - Updated the WanVideo Lora Select node with a 'merge_loras' option to match the counterpart node in the WanVideoWrapper node package.
* **Autocomplete Conflict Resolution** - Resolved an autocomplete feature conflict in LoRA nodes with pysssss autocomplete.
* **Improved Download Functionality** - Enhanced download functionality with resumable downloads and improved error handling.
* **Bug Fixes** - Addressed several bugs for improved stability and performance.

### v0.8.15
* **Enhanced One-Click Integration** - Replaced copy button with direct send button allowing LoRAs/recipes to be sent directly to your current ComfyUI workflow without needing to paste
* **Flexible Workflow Integration** - Click to append LoRAs/recipes to existing loader nodes or Shift+click to replace content, with additional right-click menu options for "Send to Workflow (Append)" or "Send to Workflow (Replace)"
* **Improved LoRA Loader Controls** - Added header drag functionality for proportional strength adjustment of all LoRAs simultaneously (including CLIP strengths when expanded)
* **Keyboard Navigation Support** - Implemented Page Up/Down for page scrolling, Home key to jump to top, and End key to jump to bottom for faster browsing through large collections

### v0.8.28
* **Autocomplete for Node Inputs** - Instantly find and add LoRAs by filename directly in Lora Loader, Lora Stacker, and WanVideo Lora Select nodes. Autocomplete suggestions include preview tooltips and preset weights, allowing you to quickly select LoRAs without opening the LoRA Manager UI.
* **Duplicate Notification Control** - Added a switch to duplicates mode, enabling users to turn off duplicate model notifications for a more streamlined experience.
* **Download Example Images from Context Menu** - Introduced a new context menu option to download example images for individual models.

### v0.8.14
* **Virtualized Scrolling** - Completely rebuilt rendering mechanism for smooth browsing with no lag or freezing, now supporting virtually unlimited model collections with optimized layouts for large displays, improving space utilization and user experience
* **Compact Display Mode** - Added space-efficient view option that displays more cards per row (7 on 1080p, 8 on 2K, 10 on 4K)
* **Enhanced LoRA Node Functionality** - Comprehensive improvements to LoRA loader/stacker nodes including real-time trigger word updates (reflecting any change anywhere in the LoRA chain for precise updates) and expanded context menu with "Copy Notes" and "Copy Trigger Words" options for faster workflow

### v0.8.27
* **User Experience Enhancements** - Improved the model download target folder selection with path input autocomplete and interactive folder tree navigation, making it easier and faster to choose where models are saved.
* **Default Path Option for Downloads** - Added a "Use Default Path" option when downloading models. When enabled, models are automatically organized and stored according to your configured path template settings.
* **Advanced Download Path Templates** - Expanded path template settings, allowing users to set individual templates for LoRA, checkpoint, and embedding models for greater flexibility. Introduced the `{author}` placeholder, enabling automatic organization of model files by creator name.
* **Bug Fixes & Stability Improvements** - Addressed various bugs and improved overall stability for a smoother experience.

### v0.8.13
* **Enhanced Recipe Management** - Added "Find duplicates" feature to identify and batch delete duplicate recipes with duplicate detection notifications during imports
* **Improved Source Tracking** - Source URLs are now saved with recipes imported via URL, allowing users to view original content with one click or manually edit links
* **Advanced LoRA Control** - Double-click LoRAs in Loader/Stacker nodes to access expanded CLIP strength controls for more precise adjustments of model and CLIP strength separately
* **Lycoris Model Support** - Added compatibility with Lycoris models for expanded creative options
* **Bug Fixes & UX Improvements** - Resolved various issues and enhanced overall user experience with numerous optimizations

### v0.8.12
* **Enhanced Model Discovery** - Added alphabetical navigation bar to LoRAs page for faster browsing through large collections
* **Optimized Example Images** - Improved download logic to automatically refresh stale metadata before fetching example images
* **Model Exclusion System** - New right-click option to exclude specific LoRAs or checkpoints from management
* **Improved Showcase Experience** - Enhanced interaction in LoRA and checkpoint showcase areas for better usability

### v0.8.11
* **Offline Image Support** - Added functionality to download and save all model example images locally, ensuring access even when offline or if images are removed from CivitAI or the site is down
* **Resilient Download System** - Implemented pause/resume capability with checkpoint recovery that persists through restarts or unexpected exits
* **Bug Fixes & Stability** - Resolved various issues to enhance overall reliability and performance

### v0.8.10
* **Standalone Mode** - Run LoRA Manager independently from ComfyUI for a lightweight experience that works even with other stable diffusion interfaces
* **Portable Edition** - New one-click portable version for easy startup and updates in standalone mode
* **Enhanced Metadata Collection** - Added support for SamplerCustomAdvanced node in the metadata collector module
* **Improved UI Organization** - Optimized Lora Loader node height to display up to 5 LoRAs at once with scrolling capability for larger collections

### v0.8.26
* **Creator Search Option** - Added ability to search models by creator name, making it easier to find models from specific authors.
* **Enhanced Node Usability** - Improved user experience for Lora Loader, Lora Stacker, and WanVideo Lora Select nodes by fixing the maximum height of the text input area. Users can now freely and conveniently adjust the LoRA region within these nodes.
* **Compatibility Fixes** - Resolved compatibility issues with ComfyUI and certain custom nodes, including ComfyUI-Custom-Scripts, ensuring smoother integration and operation.

[View Update History](./update_logs.md)
@@ -173,10 +139,11 @@ Enhance your Civitai browsing experience with our companion browser extension! S

### Option 2: **Portable Standalone Edition** (No ComfyUI required)

1. Download the [Portable Package](https://github.com/willmiao/ComfyUI-Lora-Manager/releases/download/v0.8.15/lora_manager_portable.7z)
1. Download the [Portable Package](https://github.com/willmiao/ComfyUI-Lora-Manager/releases/download/v0.9.2/lora_manager_portable.7z)
2. Copy the provided `settings.json.example` file to create a new file named `settings.json` in the `comfyui-lora-manager` folder
3. Edit `settings.json` to include your correct model folder paths and CivitAI API key
4. Run `run.bat`
   - To change the startup port, edit `run.bat` and modify the parameter (e.g. `--port 9001`)

### Option 3: **Manual Installation**

@@ -306,3 +273,6 @@ Join our Discord community for support, discussions, and updates:

[Discord Server](https://discord.gg/vcqNrWVFvM)

---

## Star History

[![Star History Chart](https://api.star-history.com/svg?repos=willmiao/ComfyUI-Lora-Manager&type=Date)](https://star-history.com/#willmiao/ComfyUI-Lora-Manager&Date)
__init__.py

@@ -1,20 +1,23 @@
from .py.lora_manager import LoraManager
from .py.nodes.lora_loader import LoraManagerLoader
from .py.nodes.lora_loader import LoraManagerLoader, LoraManagerTextLoader
from .py.nodes.trigger_word_toggle import TriggerWordToggle
from .py.nodes.lora_stacker import LoraStacker
from .py.nodes.save_image import SaveImage
from .py.nodes.debug_metadata import DebugMetadata
from .py.nodes.wanvideo_lora_select import WanVideoLoraSelect
from .py.nodes.wanvideo_lora_select_from_text import WanVideoLoraSelectFromText
# Import metadata collector to install hooks on startup
from .py.metadata_collector import init as init_metadata_collector

NODE_CLASS_MAPPINGS = {
    LoraManagerLoader.NAME: LoraManagerLoader,
    LoraManagerTextLoader.NAME: LoraManagerTextLoader,
    TriggerWordToggle.NAME: TriggerWordToggle,
    LoraStacker.NAME: LoraStacker,
    SaveImage.NAME: SaveImage,
    DebugMetadata.NAME: DebugMetadata,
    WanVideoLoraSelect.NAME: WanVideoLoraSelect
    WanVideoLoraSelect.NAME: WanVideoLoraSelect,
    WanVideoLoraSelectFromText.NAME: WanVideoLoraSelectFromText
}

WEB_DIRECTORY = "./web/comfyui"
docs/EventManagementImplementation.md (new file, 182 lines)

@@ -0,0 +1,182 @@

# Event Management Implementation Summary

## What Has Been Implemented

### 1. Enhanced EventManager Class
- **Location**: `static/js/utils/EventManager.js`
- **Features**:
  - Priority-based event handling
  - Conditional execution based on application state
  - Element filtering (target/exclude selectors)
  - Mouse button filtering
  - Automatic cleanup with cleanup functions
  - State tracking for app modes
  - Error handling for event handlers

### 2. BulkManager Integration
- **Location**: `static/js/managers/BulkManager.js`
- **Migrated Events**:
  - Global keyboard shortcuts (Ctrl+A, Escape, B key)
  - Marquee selection events (mousedown, mousemove, mouseup, contextmenu)
  - State synchronization with EventManager
- **Benefits**:
  - Centralized priority handling
  - Conditional execution based on modal state
  - Better coordination with other components

### 3. UIHelpers Integration
- **Location**: `static/js/utils/uiHelpers.js`
- **Migrated Events**:
  - Mouse position tracking for node selector positioning
  - Node selector click events (outside clicks and selection)
  - State management for the node selector
- **Benefits**:
  - Reduced direct DOM listeners
  - Coordinated state tracking
  - Better cleanup

### 4. ModelCard Integration
- **Location**: `static/js/components/shared/ModelCard.js`
- **Migrated Events**:
  - Model card click delegation
  - Action button handling (star, globe, copy, etc.)
  - Better return value handling for event propagation
- **Benefits**:
  - Single event listener for all model cards
  - Priority-based execution
  - Better event flow control

### 5. Documentation and Initialization
- **EventManagerDocs.md**: Comprehensive documentation
- **eventManagementInit.js**: Initialization and global handlers
- **Features**:
  - Global escape key handling
  - Modal state synchronization
  - Error handling
  - Analytics integration points
  - Cleanup on page unload
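The initialization module's global handlers are only summarized above; the following is a minimal sketch of what the critical-priority escape handling could look like, assuming the `eventManager` API documented in EventManagerDocs.md (the `closeTopmostModal` helper is hypothetical, standing in for whatever the real modal API exposes):

```javascript
import { eventManager } from './EventManager.js';

// Critical band (250+): global Escape handling runs before every
// application-level handler and consumes the key when a modal is open.
eventManager.addHandler('keydown', 'eventInit-globalEscape', (e) => {
  if (e.key !== 'Escape') return false;        // not ours: fall through

  if (eventManager.getState('modalOpen')) {
    closeTopmostModal();                       // hypothetical modal helper
    eventManager.setState('modalOpen', false); // keep app state in sync
    return true;                               // stop lower-priority handlers
  }
  return false;                                // no modal: let others handle it
}, { priority: 250 });
```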
## Application States Tracked

1. **bulkMode**: When bulk selection mode is active
2. **marqueeActive**: When marquee selection is in progress
3. **modalOpen**: When any modal dialog is open
4. **nodeSelectorActive**: When the node selector popup is visible
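The summary above notes that UIHelpers now manages node selector state; a minimal sketch of keeping such a flag in sync when the popup is shown or hidden (both helper functions are hypothetical, not the actual uiHelpers.js API):

```javascript
// Showing or hiding the node selector updates the shared flag so that
// handlers registered with skipWhenNodeSelectorActive stay coordinated.
function showNodeSelector(popupEl) {           // hypothetical UI helper
  popupEl.classList.add('visible');
  eventManager.setState('nodeSelectorActive', true);
}

function hideNodeSelector(popupEl) {           // hypothetical UI helper
  popupEl.classList.remove('visible');
  eventManager.setState('nodeSelectorActive', false);
}
```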
## Priority Levels Used

- **250+**: Critical system events (escape keys)
- **200+**: High-priority system events (modal close)
- **100-199**: Application-level shortcuts (bulk operations)
- **80-99**: UI interactions (marquee selection)
- **60-79**: Component interactions (model cards)
- **10-49**: Tracking and monitoring
- **1-9**: Analytics and low-priority tasks
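To make the bands concrete, here is a small sketch (using the `eventManager` API from EventManagerDocs.md; both helper functions are hypothetical) of how handlers from different bands interact on the same event:

```javascript
// Application-level shortcut band (100-199): Ctrl+A selects all models.
eventManager.addHandler('keydown', 'bulkManager-selectAll', (e) => {
  if ((e.ctrlKey || e.metaKey) && e.key === 'a') {
    selectAllFilteredModels(); // hypothetical selection helper
    return true;               // consumed: the monitor below never runs
  }
  return false;
}, { priority: 100, skipWhenModalOpen: true });

// Tracking band (10-49): only sees keydowns that no higher band consumed.
eventManager.addHandler('keydown', 'monitor-keyUsage', (e) => {
  recordKeyUsage(e.key);       // hypothetical monitoring helper
  return false;                // never blocks anything
}, { priority: 20 });
```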
## Event Flow Examples

### Bulk Mode Toggle (B key)
1. **Priority 100**: The BulkManager keyboard handler catches the 'b' key
2. Toggles the bulk mode state
3. Updates the EventManager state
4. Updates the UI accordingly
5. Stops propagation (returns `true`)

### Marquee Selection
1. **Priority 80**: The BulkManager mousedown handler fires (only in `.models-container`, excluding cards/buttons)
2. Starts the marquee selection
3. **Priority 90**: The BulkManager mousemove handler fires (only while the marquee is active)
4. Updates the selection rectangle
5. **Priority 90**: The BulkManager mouseup handler ends the selection (a registration sketch for this flow follows these examples)

### Model Card Click
1. **Priority 60**: The ModelCard delegation handler checks for specific elements
2. If an action button was clicked: handles the action and stops propagation
3. If it was a general card click: continues to other handlers
4. Bulk selection may also handle the event if bulk mode is active
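A sketch of how the marquee flow above could be registered inside BulkManager's `registerEventHandlers()`, assuming the conditional options documented in EventManagerDocs.md (the three `handleMarquee*` methods are hypothetical stand-ins for BulkManager's real ones):

```javascript
// mousedown arms the marquee, but only on empty grid space.
eventManager.addHandler('mousedown', 'bulkManager-marquee-start', (e) => {
  this.handleMarqueeStart(e);                    // record the anchor point
  eventManager.setState('marqueeActive', true);  // gate the handlers below
  return true;
}, {
  priority: 80,
  skipWhenModalOpen: true,
  targetSelector: '.models-container',
  excludeSelector: '.model-card, button, input',
  button: 0 // left mouse button only
});

// mousemove and mouseup stay registered but are state-gated, so they
// cost nothing while no marquee is in progress.
eventManager.addHandler('mousemove', 'bulkManager-marquee-move', (e) => {
  this.handleMarqueeMove(e);                     // resize the rectangle
  return true;
}, { priority: 90, onlyWhenMarqueeActive: true });

eventManager.addHandler('mouseup', 'bulkManager-marquee-end', (e) => {
  this.handleMarqueeEnd(e);                      // apply the selection
  eventManager.setState('marqueeActive', false);
  return true;
}, { priority: 90, onlyWhenMarqueeActive: true });
```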
## Remaining Event Listeners (Not Yet Migrated)

### High Priority for Migration
1. **SearchManager keyboard events** - Global search shortcuts
2. **ModalManager escape handling** - Already integrated with initialization
3. **Scroll-based events** - Back to top, virtual scrolling
4. **Resize events** - Panel positioning, responsive layouts

### Medium Priority
1. **Form input events** - Tag inputs, settings forms
2. **Component-specific events** - Recipe modal, showcase view
3. **Sidebar events** - Resize handling, toggle events

### Low Priority (Can Remain As-Is)
1. **VirtualScroller events** - Performance-critical, specialized
2. **Component lifecycle events** - Modal open/close callbacks
3. **One-time setup events** - Theme initialization, etc.
## Benefits Achieved

### Performance Improvements
- **Reduced DOM listeners**: From ~15+ individual listeners to ~5 coordinated handlers
- **Conditional execution**: Handlers only run when their conditions are met
- **Priority ordering**: Important events are handled first
- **Better memory management**: Automatic cleanup prevents leaks

### Coordination Improvements
- **State synchronization**: All components are aware of the app state
- **Event flow control**: Proper propagation stopping
- **Conflict resolution**: The priority system prevents conflicts
- **Debugging**: Centralized event handling makes debugging easier

### Code Quality Improvements
- **Consistent patterns**: All event handling follows the same patterns
- **Better separation of concerns**: Event logic is separated from business logic
- **Error handling**: Centralized error catching and reporting
- **Documentation**: Clear patterns for future development

## Next Steps (Recommendations)

### 1. Migrate Search Events
```javascript
// In SearchManager.js
eventManager.addHandler('keydown', 'search-shortcuts', (e) => {
  if ((e.ctrlKey || e.metaKey) && e.key === 'f') {
    this.focusSearchInput();
    return true;
  }
}, { priority: 120 });
```

### 2. Integrate Resize Events
```javascript
// Create a ResizeManager (assumes a debounce(fn, ms) utility is available)
eventManager.addHandler('resize', 'layout-resize', debounce((e) => {
  this.updateLayoutDimensions();
}, 250), { priority: 50 });
```

### 3. Add Debug Mode
```javascript
// In EventManager.js
if (window.DEBUG_EVENTS) {
  console.log(`Event ${eventType} handled by ${source} (priority: ${priority})`);
}
```

### 4. Create Event Analytics
```javascript
// Track event patterns for optimization
eventManager.addHandler('*', 'analytics', (e) => {
  this.trackEventUsage(e.type, performance.now());
}, { priority: 1 });
```

## Testing Recommendations

1. **Verify bulk mode interactions** work correctly
2. **Test marquee selection** in various scenarios
3. **Check modal state synchronization**
4. **Verify node selector** positioning and cleanup
5. **Test keyboard shortcuts** don't conflict
6. **Verify proper cleanup** when components are destroyed

The centralized event management system provides a solid foundation for coordinated, efficient event handling across the application while maintaining good performance and code organization.
docs/EventManagerDocs.md (new file, 301 lines)

@@ -0,0 +1,301 @@

# Centralized Event Management System

This document describes the centralized event management system that coordinates event handling across the ComfyUI LoRA Manager application.

## Overview

The `EventManager` class provides a centralized way to handle DOM events with priority-based execution, conditional execution based on application state, and proper cleanup mechanisms.

## Features

- **Priority-based execution**: Handlers with higher priority run first
- **Conditional execution**: Handlers can be executed based on application state
- **Element filtering**: Handlers can target specific elements or exclude others
- **Automatic cleanup**: Cleanup functions are called when handlers are removed
- **State tracking**: Tracks application states like bulk mode, modal open, etc.

## Basic Usage

### Importing

```javascript
import { eventManager } from './EventManager.js';
```

### Adding Event Handlers

```javascript
eventManager.addHandler('click', 'myComponent', (event) => {
  console.log('Button clicked!');
  return true; // Stop propagation to other handlers
}, {
  priority: 100,
  targetSelector: '.my-button',
  skipWhenModalOpen: true
});
```

### Removing Event Handlers

```javascript
// Remove a specific handler
eventManager.removeHandler('click', 'myComponent');

// Remove all handlers for a component
eventManager.removeAllHandlersForSource('myComponent');
```

### Updating Application State

```javascript
// Set state
eventManager.setState('bulkMode', true);
eventManager.setState('modalOpen', true);

// Get state
const isBulkMode = eventManager.getState('bulkMode');
```

## Available States

- `bulkMode`: Whether bulk selection mode is active
- `marqueeActive`: Whether marquee selection is in progress
- `modalOpen`: Whether any modal is currently open
- `nodeSelectorActive`: Whether the node selector popup is active

## Handler Options

### Priority
Higher numbers = higher priority. Handlers run in descending priority order.

```javascript
{
  priority: 100 // High priority
}
```

### Conditional Execution

```javascript
{
  onlyInBulkMode: true,             // Only run when bulk mode is active
  onlyWhenMarqueeActive: true,      // Only run when marquee selection is active
  skipWhenModalOpen: true,          // Skip when any modal is open
  skipWhenNodeSelectorActive: true, // Skip when the node selector is active
  onlyWhenNodeSelectorActive: true  // Only run when the node selector is active
}
```

### Element Filtering

```javascript
{
  targetSelector: '.model-card',    // Only handle events on matching elements
  excludeSelector: 'button, input', // Exclude events from these elements
  button: 0                         // Only handle a specific mouse button (0=left, 1=middle, 2=right)
}
```

### Cleanup Functions

```javascript
{
  cleanup: () => {
    // Custom cleanup logic
    console.log('Handler cleaned up');
  }
}
```
## Integration Examples

### BulkManager Integration

```javascript
class BulkManager {
  registerEventHandlers() {
    // High-priority keyboard shortcuts
    eventManager.addHandler('keydown', 'bulkManager-keyboard', (e) => {
      return this.handleGlobalKeyboard(e);
    }, {
      priority: 100,
      skipWhenModalOpen: true
    });

    // Marquee selection
    eventManager.addHandler('mousedown', 'bulkManager-marquee-start', (e) => {
      return this.handleMarqueeStart(e);
    }, {
      priority: 80,
      skipWhenModalOpen: true,
      targetSelector: '.models-container',
      excludeSelector: '.model-card, button, input',
      button: 0
    });
  }

  cleanup() {
    eventManager.removeAllHandlersForSource('bulkManager-keyboard');
    eventManager.removeAllHandlersForSource('bulkManager-marquee-start');
  }
}
```

### Modal Integration

```javascript
class ModalManager {
  showModal(modalId) {
    // Update state when a modal opens
    eventManager.setState('modalOpen', true);
    this.displayModal(modalId);
  }

  closeModal(modalId) {
    // Update state when a modal closes
    eventManager.setState('modalOpen', false);
    this.hideModal(modalId);
  }
}
```

### Component Event Delegation

```javascript
// Pass in the owning component so the handler has something to dispatch to
// (a plain exported function has no `this` of its own).
export function setupComponentEvents(component) {
  eventManager.addHandler('click', 'myComponent-actions', (event) => {
    const button = event.target.closest('.action-button');
    if (!button) return false;

    component.handleAction(button.dataset.action);
    return true; // Stop propagation
  }, {
    priority: 60,
    targetSelector: '.component-container'
  });
}
```
## Best Practices

### 1. Use Descriptive Source Names
Use the format `componentName-purposeDescription`:
```javascript
// Good
'bulkManager-marqueeSelection'
'nodeSelector-clickOutside'
'modelCard-delegation'

// Avoid
'bulk'
'click'
'handler1'
```

### 2. Set Appropriate Priorities
- 200+: Critical system events (escape keys, critical modals)
- 100-199: High-priority application events (keyboard shortcuts)
- 50-99: Normal UI interactions (buttons, cards)
- 1-49: Low-priority events (tracking, analytics)

### 3. Use Conditional Execution
Instead of checking state inside handlers, use options:
```javascript
// Good
eventManager.addHandler('click', 'bulk-action', handler, {
  onlyInBulkMode: true
});

// Avoid
eventManager.addHandler('click', 'bulk-action', (e) => {
  if (!state.bulkMode) return;
  // handler logic
});
```

### 4. Clean Up Properly
Always clean up handlers when components are destroyed:
```javascript
class MyComponent {
  constructor() {
    this.registerEvents();
  }

  destroy() {
    eventManager.removeAllHandlersForSource('myComponent');
  }
}
```

### 5. Return Values Matter
- Return `true` to stop event propagation to other handlers
- Return `false` or `undefined` to continue with other handlers
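A short sketch of how those return values interact when two handlers listen for the same event (the two helper functions are hypothetical; the state check uses the documented `getState` API):

```javascript
// Runs first (priority 90). Returning true consumes the click.
eventManager.addHandler('click', 'contextMenu-dismiss', (e) => {
  if (eventManager.getState('nodeSelectorActive')) {
    dismissNodeSelector();  // hypothetical helper
    return true;            // 'card-select' below never sees this click
  }
  return false;             // nothing to dismiss: fall through
}, { priority: 90 });

// Runs second (priority 60), only if no higher handler consumed the event.
eventManager.addHandler('click', 'card-select', (e) => {
  selectCard(e.target.closest('.model-card')); // hypothetical helper
  return true;
}, { priority: 60, targetSelector: '.model-card' });
```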
## Migration Guide

### From Direct Event Listeners

**Before:**
```javascript
document.addEventListener('click', (e) => {
  if (e.target.closest('.my-button')) {
    this.handleClick(e);
  }
});
```

**After:**
```javascript
eventManager.addHandler('click', 'myComponent-button', (e) => {
  this.handleClick(e);
}, {
  targetSelector: '.my-button'
});
```

### From Event Delegation

**Before:**
```javascript
container.addEventListener('click', (e) => {
  const card = e.target.closest('.model-card');
  if (!card) return;

  if (e.target.closest('.action-btn')) {
    this.handleAction(e);
  }
});
```

**After:**
```javascript
eventManager.addHandler('click', 'container-actions', (e) => {
  const card = e.target.closest('.model-card');
  if (!card) return false;

  if (e.target.closest('.action-btn')) {
    this.handleAction(e);
    return true;
  }
}, {
  targetSelector: '.container'
});
```

## Performance Benefits

1. **Reduced DOM listeners**: A single listener per event type instead of many
2. **Conditional execution**: Handlers only run when their conditions are met
3. **Priority ordering**: Important handlers run first, avoiding unnecessary work
4. **Automatic cleanup**: Prevents memory leaks from orphaned listeners
5. **Centralized debugging**: All event handling flows through one system

## Debugging

Enable debug logging to trace event handling:
```javascript
// Add to EventManager.js for debugging
console.log(`Handling ${eventType} event with ${handlers.length} handlers`);
```

The event manager provides a foundation for coordinated, efficient event handling across the entire application.
locales/de.json (new file, 1227 lines; diff suppressed because it is too large)
locales/en.json (new file, 1227 lines; diff suppressed because it is too large)
locales/es.json (new file, 1227 lines; diff suppressed because it is too large)
locales/fr.json (new file, 1227 lines; diff suppressed because it is too large)
locales/ja.json (new file, 1227 lines; diff suppressed because it is too large)
locales/ko.json (new file, 1227 lines; diff suppressed because it is too large)
locales/ru.json (new file, 1227 lines; diff suppressed because it is too large)
locales/zh-CN.json (new file, 1227 lines; diff suppressed because it is too large)
locales/zh-TW.json (new file, 1227 lines; diff suppressed because it is too large)
py/config.py (41 lines changed)

@@ -5,6 +5,7 @@ from typing import List
import logging
import sys
import json
import urllib.parse

# Check if running in standalone mode
standalone_mode = 'nodes' not in sys.modules

@@ -17,6 +18,7 @@ class Config:
    def __init__(self):
        self.templates_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'templates')
        self.static_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'static')
        self.i18n_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'locales')
        # Path mapping dictionary, target to link mapping
        self._path_mappings = {}
        # Static route mapping dictionary, target to route mapping

@@ -204,16 +206,20 @@
        real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
        unet_map[real_path] = unet_map.get(real_path, path.replace(os.sep, "/"))  # preserve first seen

        # Merge both maps and deduplicate by real path
        merged_map = {}
        for real_path, orig_path in {**checkpoint_map, **unet_map}.items():
            if real_path not in merged_map:
                merged_map[real_path] = orig_path

        # Now sort and use only the deduplicated real paths
        unique_checkpoint_paths = sorted(checkpoint_map.values(), key=lambda p: p.lower())
        unique_unet_paths = sorted(unet_map.values(), key=lambda p: p.lower())
        unique_paths = sorted(merged_map.values(), key=lambda p: p.lower())

        # Store individual paths in class properties
        self.checkpoints_roots = unique_checkpoint_paths
        self.unet_roots = unique_unet_paths
        # Split back into checkpoints and unet roots for class properties
        self.checkpoints_roots = [p for p in unique_paths if p in checkpoint_map.values()]
        self.unet_roots = [p for p in unique_paths if p in unet_map.values()]

        # Combine all checkpoint-related paths for return value
        all_paths = unique_checkpoint_paths + unique_unet_paths
        all_paths = unique_paths

        logger.info("Found checkpoint roots:" + ("\n - " + "\n - ".join(all_paths) if all_paths else "[]"))

@@ -263,17 +269,26 @@
        return []

    def get_preview_static_url(self, preview_path: str) -> str:
        """Convert local preview path to static URL"""
        if not preview_path:
            return ""

        real_path = os.path.realpath(preview_path).replace(os.sep, '/')

        # Find longest matching path (most specific match)
        best_match = ""
        best_route = ""

        for path, route in self._route_mappings.items():
            if real_path.startswith(path):
                relative_path = os.path.relpath(real_path, path)
                return f'{route}/{relative_path.replace(os.sep, "/")}'

            if real_path.startswith(path) and len(path) > len(best_match):
                best_match = path
                best_route = route

        if best_match:
            relative_path = os.path.relpath(real_path, best_match).replace(os.sep, '/')
            safe_parts = [urllib.parse.quote(part) for part in relative_path.split('/')]
            safe_path = '/'.join(safe_parts)
            return f'{best_route}/{safe_path}'

        return ""

# Global config instance
@@ -145,7 +145,12 @@ class LoraManager:
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to add static route on initialization for {target_path}: {e}")
|
||||
continue
|
||||
|
||||
|
||||
# Add static route for locales JSON files
|
||||
if os.path.exists(config.i18n_path):
|
||||
app.router.add_static('/locales', config.i18n_path)
|
||||
logger.info(f"Added static route for locales: /locales -> {config.i18n_path}")
|
||||
|
||||
# Add static route for plugin assets
|
||||
app.router.add_static('/loras_static', config.static_path)
|
||||
|
||||
@@ -185,6 +190,9 @@ class LoraManager:
|
||||
|
||||
# Register DownloadManager with ServiceRegistry
|
||||
await ServiceRegistry.get_download_manager()
|
||||
|
||||
from .services.metadata_service import initialize_metadata_providers
|
||||
await initialize_metadata_providers()
|
||||
|
||||
# Initialize WebSocket manager
|
||||
await ServiceRegistry.get_websocket_manager()
|
||||
@@ -198,18 +206,264 @@ class LoraManager:
|
||||
recipe_scanner = await ServiceRegistry.get_recipe_scanner()
|
||||
|
||||
# Create low-priority initialization tasks
|
||||
asyncio.create_task(lora_scanner.initialize_in_background(), name='lora_cache_init')
|
||||
asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init')
|
||||
asyncio.create_task(embedding_scanner.initialize_in_background(), name='embedding_cache_init')
|
||||
asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')
|
||||
init_tasks = [
|
||||
asyncio.create_task(lora_scanner.initialize_in_background(), name='lora_cache_init'),
|
||||
asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init'),
|
||||
asyncio.create_task(embedding_scanner.initialize_in_background(), name='embedding_cache_init'),
|
||||
asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')
|
||||
]
|
||||
|
||||
await ExampleImagesMigration.check_and_run_migrations()
|
||||
|
||||
logger.info("LoRA Manager: All services initialized and background tasks scheduled")
|
||||
# Schedule post-initialization tasks to run after scanners complete
|
||||
asyncio.create_task(
|
||||
cls._run_post_initialization_tasks(init_tasks),
|
||||
name='post_init_tasks'
|
||||
)
|
||||
|
||||
logger.debug("LoRA Manager: All services initialized and background tasks scheduled")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"LoRA Manager: Error initializing services: {e}", exc_info=True)
|
||||
|
||||
@classmethod
|
||||
async def _run_post_initialization_tasks(cls, init_tasks):
|
||||
"""Run post-initialization tasks after all scanners complete"""
|
||||
try:
|
||||
logger.debug("LoRA Manager: Waiting for scanner initialization to complete...")
|
||||
|
||||
# Wait for all scanner initialization tasks to complete
|
||||
await asyncio.gather(*init_tasks, return_exceptions=True)
|
||||
|
||||
logger.debug("LoRA Manager: Scanner initialization completed, starting post-initialization tasks...")
|
||||
|
||||
# Run post-initialization tasks
|
||||
post_tasks = [
|
||||
asyncio.create_task(cls._cleanup_backup_files(), name='cleanup_bak_files'),
|
||||
asyncio.create_task(cls._cleanup_example_images_folders(), name='cleanup_example_images'),
|
||||
# Add more post-initialization tasks here as needed
|
||||
# asyncio.create_task(cls._another_post_task(), name='another_task'),
|
||||
]
|
||||
|
||||
# Run all post-initialization tasks
|
||||
results = await asyncio.gather(*post_tasks, return_exceptions=True)
|
||||
|
||||
# Log results
|
||||
for i, result in enumerate(results):
|
||||
task_name = post_tasks[i].get_name()
|
||||
if isinstance(result, Exception):
|
||||
logger.error(f"Post-initialization task '{task_name}' failed: {result}")
|
||||
else:
|
||||
logger.debug(f"Post-initialization task '{task_name}' completed successfully")
|
||||
|
||||
logger.debug("LoRA Manager: All post-initialization tasks completed")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"LoRA Manager: Error in post-initialization tasks: {e}", exc_info=True)
|
||||
|
||||
@classmethod
|
||||
async def _cleanup_backup_files(cls):
|
||||
"""Clean up .bak files in all model roots"""
|
||||
try:
|
||||
logger.debug("Starting cleanup of .bak files in model directories...")
|
||||
|
||||
# Collect all model roots
|
||||
all_roots = set()
|
||||
all_roots.update(config.loras_roots)
|
||||
all_roots.update(config.base_models_roots)
|
||||
all_roots.update(config.embeddings_roots)
|
||||
|
||||
total_deleted = 0
|
||||
total_size_freed = 0
|
||||
|
||||
for root_path in all_roots:
|
||||
if not os.path.exists(root_path):
|
||||
continue
|
||||
|
||||
try:
|
||||
deleted_count, size_freed = await cls._cleanup_backup_files_in_directory(root_path)
|
||||
total_deleted += deleted_count
|
||||
total_size_freed += size_freed
|
||||
|
||||
if deleted_count > 0:
|
||||
logger.debug(f"Cleaned up {deleted_count} .bak files in {root_path} (freed {size_freed / (1024*1024):.2f} MB)")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error cleaning up .bak files in {root_path}: {e}")
|
||||
|
||||
# Yield control periodically
|
||||
await asyncio.sleep(0.01)
|
||||
|
||||
if total_deleted > 0:
|
||||
logger.debug(f"Backup cleanup completed: removed {total_deleted} .bak files, freed {total_size_freed / (1024*1024):.2f} MB total")
|
||||
else:
|
||||
logger.debug("Backup cleanup completed: no .bak files found")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error during backup file cleanup: {e}", exc_info=True)
|
||||
|
||||
@classmethod
|
||||
async def _cleanup_backup_files_in_directory(cls, directory_path: str):
|
||||
"""Clean up .bak files in a specific directory recursively
|
||||
|
||||
Args:
|
||||
directory_path: Path to the directory to clean
|
||||
|
||||
Returns:
|
||||
Tuple[int, int]: (number of files deleted, total size freed in bytes)
|
||||
"""
|
||||
deleted_count = 0
|
||||
size_freed = 0
|
||||
visited_paths = set()
|
||||
|
||||
def cleanup_recursive(path):
|
||||
nonlocal deleted_count, size_freed
|
||||
|
||||
try:
|
||||
real_path = os.path.realpath(path)
|
||||
if real_path in visited_paths:
|
||||
return
|
||||
visited_paths.add(real_path)
|
||||
|
||||
with os.scandir(path) as it:
|
||||
for entry in it:
|
||||
try:
|
||||
if entry.is_file(follow_symlinks=True) and entry.name.endswith('.bak'):
|
||||
file_size = entry.stat().st_size
|
||||
os.remove(entry.path)
|
||||
deleted_count += 1
|
||||
size_freed += file_size
|
||||
logger.debug(f"Deleted .bak file: {entry.path}")
|
||||
|
||||
elif entry.is_dir(follow_symlinks=True):
|
||||
cleanup_recursive(entry.path)
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not delete .bak file {entry.path}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error scanning directory {path} for .bak files: {e}")
|
||||
|
||||
# Run the recursive cleanup in a thread pool to avoid blocking
|
||||
loop = asyncio.get_event_loop()
|
||||
await loop.run_in_executor(None, cleanup_recursive, directory_path)
|
||||
|
        return deleted_count, size_freed

    @classmethod
    async def _cleanup_example_images_folders(cls):
        """Clean up invalid or empty folders in example images directory"""
        try:
            example_images_path = settings.get('example_images_path')
            if not example_images_path or not os.path.exists(example_images_path):
                logger.debug("Example images path not configured or doesn't exist, skipping cleanup")
                return

            logger.debug(f"Starting cleanup of example images folders in: {example_images_path}")

            # Get all scanner instances to check hash validity
            lora_scanner = await ServiceRegistry.get_lora_scanner()
            checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
            embedding_scanner = await ServiceRegistry.get_embedding_scanner()

            total_folders_checked = 0
            empty_folders_removed = 0
            invalid_hash_folders_removed = 0

            # Scan the example images directory
            try:
                with os.scandir(example_images_path) as it:
                    for entry in it:
                        if not entry.is_dir(follow_symlinks=False):
                            continue

                        folder_name = entry.name
                        folder_path = entry.path
                        total_folders_checked += 1

                        try:
                            # Check if folder is empty
                            is_empty = cls._is_folder_empty(folder_path)
                            if is_empty:
                                logger.debug(f"Removing empty example images folder: {folder_name}")
                                await cls._remove_folder_safely(folder_path)
                                empty_folders_removed += 1
                                continue

                            # Check if folder name is a valid SHA256 hash (64 hex characters)
                            if len(folder_name) != 64 or not all(c in '0123456789abcdefABCDEF' for c in folder_name):
                                logger.debug(f"Removing invalid hash folder: {folder_name}")
                                await cls._remove_folder_safely(folder_path)
                                invalid_hash_folders_removed += 1
                                continue

                            # Check if hash exists in any of the scanners
                            hash_exists = (
                                lora_scanner.has_hash(folder_name) or
                                checkpoint_scanner.has_hash(folder_name) or
                                embedding_scanner.has_hash(folder_name)
                            )

                            if not hash_exists:
                                logger.debug(f"Removing example images folder for deleted model: {folder_name}")
                                await cls._remove_folder_safely(folder_path)
                                invalid_hash_folders_removed += 1
                                continue

                        except Exception as e:
                            logger.error(f"Error processing example images folder {folder_name}: {e}")

                        # Yield control periodically
                        await asyncio.sleep(0.01)

            except Exception as e:
                logger.error(f"Error scanning example images directory: {e}")
                return

            # Log final cleanup report
            total_removed = empty_folders_removed + invalid_hash_folders_removed
            if total_removed > 0:
                logger.info(f"Example images cleanup completed: checked {total_folders_checked} folders, "
                            f"removed {empty_folders_removed} empty folders and {invalid_hash_folders_removed} "
                            f"folders for deleted/invalid models (total: {total_removed} removed)")
            else:
                logger.debug(f"Example images cleanup completed: checked {total_folders_checked} folders, "
                             f"no cleanup needed")

        except Exception as e:
            logger.error(f"Error during example images cleanup: {e}", exc_info=True)

    @classmethod
    def _is_folder_empty(cls, folder_path: str) -> bool:
        """Check if a folder is empty

        Args:
            folder_path: Path to the folder to check

        Returns:
            bool: True if folder is empty, False otherwise
        """
        try:
            with os.scandir(folder_path) as it:
                return not any(it)
        except Exception as e:
            logger.debug(f"Error checking if folder is empty {folder_path}: {e}")
            return False

    @classmethod
    async def _remove_folder_safely(cls, folder_path: str):
        """Safely remove a folder and all its contents

        Args:
            folder_path: Path to the folder to remove
        """
        try:
            import shutil
            loop = asyncio.get_event_loop()
            await loop.run_in_executor(None, shutil.rmtree, folder_path)
        except Exception as e:
            logger.warning(f"Failed to remove folder {folder_path}: {e}")

    @classmethod
    async def _cleanup(cls, app):
        """Cleanup resources using ServiceRegistry"""
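Note: the 64-hex-character check above can be expressed as a single regular-expression match; a minimal equivalent sketch (the helper name is hypothetical, not part of the codebase):

    import re

    _SHA256_RE = re.compile(r"[0-9a-fA-F]{64}")

    def looks_like_sha256(folder_name: str) -> bool:
        # Same result as: len(folder_name) == 64 and all chars are hex digits
        return _SHA256_RE.fullmatch(folder_name) is not None

    assert looks_like_sha256("a" * 64)
    assert not looks_like_sha256("not-a-hash")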
@@ -295,7 +295,7 @@ class MetadataProcessor:
            "seed": None,
            "steps": None,
            "cfg_scale": None,
-            "guidance": None, # Add guidance parameter
+            # "guidance": None, # Add guidance parameter
            "sampler": None,
            "scheduler": None,
            "checkpoint": None,

@@ -339,44 +339,8 @@ class MetadataProcessor:
        is_custom_advanced = prompt.original_prompt[primary_sampler_id].get("class_type") == "SamplerCustomAdvanced"

        if is_custom_advanced:
-            # For SamplerCustomAdvanced, trace specific inputs
-
-            # 1. Trace sigmas input to find BasicScheduler
-            scheduler_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "sigmas", "BasicScheduler", max_depth=5)
-            if scheduler_node_id and scheduler_node_id in metadata.get(SAMPLING, {}):
-                scheduler_params = metadata[SAMPLING][scheduler_node_id].get("parameters", {})
-                params["steps"] = scheduler_params.get("steps")
-                params["scheduler"] = scheduler_params.get("scheduler")
-
-            # 2. Trace sampler input to find KSamplerSelect
-            sampler_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "sampler", "KSamplerSelect", max_depth=5)
-            if sampler_node_id and sampler_node_id in metadata.get(SAMPLING, {}):
-                sampler_params = metadata[SAMPLING][sampler_node_id].get("parameters", {})
-                params["sampler"] = sampler_params.get("sampler_name")
-
-            # 3. Trace guider input for CFGGuider and CLIPTextEncode
-            guider_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "guider", max_depth=5)
-            if guider_node_id and guider_node_id in prompt.original_prompt:
-                # Check if the guider node is a CFGGuider
-                if prompt.original_prompt[guider_node_id].get("class_type") == "CFGGuider":
-                    # Extract cfg value from the CFGGuider
-                    if guider_node_id in metadata.get(SAMPLING, {}):
-                        cfg_params = metadata[SAMPLING][guider_node_id].get("parameters", {})
-                        params["cfg_scale"] = cfg_params.get("cfg")
-
-                    # Find CLIPTextEncode for positive prompt
-                    positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "positive", "CLIPTextEncode", max_depth=10)
-                    if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
-                        params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")
-
-                    # Find CLIPTextEncode for negative prompt
-                    negative_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "negative", "CLIPTextEncode", max_depth=10)
-                    if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
-                        params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")
-                else:
-                    positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "conditioning", max_depth=10)
-                    if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
-                        params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")
+            # For SamplerCustomAdvanced, use the new handler method
+            MetadataProcessor.handle_custom_advanced_sampler(metadata, prompt, primary_sampler_id, params)

        else:
            # For standard samplers, match conditioning objects to prompts

@@ -401,6 +365,9 @@ class MetadataProcessor:
        negative_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "negative", max_depth=10)
        if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
            params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")

        # For SamplerCustom, handle any additional parameters
        MetadataProcessor.handle_custom_advanced_sampler(metadata, prompt, primary_sampler_id, params)

        # Size extraction is same for all sampler types
        # Check if the sampler itself has size information (from latent_image)

@@ -454,3 +421,59 @@ class MetadataProcessor:
        """Convert metadata to JSON string"""
        params = MetadataProcessor.to_dict(metadata, id)
        return json.dumps(params, indent=4)

    @staticmethod
    def handle_custom_advanced_sampler(metadata, prompt, primary_sampler_id, params):
        """
        Handle parameter extraction for SamplerCustomAdvanced nodes

        Parameters:
        - metadata: The workflow metadata
        - prompt: The prompt object containing node connections
        - primary_sampler_id: ID of the SamplerCustomAdvanced node
        - params: Parameters dictionary to update
        """
        if not prompt.original_prompt or primary_sampler_id not in prompt.original_prompt:
            return

        sampler_inputs = prompt.original_prompt[primary_sampler_id].get("inputs", {})

        # 1. Trace sigmas input to find BasicScheduler (only if sigmas input exists)
        if "sigmas" in sampler_inputs:
            scheduler_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "sigmas", None, max_depth=5)
            if scheduler_node_id and scheduler_node_id in metadata.get(SAMPLING, {}):
                scheduler_params = metadata[SAMPLING][scheduler_node_id].get("parameters", {})
                params["steps"] = scheduler_params.get("steps")
                params["scheduler"] = scheduler_params.get("scheduler")

        # 2. Trace sampler input to find KSamplerSelect (only if sampler input exists)
        if "sampler" in sampler_inputs:
            sampler_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "sampler", "KSamplerSelect", max_depth=5)
            if sampler_node_id and sampler_node_id in metadata.get(SAMPLING, {}):
                sampler_params = metadata[SAMPLING][sampler_node_id].get("parameters", {})
                params["sampler"] = sampler_params.get("sampler_name")

        # 3. Trace guider input for CFGGuider and CLIPTextEncode
        if "guider" in sampler_inputs:
            guider_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "guider", max_depth=5)
            if guider_node_id and guider_node_id in prompt.original_prompt:
                # Check if the guider node is a CFGGuider
                if prompt.original_prompt[guider_node_id].get("class_type") == "CFGGuider":
                    # Extract cfg value from the CFGGuider
                    if guider_node_id in metadata.get(SAMPLING, {}):
                        cfg_params = metadata[SAMPLING][guider_node_id].get("parameters", {})
                        params["cfg_scale"] = cfg_params.get("cfg")

                    # Find CLIPTextEncode for positive prompt
                    positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "positive", "CLIPTextEncode", max_depth=10)
                    if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
                        params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")

                    # Find CLIPTextEncode for negative prompt
                    negative_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "negative", "CLIPTextEncode", max_depth=10)
                    if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
                        params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")
                else:
                    positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "conditioning", max_depth=10)
                    if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
                        params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")

@@ -642,7 +642,9 @@ NODE_EXTRACTORS = {
    # Sampling
    "KSampler": SamplerExtractor,
    "KSamplerAdvanced": KSamplerAdvancedExtractor,
    "SamplerCustom": KSamplerAdvancedExtractor,
    "SamplerCustomAdvanced": SamplerCustomAdvancedExtractor,
    "ClownsharKSampler_Beta": SamplerExtractor,
    "TSC_KSampler": TSCKSamplerExtractor, # Efficient Nodes
    "TSC_KSamplerAdvanced": TSCKSamplerAdvancedExtractor, # Efficient Nodes
    "KSamplerBasicPipe": KSamplerBasicPipeExtractor, # comfyui-impact-pack

@@ -652,9 +654,11 @@ NODE_EXTRACTORS = {
    # Sampling Selectors
    "KSamplerSelect": KSamplerSelectExtractor, # Add KSamplerSelect
    "BasicScheduler": BasicSchedulerExtractor, # Add BasicScheduler
    "AlignYourStepsScheduler": BasicSchedulerExtractor, # Add AlignYourStepsScheduler
    # Loaders
    "CheckpointLoaderSimple": CheckpointLoaderExtractor,
    "comfyLoader": CheckpointLoaderExtractor, # easy comfyLoader
    "CheckpointLoaderSimpleWithImages": CheckpointLoaderExtractor, # CheckpointLoader|pysssss
    "TSC_EfficientLoader": TSCCheckpointLoaderExtractor, # Efficient Nodes
    "UNETLoader": UNETLoaderExtractor, # Updated to use dedicated extractor
    "UnetLoaderGGUF": UNETLoaderExtractor, # Updated to use dedicated extractor

@@ -667,6 +671,7 @@ NODE_EXTRACTORS = {
    "AdvancedCLIPTextEncode": CLIPTextEncodeExtractor, # From https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb
    "smZ_CLIPTextEncode": CLIPTextEncodeExtractor, # From https://github.com/shiimizu/ComfyUI_smZNodes
    "CR_ApplyControlNetStack": CR_ApplyControlNetStackExtractor, # Add CR_ApplyControlNetStack
    "PCTextEncode": CLIPTextEncodeExtractor, # From https://github.com/asagi4/comfyui-prompt-control
    # Latent
    "EmptyLatentImage": ImageSizeExtractor,
    # Flux
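Note: the hunks above lean on MetadataProcessor.trace_node_input to walk upstream through node connections. A minimal sketch of that idea, assuming a ComfyUI-style prompt graph that maps node ids to {"class_type": ..., "inputs": {name: [source_id, output_index]}} dicts (the actual helper in this repo may differ in signature and details):

    def trace_upstream(graph, node_id, input_name, target_class=None, max_depth=5):
        """Follow one named input link upstream, stopping at target_class if given."""
        for _ in range(max_depth):
            link = graph.get(node_id, {}).get("inputs", {}).get(input_name)
            if not isinstance(link, list):  # literal widget value, not a connection
                return None
            node_id = str(link[0])
            node = graph.get(node_id)
            if node is None:
                return None
            if target_class is None or node.get("class_type") == target_class:
                return node_id
        return None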
py/middleware/__init__.py  (Normal file, 1 line)

@@ -0,0 +1 @@
"""Server middleware modules"""
py/middleware/cache_middleware.py  (Normal file, 53 lines)

@@ -0,0 +1,53 @@
"""Cache control middleware for ComfyUI server"""

from aiohttp import web
from typing import Callable, Awaitable

# Time in seconds
ONE_HOUR: int = 3600
ONE_DAY: int = 86400
IMG_EXTENSIONS = (
    ".jpg",
    ".jpeg",
    ".png",
    ".ppm",
    ".bmp",
    ".pgm",
    ".tif",
    ".tiff",
    ".webp",
    ".mp4"
)


@web.middleware
async def cache_control(
    request: web.Request, handler: Callable[[web.Request], Awaitable[web.Response]]
) -> web.Response:
    """Cache control middleware that sets appropriate cache headers based on file type and response status"""
    response: web.Response = await handler(request)

    if (
        request.path.endswith(".js")
        or request.path.endswith(".css")
        or request.path.endswith("index.json")
    ):
        response.headers.setdefault("Cache-Control", "no-cache")
        return response

    # Early return for non-image files - no cache headers needed
    if not request.path.lower().endswith(IMG_EXTENSIONS):
        return response

    # Handle image files
    if response.status == 404:
        response.headers.setdefault("Cache-Control", f"public, max-age={ONE_HOUR}")
    elif response.status in (200, 201, 202, 203, 204, 205, 206, 301, 308):
        # Success responses and permanent redirects - cache for 1 day
        response.headers.setdefault("Cache-Control", f"public, max-age={ONE_DAY}")
    elif response.status in (302, 303, 307):
        # Temporary redirects - no cache
        response.headers.setdefault("Cache-Control", "no-cache")
    # Note: 304 Not Modified falls through - no cache headers set

    return response
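Note: an aiohttp middleware like this is attached when the Application is constructed; a minimal standalone sketch (the commented import path, static route, and port are placeholders for however ComfyUI wires up its own server):

    from aiohttp import web
    # from py.middleware.cache_middleware import cache_control  # assumed path

    app = web.Application(middlewares=[cache_control])
    app.router.add_static("/loras_static", "static/")  # placeholder static mount
    web.run_app(app, port=8188)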
@@ -1,4 +1,5 @@
import logging
import re
from nodes import LoraLoader
from comfy.comfy_types import IO # type: ignore
from ..utils.utils import get_lora_info

@@ -17,7 +18,8 @@ class LoraManagerLoader:
                "model": ("MODEL",),
                # "clip": ("CLIP",),
                "text": (IO.STRING, {
                    "multiline": True,
                    "pysssss.autocomplete": False,
                    "dynamicPrompts": True,
                    "tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
                    "placeholder": "LoRA syntax input: <lora:name:strength>"

@@ -128,4 +130,142 @@ class LoraManagerLoader:
        formatted_loras_text = " ".join(formatted_loras)

        return (model, clip, trigger_words_text, formatted_loras_text)

class LoraManagerTextLoader:
    NAME = "LoRA Text Loader (LoraManager)"
    CATEGORY = "Lora Manager/loaders"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model": ("MODEL",),
                "lora_syntax": (IO.STRING, {
                    "defaultInput": True,
                    "forceInput": True,
                    "tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation"
                }),
            },
            "optional": {
                "clip": ("CLIP",),
                "lora_stack": ("LORA_STACK",),
            }
        }

    RETURN_TYPES = ("MODEL", "CLIP", IO.STRING, IO.STRING)
    RETURN_NAMES = ("MODEL", "CLIP", "trigger_words", "loaded_loras")
    FUNCTION = "load_loras_from_text"

    def parse_lora_syntax(self, text):
        """Parse LoRA syntax from text input."""
        # Pattern to match <lora:name:strength> or <lora:name:model_strength:clip_strength>
        pattern = r'<lora:([^:>]+):([^:>]+)(?::([^:>]+))?>'
        matches = re.findall(pattern, text, re.IGNORECASE)

        loras = []
        for match in matches:
            lora_name = match[0].strip()
            model_strength = float(match[1])
            clip_strength = float(match[2]) if match[2] else model_strength

            loras.append({
                'name': lora_name,
                'model_strength': model_strength,
                'clip_strength': clip_strength
            })

        return loras

    def load_loras_from_text(self, model, lora_syntax, clip=None, lora_stack=None):
        """Load LoRAs based on text syntax input."""
        loaded_loras = []
        all_trigger_words = []

        # Check if model is a Nunchaku Flux model - simplified approach
        is_nunchaku_model = False

        try:
            model_wrapper = model.model.diffusion_model
            # Check if model is a Nunchaku Flux model using only class name
            if model_wrapper.__class__.__name__ == "ComfyFluxWrapper":
                is_nunchaku_model = True
                logger.info("Detected Nunchaku Flux model")
        except (AttributeError, TypeError):
            # Not a model with the expected structure
            pass

        # First process lora_stack if available
        if lora_stack:
            for lora_path, model_strength, clip_strength in lora_stack:
                # Apply the LoRA using the appropriate loader
                if is_nunchaku_model:
                    # Use our custom function for Flux models
                    model = nunchaku_load_lora(model, lora_path, model_strength)
                    # clip remains unchanged for Nunchaku models
                else:
                    # Use default loader for standard models
                    model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)

                # Extract lora name for trigger words lookup
                lora_name = extract_lora_name(lora_path)
                _, trigger_words = get_lora_info(lora_name)

                all_trigger_words.extend(trigger_words)
                # Add clip strength to output if different from model strength (except for Nunchaku models)
                if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
                    loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
                else:
                    loaded_loras.append(f"{lora_name}: {model_strength}")

        # Parse and process LoRAs from text syntax
        parsed_loras = self.parse_lora_syntax(lora_syntax)
        for lora in parsed_loras:
            lora_name = lora['name']
            model_strength = lora['model_strength']
            clip_strength = lora['clip_strength']

            # Get lora path and trigger words
            lora_path, trigger_words = get_lora_info(lora_name)

            # Apply the LoRA using the appropriate loader
            if is_nunchaku_model:
                # For Nunchaku models, use our custom function
                model = nunchaku_load_lora(model, lora_path, model_strength)
                # clip remains unchanged
            else:
                # Use default loader for standard models
                model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)

            # Include clip strength in output if different from model strength and not a Nunchaku model
            if not is_nunchaku_model and abs(model_strength - clip_strength) > 0.001:
                loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
            else:
                loaded_loras.append(f"{lora_name}: {model_strength}")

            # Add trigger words to collection
            all_trigger_words.extend(trigger_words)

        # use ',, ' to separate trigger words for group mode
        trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""

        # Format loaded_loras with support for both formats
        formatted_loras = []
        for item in loaded_loras:
            parts = item.split(":")
            lora_name = parts[0].strip()
            strength_parts = parts[1].strip().split(",")

            if len(strength_parts) > 1:
                # Different model and clip strengths
                model_str = strength_parts[0].strip()
                clip_str = strength_parts[1].strip()
                formatted_loras.append(f"<lora:{lora_name}:{model_str}:{clip_str}>")
            else:
                # Same strength for both
                model_str = strength_parts[0].strip()
                formatted_loras.append(f"<lora:{lora_name}:{model_str}>")

        formatted_loras_text = " ".join(formatted_loras)

        return (model, clip, trigger_words_text, formatted_loras_text)
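Note: the parse_lora_syntax pattern accepts both the two-part and three-part forms; on a sample string it behaves like this:

    import re

    pattern = r'<lora:([^:>]+):([^:>]+)(?::([^:>]+))?>'
    text = "<lora:styleA:0.8>, <lora:detail:1.0:0.5>"
    print(re.findall(pattern, text, re.IGNORECASE))
    # [('styleA', '0.8', ''), ('detail', '1.0', '0.5')]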
@@ -17,6 +17,7 @@ class LoraStacker:
            "required": {
                "text": (IO.STRING, {
                    "multiline": True,
                    "pysssss.autocomplete": False,
                    "dynamicPrompts": True,
                    "tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
                    "placeholder": "LoRA syntax input: <lora:name:strength>"

@@ -1,6 +1,5 @@
import json
import os
import asyncio
import re
import numpy as np
import folder_paths # type: ignore

@@ -419,11 +418,15 @@ class SaveImage:
        # Make sure the output directory exists
        os.makedirs(self.output_dir, exist_ok=True)

-        # Ensure images is always a list of images
-        if len(images.shape) == 3: # Single image (height, width, channels)
-            images = [images]
-        else: # Multiple images (batch, height, width, channels)
-            images = [img for img in images]
+        # If images is already a list or array of images, do nothing; otherwise, convert to list
+        if isinstance(images, (list, np.ndarray)):
+            pass
+        else:
+            # Ensure images is always a list of images
+            if len(images.shape) == 3: # Single image (height, width, channels)
+                images = [images]
+            else: # Multiple images (batch, height, width, channels)
+                images = [img for img in images]

        # Save all images
        results = self.save_images(
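Note: the new branch only normalizes inputs that are not already a list or ndarray (for example a tensor-like object with a .shape attribute); condensed into one function, the behavior is:

    import numpy as np

    def to_image_list(images):
        # Already a list/ndarray: pass through unchanged
        if isinstance(images, (list, np.ndarray)):
            return images
        # Single HWC image becomes a one-element list; a batch is split per image
        if len(images.shape) == 3:
            return [images]
        return [img for img in images]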
@@ -14,9 +14,11 @@ class WanVideoLoraSelect:
    def INPUT_TYPES(cls):
        return {
            "required": {
-                "low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load the LORA model with less VRAM usage, slower loading"}),
+                "low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}),
+                "merge_loras": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}),
                "text": (IO.STRING, {
                    "multiline": True,
                    "pysssss.autocomplete": False,
                    "dynamicPrompts": True,
                    "tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
                    "placeholder": "LoRA syntax input: <lora:name:strength>"

@@ -29,7 +31,7 @@ class WanVideoLoraSelect:
    RETURN_NAMES = ("lora", "trigger_words", "active_loras")
    FUNCTION = "process_loras"

-    def process_loras(self, text, low_mem_load=False, **kwargs):
+    def process_loras(self, text, low_mem_load=False, merge_loras=True, **kwargs):
        loras_list = []
        all_trigger_words = []
        active_loras = []

@@ -38,6 +40,9 @@ class WanVideoLoraSelect:
        prev_lora = kwargs.get('prev_lora', None)
        if prev_lora is not None:
            loras_list.extend(prev_lora)

        if not merge_loras:
            low_mem_load = False # Unmerged LoRAs don't need low_mem_load

        # Get blocks if available
        blocks = kwargs.get('blocks', {})

@@ -65,6 +70,7 @@ class WanVideoLoraSelect:
                "blocks": selected_blocks,
                "layer_filter": layer_filter,
                "low_mem_load": low_mem_load,
                "merge_loras": merge_loras,
            }

            # Add to list and collect active loras

py/nodes/wanvideo_lora_select_from_text.py  (Normal file, 127 lines)

@@ -0,0 +1,127 @@
from comfy.comfy_types import IO
import folder_paths
from ..utils.utils import get_lora_info
from .utils import any_type
import logging

# Initialize the logger
logger = logging.getLogger(__name__)

# Define the new node class
class WanVideoLoraSelectFromText:
    # Name shown for the node in the UI
    NAME = "WanVideo Lora Select From Text (LoraManager)"
    # Category the node belongs to
    CATEGORY = "Lora Manager/stackers"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}),
                "merge_lora": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}),
                "lora_syntax": (IO.STRING, {
                    "multiline": True,
                    "defaultInput": True,
                    "forceInput": True,
                    "tooltip": "Connect a TEXT output for LoRA syntax: <lora:name:strength>"
                }),
            },

            "optional": {
                "prev_lora": ("WANVIDLORA",),
                "blocks": ("BLOCKS",)
            }
        }

    RETURN_TYPES = ("WANVIDLORA", IO.STRING, IO.STRING)
    RETURN_NAMES = ("lora", "trigger_words", "active_loras")

    FUNCTION = "process_loras_from_syntax"

    def process_loras_from_syntax(self, lora_syntax, low_mem_load=False, merge_lora=True, **kwargs):
        text_to_process = lora_syntax

        blocks = kwargs.get('blocks', {})
        selected_blocks = blocks.get("selected_blocks", {})
        layer_filter = blocks.get("layer_filter", "")

        loras_list = []
        all_trigger_words = []
        active_loras = []

        prev_lora = kwargs.get('prev_lora', None)
        if prev_lora is not None:
            loras_list.extend(prev_lora)

        if not merge_lora:
            low_mem_load = False

        parts = text_to_process.split('<lora:')
        for part in parts[1:]:
            end_index = part.find('>')
            if end_index == -1:
                continue

            content = part[:end_index]
            lora_parts = content.split(':')

            lora_name_raw = ""
            model_strength = 1.0
            clip_strength = 1.0

            if len(lora_parts) == 2:
                lora_name_raw = lora_parts[0].strip()
                try:
                    model_strength = float(lora_parts[1])
                    clip_strength = model_strength
                except (ValueError, IndexError):
                    logger.warning(f"Invalid strength for LoRA '{lora_name_raw}'. Skipping.")
                    continue
            elif len(lora_parts) >= 3:
                lora_name_raw = lora_parts[0].strip()
                try:
                    model_strength = float(lora_parts[1])
                    clip_strength = float(lora_parts[2])
                except (ValueError, IndexError):
                    logger.warning(f"Invalid strengths for LoRA '{lora_name_raw}'. Skipping.")
                    continue
            else:
                continue

            lora_path, trigger_words = get_lora_info(lora_name_raw)

            lora_item = {
                "path": folder_paths.get_full_path("loras", lora_path),
                "strength": model_strength,
                "name": lora_path.split(".")[0],
                "blocks": selected_blocks,
                "layer_filter": layer_filter,
                "low_mem_load": low_mem_load,
                "merge_loras": merge_lora,
            }

            loras_list.append(lora_item)
            active_loras.append((lora_name_raw, model_strength, clip_strength))
            all_trigger_words.extend(trigger_words)

        trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""

        formatted_loras = []
        for name, model_strength, clip_strength in active_loras:
            if abs(model_strength - clip_strength) > 0.001:
                formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}:{str(clip_strength).strip()}>")
            else:
                formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}>")

        active_loras_text = " ".join(formatted_loras)

        return (loras_list, trigger_words_text, active_loras_text)


NODE_CLASS_MAPPINGS = {
    "WanVideoLoraSelectFromText": WanVideoLoraSelectFromText
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "WanVideoLoraSelectFromText": "WanVideo Lora Select From Text (LoraManager)"
}
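Note: the split-based parser in process_loras_from_syntax behaves like this on a sample string:

    text = "cinematic <lora:styleA:0.8> portrait <lora:detail:1.0:0.5>"
    for part in text.split('<lora:')[1:]:
        end = part.find('>')
        if end == -1:
            continue
        print(part[:end].split(':'))
    # ['styleA', '0.8']
    # ['detail', '1.0', '0.5']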
@@ -6,6 +6,7 @@ import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider

logger = logging.getLogger(__name__)

@@ -30,6 +31,9 @@ class AutomaticMetadataParser(RecipeMetadataParser):
    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from Automatic1111 format"""
        try:
            # Get metadata provider instead of using civitai_client directly
            metadata_provider = await get_default_metadata_provider()

            # Split on Negative prompt if it exists
            if "Negative prompt:" in user_comment:
                parts = user_comment.split('Negative prompt:', 1)

@@ -216,9 +220,9 @@ class AutomaticMetadataParser(RecipeMetadataParser):
            }

            # Get additional info from Civitai
-            if civitai_client:
+            if metadata_provider:
                try:
-                    civitai_info = await civitai_client.get_model_version_info(resource.get("modelVersionId"))
+                    civitai_info = await metadata_provider.get_model_version_info(resource.get("modelVersionId"))
                    populated_entry = await self.populate_lora_from_civitai(
                        lora_entry,
                        civitai_info,

@@ -271,11 +275,11 @@ class AutomaticMetadataParser(RecipeMetadataParser):
            }

            # Try to get info from Civitai
-            if civitai_client:
+            if metadata_provider:
                try:
                    if lora_hash:
                        # If we have hash, use it for lookup
-                        civitai_info = await civitai_client.get_model_by_hash(lora_hash)
+                        civitai_info = await metadata_provider.get_model_by_hash(lora_hash)
                    else:
                        civitai_info = None

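Note: the Automatic1111 comment layout being split on above looks roughly like this (sample text is illustrative):

    user_comment = (
        "a castle on a hill\n"
        "Negative prompt: blurry, low quality\n"
        "Steps: 30, Sampler: Euler a, CFG scale: 7"
    )
    prompt, rest = user_comment.split('Negative prompt:', 1)
    print(prompt.strip())  # a castle on a hill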
@@ -5,6 +5,7 @@ import logging
from typing import Dict, Any, Union
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider

logger = logging.getLogger(__name__)

@@ -36,12 +37,15 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
        Args:
            metadata: The metadata from the image (dict)
            recipe_scanner: Optional recipe scanner service
-            civitai_client: Optional Civitai API client
+            civitai_client: Optional Civitai API client (deprecated, use metadata_provider instead)

        Returns:
            Dict containing parsed recipe data
        """
        try:
            # Get metadata provider instead of using civitai_client directly
            metadata_provider = await get_default_metadata_provider()

            # Initialize result structure
            result = {
                'base_model': None,

@@ -53,6 +57,14 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
            # Track already added LoRAs to prevent duplicates
            added_loras = {} # key: model_version_id or hash, value: index in result["loras"]

            # Extract hash information from hashes field for LoRA matching
            lora_hashes = {}
            if "hashes" in metadata and isinstance(metadata["hashes"], dict):
                for key, hash_value in metadata["hashes"].items():
                    if key.startswith("LORA:"):
                        lora_name = key.replace("LORA:", "")
                        lora_hashes[lora_name] = hash_value

            # Extract prompt and negative prompt
            if "prompt" in metadata:
                result["gen_params"]["prompt"] = metadata["prompt"]

@@ -77,9 +89,9 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
            # Extract base model information - directly if available
            if "baseModel" in metadata:
                result["base_model"] = metadata["baseModel"]
-            elif "Model hash" in metadata and civitai_client:
+            elif "Model hash" in metadata and metadata_provider:
                model_hash = metadata["Model hash"]
-                model_info = await civitai_client.get_model_by_hash(model_hash)
+                model_info = await metadata_provider.get_model_by_hash(model_hash)
                if model_info:
                    result["base_model"] = model_info.get("baseModel", "")
            elif "Model" in metadata and isinstance(metadata.get("resources"), list):

@@ -87,8 +99,8 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
                for resource in metadata.get("resources", []):
                    if resource.get("type") == "model" and resource.get("name") == metadata.get("Model"):
                        # This is likely the checkpoint model
-                        if civitai_client and resource.get("hash"):
-                            model_info = await civitai_client.get_model_by_hash(resource.get("hash"))
+                        if metadata_provider and resource.get("hash"):
+                            model_info = await metadata_provider.get_model_by_hash(resource.get("hash"))
                            if model_info:
                                result["base_model"] = model_info.get("baseModel", "")

@@ -101,6 +113,15 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
                    if resource.get("type", "lora") == "lora":
                        lora_hash = resource.get("hash", "")

                        # Try to get hash from the hashes field if not present in resource
                        if not lora_hash and resource.get("name"):
                            lora_hash = lora_hashes.get(resource["name"], "")

                        # Skip LoRAs without proper identification (hash or modelVersionId)
                        if not lora_hash and not resource.get("modelVersionId"):
                            logger.debug(f"Skipping LoRA resource '{resource.get('name', 'Unknown')}' - no hash or modelVersionId")
                            continue

                        # Skip if we've already added this LoRA by hash
                        if lora_hash and lora_hash in added_loras:
                            continue

@@ -121,9 +142,9 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
                        }

                        # Try to get info from Civitai if hash is available
-                        if lora_entry['hash'] and civitai_client:
+                        if lora_entry['hash'] and metadata_provider:
                            try:
-                                civitai_info = await civitai_client.get_model_by_hash(lora_hash)
+                                civitai_info = await metadata_provider.get_model_by_hash(lora_hash)

                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,

@@ -153,10 +174,6 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
            # Process civitaiResources array
            if "civitaiResources" in metadata and isinstance(metadata["civitaiResources"], list):
                for resource in metadata["civitaiResources"]:
-                    # Skip resources that aren't LoRAs or LyCORIS
-                    if resource.get("type") not in ["lora", "lycoris"] and "type" not in resource:
-                        continue
-
                    # Get unique identifier for deduplication
                    version_id = str(resource.get("modelVersionId", ""))

@@ -181,10 +198,10 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
                    }

                    # Try to get info from Civitai if modelVersionId is available
-                    if version_id and civitai_client:
+                    if version_id and metadata_provider:
                        try:
                            # Use get_model_version_info instead of get_model_version
-                            civitai_info, error = await civitai_client.get_model_version_info(version_id)
+                            civitai_info, error = await metadata_provider.get_model_version_info(version_id)

                            if error:
                                logger.warning(f"Error getting model version info: {error}")

@@ -246,11 +263,11 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
                        'isDeleted': False
                    }

-                    # If we have a version ID and civitai client, try to get more info
-                    if version_id and civitai_client:
+                    # If we have a version ID and metadata provider, try to get more info
+                    if version_id and metadata_provider:
                        try:
                            # Use get_model_version_info with the version ID
-                            civitai_info, error = await civitai_client.get_model_version_info(version_id)
+                            civitai_info, error = await metadata_provider.get_model_version_info(version_id)

                            if error:
                                logger.warning(f"Error getting model version info: {error}")

@@ -275,6 +292,66 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
            result["loras"].append(lora_entry)

            # Check for LoRA info in the format "Lora_0 Model hash", "Lora_0 Model name", etc.
            lora_index = 0
            while f"Lora_{lora_index} Model hash" in metadata and f"Lora_{lora_index} Model name" in metadata:
                lora_hash = metadata[f"Lora_{lora_index} Model hash"]
                lora_name = metadata[f"Lora_{lora_index} Model name"]
                lora_strength_model = float(metadata.get(f"Lora_{lora_index} Strength model", 1.0))

                # Skip if we've already added this LoRA by hash
                if lora_hash and lora_hash in added_loras:
                    lora_index += 1
                    continue

                lora_entry = {
                    'name': lora_name,
                    'type': "lora",
                    'weight': lora_strength_model,
                    'hash': lora_hash,
                    'existsLocally': False,
                    'localPath': None,
                    'file_name': lora_name,
                    'thumbnailUrl': '/loras_static/images/no-preview.png',
                    'baseModel': '',
                    'size': 0,
                    'downloadUrl': '',
                    'isDeleted': False
                }

                # Try to get info from Civitai if hash is available
                if lora_entry['hash'] and metadata_provider:
                    try:
                        civitai_info = await metadata_provider.get_model_by_hash(lora_hash)

                        populated_entry = await self.populate_lora_from_civitai(
                            lora_entry,
                            civitai_info,
                            recipe_scanner,
                            base_model_counts,
                            lora_hash
                        )

                        if populated_entry is None:
                            lora_index += 1
                            continue # Skip invalid LoRA types

                        lora_entry = populated_entry

                        # If we have a version ID from Civitai, track it for deduplication
                        if 'id' in lora_entry and lora_entry['id']:
                            added_loras[str(lora_entry['id'])] = len(result["loras"])
                    except Exception as e:
                        logger.error(f"Error fetching Civitai info for LoRA hash {lora_entry['hash']}: {e}")

                # Track by hash if we have it
                if lora_hash:
                    added_loras[lora_hash] = len(result["loras"])

                result["loras"].append(lora_entry)

                lora_index += 1

            # If base model wasn't found earlier, use the most common one from LoRAs
            if not result["base_model"] and base_model_counts:
                result["base_model"] = max(base_model_counts.items(), key=lambda x: x[1])[0]

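Note: the hashes-field extraction above reduces to a small dict comprehension; on sample metadata (str.removeprefix gives the same result as the key.replace call for keys that start with the prefix):

    metadata = {"hashes": {"model": "abc123", "LORA:styleA": "deadbeef"}}
    lora_hashes = {
        key.removeprefix("LORA:"): value
        for key, value in metadata.get("hashes", {}).items()
        if key.startswith("LORA:")
    }
    print(lora_hashes)  # {'styleA': 'deadbeef'}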
@@ -6,6 +6,7 @@ import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider

logger = logging.getLogger(__name__)

@@ -26,6 +27,9 @@ class ComfyMetadataParser(RecipeMetadataParser):
    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from Civitai ComfyUI metadata format"""
        try:
            # Get metadata provider instead of using civitai_client directly
            metadata_provider = await get_default_metadata_provider()

            data = json.loads(user_comment)
            loras = []

@@ -73,10 +77,10 @@ class ComfyMetadataParser(RecipeMetadataParser):
                    'isDeleted': False
                }

-                # Get additional info from Civitai if client is available
-                if civitai_client:
+                # Get additional info from Civitai if metadata provider is available
+                if metadata_provider:
                    try:
-                        civitai_info_tuple = await civitai_client.get_model_version_info(model_version_id)
+                        civitai_info_tuple = await metadata_provider.get_model_version_info(model_version_id)
                        # Populate lora entry with Civitai info
                        populated_entry = await self.populate_lora_from_civitai(
                            lora_entry,

@@ -116,9 +120,9 @@ class ComfyMetadataParser(RecipeMetadataParser):
                }

                # Get additional checkpoint info from Civitai
-                if civitai_client:
+                if metadata_provider:
                    try:
-                        civitai_info_tuple = await civitai_client.get_model_version_info(checkpoint_version_id)
+                        civitai_info_tuple = await metadata_provider.get_model_version_info(checkpoint_version_id)
                        civitai_info, _ = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)
                        # Populate checkpoint with Civitai info
                        checkpoint = await self.populate_checkpoint_from_civitai(checkpoint, civitai_info)

@@ -5,6 +5,7 @@ import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider

logger = logging.getLogger(__name__)

@@ -18,8 +19,11 @@ class MetaFormatParser(RecipeMetadataParser):
        return re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL) is not None

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
-        """Parse metadata from images with meta format metadata"""
+        """Parse metadata from images with meta format metadata (Lora_N Model hash format)"""
        try:
            # Get metadata provider instead of using civitai_client directly
            metadata_provider = await get_default_metadata_provider()

            # Extract prompt and negative prompt
            parts = user_comment.split('Negative prompt:', 1)
            prompt = parts[0].strip()

@@ -122,9 +126,9 @@ class MetaFormatParser(RecipeMetadataParser):
                }

                # Get info from Civitai by hash if available
-                if civitai_client and hash_value:
+                if metadata_provider and hash_value:
                    try:
-                        civitai_info = await civitai_client.get_model_by_hash(hash_value)
+                        civitai_info = await metadata_provider.get_model_by_hash(hash_value)
                        # Populate lora entry with Civitai info
                        populated_entry = await self.populate_lora_from_civitai(
                            lora_entry,
@@ -7,6 +7,7 @@ from typing import Dict, Any
from ...config import config
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider

logger = logging.getLogger(__name__)

@@ -23,6 +24,9 @@ class RecipeFormatParser(RecipeMetadataParser):
    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from images with dedicated recipe metadata format"""
        try:
            # Get metadata provider instead of using civitai_client directly
            metadata_provider = await get_default_metadata_provider()

            # Extract recipe metadata from user comment
            try:
                # Look for recipe metadata section

@@ -71,9 +75,9 @@ class RecipeFormatParser(RecipeMetadataParser):
                    lora_entry['localPath'] = None

                # Try to get additional info from Civitai if we have a model version ID
-                if lora.get('modelVersionId') and civitai_client:
+                if lora.get('modelVersionId') and metadata_provider:
                    try:
-                        civitai_info_tuple = await civitai_client.get_model_version_info(lora['modelVersionId'])
+                        civitai_info_tuple = await metadata_provider.get_model_version_info(lora['modelVersionId'])
                        # Populate lora entry with Civitai info
                        populated_entry = await self.populate_lora_from_civitai(
                            lora_entry,

@@ -1,5 +1,6 @@
from abc import ABC, abstractmethod
import asyncio
import os
import json
import logging
from aiohttp import web

@@ -10,6 +11,9 @@ import jinja2
from ..utils.routes_common import ModelRouteUtils
from ..services.websocket_manager import ws_manager
from ..services.settings_manager import settings
from ..services.server_i18n import server_i18n
from ..services.model_file_service import ModelFileService, ModelMoveService
from ..services.websocket_progress_callback import WebSocketProgressCallback
from ..config import config

logger = logging.getLogger(__name__)
@@ -29,6 +33,11 @@ class BaseModelRoutes(ABC):
            loader=jinja2.FileSystemLoader(config.templates_path),
            autoescape=True
        )

        # Initialize file services with dependency injection
        self.model_file_service = ModelFileService(service.scanner, service.model_type)
        self.model_move_service = ModelMoveService(service.scanner)
        self.websocket_progress_callback = WebSocketProgressCallback()

    def setup_routes(self, app: web.Application, prefix: str):
        """Setup common routes for the model type

@@ -38,16 +47,23 @@ class BaseModelRoutes(ABC):
        prefix: URL prefix (e.g., 'loras', 'checkpoints')
        """
        # Common model management routes
        app.router.add_get(f'/api/{prefix}', self.get_models)
        app.router.add_get(f'/api/{prefix}/list', self.get_models)
        app.router.add_post(f'/api/{prefix}/delete', self.delete_model)
        app.router.add_post(f'/api/{prefix}/exclude', self.exclude_model)
        app.router.add_post(f'/api/{prefix}/fetch-civitai', self.fetch_civitai)
        app.router.add_post(f'/api/{prefix}/fetch-all-civitai', self.fetch_all_civitai)
        app.router.add_post(f'/api/{prefix}/relink-civitai', self.relink_civitai)
        app.router.add_post(f'/api/{prefix}/replace-preview', self.replace_preview)
        app.router.add_post(f'/api/{prefix}/save-metadata', self.save_metadata)
        app.router.add_post(f'/api/{prefix}/add-tags', self.add_tags)
        app.router.add_post(f'/api/{prefix}/rename', self.rename_model)
        app.router.add_post(f'/api/{prefix}/bulk-delete', self.bulk_delete_models)
        app.router.add_post(f'/api/{prefix}/verify-duplicates', self.verify_duplicates)
        app.router.add_post(f'/api/{prefix}/move_model', self.move_model)
        app.router.add_post(f'/api/{prefix}/move_models_bulk', self.move_models_bulk)
        app.router.add_get(f'/api/{prefix}/auto-organize', self.auto_organize_models)
        app.router.add_post(f'/api/{prefix}/auto-organize', self.auto_organize_models)
        app.router.add_get(f'/api/{prefix}/auto-organize-progress', self.get_auto_organize_progress)

        # Common query routes
        app.router.add_get(f'/api/{prefix}/top-tags', self.get_top_tags)

@@ -55,8 +71,18 @@ class BaseModelRoutes(ABC):
        app.router.add_get(f'/api/{prefix}/scan', self.scan_models)
        app.router.add_get(f'/api/{prefix}/roots', self.get_model_roots)
        app.router.add_get(f'/api/{prefix}/folders', self.get_folders)
        app.router.add_get(f'/api/{prefix}/folder-tree', self.get_folder_tree)
        app.router.add_get(f'/api/{prefix}/unified-folder-tree', self.get_unified_folder_tree)
        app.router.add_get(f'/api/{prefix}/find-duplicates', self.find_duplicate_models)
        app.router.add_get(f'/api/{prefix}/find-filename-conflicts', self.find_filename_conflicts)
        app.router.add_get(f'/api/{prefix}/get-notes', self.get_model_notes)
        app.router.add_get(f'/api/{prefix}/preview-url', self.get_model_preview_url)
        app.router.add_get(f'/api/{prefix}/civitai-url', self.get_model_civitai_url)
        app.router.add_get(f'/api/{prefix}/metadata', self.get_model_metadata)
        app.router.add_get(f'/api/{prefix}/model-description', self.get_model_description)

        # Autocomplete route
        app.router.add_get(f'/api/{prefix}/relative-paths', self.get_relative_paths)

        # Common Download management
        app.router.add_post(f'/api/download-model', self.download_model)

@@ -64,8 +90,6 @@ class BaseModelRoutes(ABC):
        app.router.add_get(f'/api/cancel-download-get', self.cancel_download_get)
        app.router.add_get(f'/api/download-progress/{{download_id}}', self.get_download_progress)

-        # CivitAI integration routes
-        app.router.add_post(f'/api/{prefix}/fetch-all-civitai', self.fetch_all_civitai)
        # app.router.add_get(f'/api/civitai/versions/{{model_id}}', self.get_civitai_versions)

        # Add generic page route
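Note: the new folder-tree endpoints registered above can be exercised with a plain HTTP client; a minimal sketch (the host, port, and model_root value are placeholders; the route path and response shape come from the diff):

    import aiohttp
    import asyncio

    async def fetch_folder_tree(base_url: str, model_root: str):
        async with aiohttp.ClientSession() as session:
            async with session.get(f"{base_url}/api/loras/folder-tree",
                                   params={"model_root": model_root}) as resp:
                return await resp.json()  # {'success': True, 'tree': ...}

    # asyncio.run(fetch_folder_tree("http://127.0.0.1:8188", "/path/to/loras"))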
@@ -96,30 +120,36 @@ class BaseModelRoutes(ABC):
        if not self.template_env or not template_name:
            return web.Response(text="Template environment or template name not set", status=500)

-        if is_initializing:
-            rendered = self.template_env.get_template(template_name).render(
-                folders=[],
-                is_initializing=True,
-                settings=settings,
-                request=request
-            )
-        else:
+        # Get user's language setting
+        user_language = settings.get('language', 'en')
+
+        # Set server-side i18n locale
+        server_i18n.set_locale(user_language)
+
+        # Add i18n filter to the template environment if not already added
+        if not hasattr(self.template_env, '_i18n_filter_added'):
+            self.template_env.filters['t'] = server_i18n.create_template_filter()
+            self.template_env._i18n_filter_added = True
+
+        # Prepare template context
+        template_context = {
+            'is_initializing': is_initializing,
+            'settings': settings,
+            'request': request,
+            'folders': [],
+            't': server_i18n.get_translation,
+        }
+
+        if not is_initializing:
            try:
                cache = await self.service.scanner.get_cached_data(force_refresh=False)
-                rendered = self.template_env.get_template(template_name).render(
-                    folders=getattr(cache, "folders", []),
-                    is_initializing=False,
-                    settings=settings,
-                    request=request
-                )
+                template_context['folders'] = getattr(cache, "folders", [])
            except Exception as cache_error:
                logger.error(f"Error loading cache data: {cache_error}")
-                rendered = self.template_env.get_template(template_name).render(
-                    folders=[],
-                    is_initializing=True,
-                    settings=settings,
-                    request=request
-                )
+                template_context['is_initializing'] = True
+
+        rendered = self.template_env.get_template(template_name).render(**template_context)

        return web.Response(
            text=rendered,
            content_type='text/html'
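Note: the template-side effect of registering the i18n filter can be shown in a self-contained jinja2 sketch (the inline translation table is a stand-in for server_i18n):

    import jinja2

    env = jinja2.Environment(loader=jinja2.DictLoader({
        "page.html": "<h1>{{ 'app.title' | t }}</h1>"
    }))
    # Register a 't' filter, like the handler does once per environment
    env.filters['t'] = lambda key: {"app.title": "LoRA Manager"}.get(key, key)
    print(env.get_template("page.html").render())  # <h1>LoRA Manager</h1>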
@@ -175,7 +205,8 @@ class BaseModelRoutes(ABC):
            'filename': request.query.get('search_filename', 'true').lower() == 'true',
            'modelname': request.query.get('search_modelname', 'true').lower() == 'true',
            'tags': request.query.get('search_tags', 'false').lower() == 'true',
-            'recursive': request.query.get('recursive', 'false').lower() == 'true',
+            'creator': request.query.get('search_creator', 'false').lower() == 'true',
+            'recursive': request.query.get('recursive', 'true').lower() == 'true',
        }

        # Parse hash filters if provided

@@ -247,6 +278,10 @@ class BaseModelRoutes(ABC):
        """Handle saving metadata updates"""
        return await ModelRouteUtils.handle_save_metadata(request, self.service.scanner)

    async def add_tags(self, request: web.Request) -> web.Response:
        """Handle adding tags to model metadata"""
        return await ModelRouteUtils.handle_add_tags(request, self.service.scanner)

    async def rename_model(self, request: web.Request) -> web.Response:
        """Handle renaming a model file and its associated files"""
        return await ModelRouteUtils.handle_rename_model(request, self.service.scanner)
@@ -343,6 +378,43 @@ class BaseModelRoutes(ABC):
                'error': str(e)
            }, status=500)

    async def get_folder_tree(self, request: web.Request) -> web.Response:
        """Get hierarchical folder tree structure for download modal"""
        try:
            model_root = request.query.get('model_root')
            if not model_root:
                return web.json_response({
                    'success': False,
                    'error': 'model_root parameter is required'
                }, status=400)

            folder_tree = await self.service.get_folder_tree(model_root)
            return web.json_response({
                'success': True,
                'tree': folder_tree
            })
        except Exception as e:
            logger.error(f"Error getting folder tree: {e}")
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_unified_folder_tree(self, request: web.Request) -> web.Response:
        """Get unified folder tree across all model roots"""
        try:
            unified_tree = await self.service.get_unified_folder_tree()
            return web.json_response({
                'success': True,
                'tree': unified_tree
            })
        except Exception as e:
            logger.error(f"Error getting unified folder tree: {e}")
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def find_duplicate_models(self, request: web.Request) -> web.Response:
        """Find models with duplicate SHA256 hashes"""
        try:
@@ -451,6 +523,7 @@ class BaseModelRoutes(ABC):
        model_version_id = request.query.get('model_version_id')
        download_id = request.query.get('download_id')
        use_default_paths = request.query.get('use_default_paths', 'false').lower() == 'true'
        source = request.query.get('source') # Optional source parameter

        # Create a data dictionary that mimics what would be received from a POST request
        data = {

@@ -466,6 +539,10 @@ class BaseModelRoutes(ABC):

        data['use_default_paths'] = use_default_paths

        # Add source parameter if provided
        if source:
            data['source'] = source

        # Create a mock request object with the data
        future = asyncio.get_event_loop().create_future()
        future.set_result(data)
@@ -543,10 +620,24 @@ class BaseModelRoutes(ABC):
        success = 0
        needs_resort = False

-        # Prepare models to process
-        to_process = [
-            model for model in cache.raw_data
-            if model.get('sha256') and (not model.get('civitai') or 'id' not in model.get('civitai')) and model.get('from_civitai', True)
-        ]
+        # Prepare models to process, only those without CivitAI data or missing tags, description, or creator
+        enable_metadata_archive_db = settings.get('enable_metadata_archive_db', False)
+        to_process = [
+            model for model in cache.raw_data
+            if (
+                model.get('sha256')
+                and (
+                    not model.get('civitai')
+                    or not model['civitai'].get('id')
+                    # or not model.get('tags') # Skipping tag cause it could be empty legitimately
+                    # or not model.get('modelDescription')
+                    # or not (model.get('civitai') and model['civitai'].get('creator'))
+                )
+                and (
+                    (enable_metadata_archive_db)
+                    or (not enable_metadata_archive_db and model.get('from_civitai') is True)
+                )
+            )
+        ]
        total_to_process = len(to_process)

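Note: the new list-comprehension filter above can be restated as a plain predicate; a condensed equivalent with a couple of worked checks:

    def needs_civitai_fetch(model, enable_metadata_archive_db=False):
        """Restates the to_process filter: hashed models missing a CivitAI id."""
        has_hash = bool(model.get('sha256'))
        missing_civitai_id = not model.get('civitai') or not model['civitai'].get('id')
        source_ok = enable_metadata_archive_db or model.get('from_civitai') is True
        return has_hash and missing_civitai_id and source_ok

    assert needs_civitai_fetch({'sha256': 'abc', 'from_civitai': True})
    assert not needs_civitai_fetch({'sha256': 'abc', 'civitai': {'id': 1}, 'from_civitai': True})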
@@ -616,4 +707,267 @@ class BaseModelRoutes(ABC):
        # This will be implemented by subclasses as they need CivitAI client access
        return web.json_response({
            "error": "Not implemented in base class"
-        }, status=501)
+        }, status=501)

    # Common model move handlers
    async def move_model(self, request: web.Request) -> web.Response:
        """Handle model move request"""
        try:
            data = await request.json()
            file_path = data.get('file_path')
            target_path = data.get('target_path')

            if not file_path or not target_path:
                return web.Response(text='File path and target path are required', status=400)

            result = await self.model_move_service.move_model(file_path, target_path)

            if result['success']:
                return web.json_response(result)
            else:
                return web.json_response(result, status=500)

        except Exception as e:
            logger.error(f"Error moving model: {e}", exc_info=True)
            return web.Response(text=str(e), status=500)

    async def move_models_bulk(self, request: web.Request) -> web.Response:
        """Handle bulk model move request"""
        try:
            data = await request.json()
            file_paths = data.get('file_paths', [])
            target_path = data.get('target_path')

            if not file_paths or not target_path:
                return web.Response(text='File paths and target path are required', status=400)

            result = await self.model_move_service.move_models_bulk(file_paths, target_path)
            return web.json_response(result)

        except Exception as e:
            logger.error(f"Error moving models in bulk: {e}", exc_info=True)
            return web.Response(text=str(e), status=500)

    async def auto_organize_models(self, request: web.Request) -> web.Response:
        """Auto-organize all models or a specific set of models based on current settings"""
        try:
            # Check if auto-organize is already running
            if ws_manager.is_auto_organize_running():
                return web.json_response({
                    'success': False,
                    'error': 'Auto-organize is already running. Please wait for it to complete.'
                }, status=409)

            # Acquire lock to prevent concurrent auto-organize operations
            auto_organize_lock = await ws_manager.get_auto_organize_lock()

            if auto_organize_lock.locked():
                return web.json_response({
                    'success': False,
                    'error': 'Auto-organize is already running. Please wait for it to complete.'
                }, status=409)

            # Get specific file paths from request if this is a POST with selected models
            file_paths = None
            if request.method == 'POST':
                try:
                    data = await request.json()
                    file_paths = data.get('file_paths')
                except Exception:
                    pass # Continue with all models if no valid JSON

            async with auto_organize_lock:
                # Use the service layer for business logic
                result = await self.model_file_service.auto_organize_models(
                    file_paths=file_paths,
                    progress_callback=self.websocket_progress_callback
                )

                return web.json_response(result.to_dict())

        except Exception as e:
            logger.error(f"Error in auto_organize_models: {e}", exc_info=True)

            # Send error message via WebSocket
            await ws_manager.broadcast_auto_organize_progress({
                'type': 'auto_organize_progress',
                'status': 'error',
                'error': str(e)
            })

            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_auto_organize_progress(self, request: web.Request) -> web.Response:
        """Get current auto-organize progress for polling"""
        try:
            progress_data = ws_manager.get_auto_organize_progress()

            if progress_data is None:
                return web.json_response({
                    'success': False,
                    'error': 'No auto-organize operation in progress'
                }, status=404)

            return web.json_response({
                'success': True,
                'progress': progress_data
            })
        except Exception as e:
            logger.error(f"Error getting auto-organize progress: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_model_notes(self, request: web.Request) -> web.Response:
        """Get notes for a specific model file"""
        try:
            model_name = request.query.get('name')
            if not model_name:
                return web.Response(text=f'{self.model_type.capitalize()} file name is required', status=400)

            notes = await self.service.get_model_notes(model_name)
            if notes is not None:
                return web.json_response({
                    'success': True,
                    'notes': notes
                })
            else:
                return web.json_response({
                    'success': False,
                    'error': f'{self.model_type.capitalize()} not found in cache'
                }, status=404)

        except Exception as e:
            logger.error(f"Error getting {self.model_type} notes: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_model_preview_url(self, request: web.Request) -> web.Response:
        """Get the static preview URL for a model file"""
        try:
            model_name = request.query.get('name')
            if not model_name:
                return web.Response(text=f'{self.model_type.capitalize()} file name is required', status=400)

            preview_url = await self.service.get_model_preview_url(model_name)
            if preview_url:
                return web.json_response({
                    'success': True,
                    'preview_url': preview_url
                })
            else:
                return web.json_response({
                    'success': False,
                    'error': f'No preview URL found for the specified {self.model_type}'
                }, status=404)

        except Exception as e:
            logger.error(f"Error getting {self.model_type} preview URL: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)

    async def get_model_civitai_url(self, request: web.Request) -> web.Response:
        """Get the Civitai URL for a model file"""
        try:
            model_name = request.query.get('name')
            if not model_name:
                return web.Response(text=f'{self.model_type.capitalize()} file name is required', status=400)

            result = await self.service.get_model_civitai_url(model_name)
            if result['civitai_url']:
                return web.json_response({
                    'success': True,
                    **result
                })
            else:
                return web.json_response({
                    'success': False,
                    'error': f'No Civitai data found for the specified {self.model_type}'
                }, status=404)

        except Exception as e:
            logger.error(f"Error getting {self.model_type} Civitai URL: {e}", exc_info=True)
            return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
async def get_model_metadata(self, request: web.Request) -> web.Response:
|
||||
"""Get filtered CivitAI metadata for a model by file path"""
|
||||
try:
|
||||
file_path = request.query.get('file_path')
|
||||
if not file_path:
|
||||
return web.Response(text='File path is required', status=400)
|
||||
|
||||
metadata = await self.service.get_model_metadata(file_path)
|
||||
if metadata is not None:
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'metadata': metadata
|
||||
})
|
||||
else:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': f'{self.model_type.capitalize()} not found or no CivitAI metadata available'
|
||||
}, status=404)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting {self.model_type} metadata: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
async def get_model_description(self, request: web.Request) -> web.Response:
|
||||
"""Get model description by file path"""
|
||||
try:
|
||||
file_path = request.query.get('file_path')
|
||||
if not file_path:
|
||||
return web.Response(text='File path is required', status=400)
|
||||
|
||||
description = await self.service.get_model_description(file_path)
|
||||
if description is not None:
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'description': description
|
||||
})
|
||||
else:
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': f'{self.model_type.capitalize()} not found or no description available'
|
||||
}, status=404)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting {self.model_type} description: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
|
||||
|
||||
async def get_relative_paths(self, request: web.Request) -> web.Response:
|
||||
"""Get model relative file paths for autocomplete functionality"""
|
||||
try:
|
||||
search = request.query.get('search', '').strip()
|
||||
limit = min(int(request.query.get('limit', '15')), 50) # Max 50 items
|
||||
|
||||
matching_paths = await self.service.search_relative_paths(search, limit)
|
||||
|
||||
return web.json_response({
|
||||
'success': True,
|
||||
'relative_paths': matching_paths
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting relative paths for autocomplete: {e}", exc_info=True)
|
||||
return web.json_response({
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}, status=500)
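With these handlers consolidated in BaseModelRoutes, every model type answers the same read-only queries with a common {'success': ...} JSON envelope. A minimal client sketch follows; it assumes the base class wires get_model_preview_url to /api/loras/preview-url (matching the LoRA-specific route removed later in this changeset) and a local server on port 8188 — both are assumptions, not shown in this diff.

import asyncio
from typing import Optional
import aiohttp

async def fetch_preview_url(model_name: str) -> Optional[str]:
    # Assumed base URL and route prefix; adjust for your deployment.
    async with aiohttp.ClientSession() as session:
        async with session.get(
            "http://127.0.0.1:8188/api/loras/preview-url",
            params={"name": model_name},
        ) as resp:
            payload = await resp.json()
            # The handler returns {'success': True, 'preview_url': ...} on a hit
            # and {'success': False, 'error': ...} with HTTP 404 otherwise.
            return payload.get("preview_url") if payload.get("success") else None

if __name__ == "__main__":
    print(asyncio.run(fetch_preview_url("example_lora.safetensors")))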
@@ -4,6 +4,8 @@ from aiohttp import web
 from .base_model_routes import BaseModelRoutes
 from ..services.checkpoint_service import CheckpointService
 from ..services.service_registry import ServiceRegistry
+from ..services.metadata_service import get_default_metadata_provider
+from ..config import config

 logger = logging.getLogger(__name__)

@@ -14,14 +16,12 @@ class CheckpointRoutes(BaseModelRoutes):
         """Initialize Checkpoint routes with Checkpoint service"""
         # Service will be initialized later via setup_routes
         self.service = None
-        self.civitai_client = None
         self.template_name = "checkpoints.html"

     async def initialize_services(self):
         """Initialize services from ServiceRegistry"""
         checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
         self.service = CheckpointService(checkpoint_scanner)
-        self.civitai_client = await ServiceRegistry.get_civitai_client()

         # Initialize parent with the service
         super().__init__(self.service)
@@ -41,6 +41,10 @@ class CheckpointRoutes(BaseModelRoutes):

         # Checkpoint info by name
         app.router.add_get(f'/api/{prefix}/info/{{name}}', self.get_checkpoint_info)

+        # Checkpoint roots and Unet roots
+        app.router.add_get(f'/api/{prefix}/checkpoints_roots', self.get_checkpoints_roots)
+        app.router.add_get(f'/api/{prefix}/unet_roots', self.get_unet_roots)
+
     async def get_checkpoint_info(self, request: web.Request) -> web.Response:
         """Get detailed information for a specific checkpoint by name"""
@@ -61,7 +65,8 @@ class CheckpointRoutes(BaseModelRoutes):
         """Get available versions for a Civitai checkpoint model with local availability info"""
         try:
             model_id = request.match_info['model_id']
-            response = await self.civitai_client.get_model_versions(model_id)
+            metadata_provider = await get_default_metadata_provider()
+            response = await metadata_provider.get_model_versions(model_id)
             if not response or not response.get('modelVersions'):
                 return web.Response(status=404, text="Model not found")

@@ -102,4 +107,34 @@ class CheckpointRoutes(BaseModelRoutes):
             return web.json_response(versions)
         except Exception as e:
             logger.error(f"Error fetching checkpoint model versions: {e}")
-            return web.Response(status=500, text=str(e))
+            return web.Response(status=500, text=str(e))
+
+    async def get_checkpoints_roots(self, request: web.Request) -> web.Response:
+        """Return the list of checkpoint roots from config"""
+        try:
+            roots = config.checkpoints_roots
+            return web.json_response({
+                "success": True,
+                "roots": roots
+            })
+        except Exception as e:
+            logger.error(f"Error getting checkpoint roots: {e}", exc_info=True)
+            return web.json_response({
+                "success": False,
+                "error": str(e)
+            }, status=500)
+
+    async def get_unet_roots(self, request: web.Request) -> web.Response:
+        """Return the list of unet roots from config"""
+        try:
+            roots = config.unet_roots
+            return web.json_response({
+                "success": True,
+                "roots": roots
+            })
+        except Exception as e:
+            logger.error(f"Error getting unet roots: {e}", exc_info=True)
+            return web.json_response({
+                "success": False,
+                "error": str(e)
+            }, status=500)
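The recurring edit in these route files replaces a CivitAI client held on the route object with a metadata provider resolved at call time via get_default_metadata_provider(). A rough sketch of how such an indirection can be structured is below; the real implementation lives in services/metadata_service.py, which this diff does not show, so the names and behavior here are assumptions.

from typing import Optional, Protocol

class MetadataProvider(Protocol):
    async def get_model_versions(self, model_id: str) -> Optional[dict]: ...

_default_provider: Optional[MetadataProvider] = None

def register_default_provider(provider: MetadataProvider) -> None:
    """Called at startup (or when settings change) to swap the active provider."""
    global _default_provider
    _default_provider = provider

async def get_default_metadata_provider() -> MetadataProvider:
    # Resolving at call time, rather than caching a client on each route class,
    # lets a settings change swap CivitAI for the local archive DB mid-session.
    if _default_provider is None:
        raise RuntimeError("No metadata provider registered")
    return _default_provider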
@@ -4,6 +4,7 @@ from aiohttp import web
 from .base_model_routes import BaseModelRoutes
 from ..services.embedding_service import EmbeddingService
 from ..services.service_registry import ServiceRegistry
+from ..services.metadata_service import get_default_metadata_provider

 logger = logging.getLogger(__name__)

@@ -14,14 +15,12 @@ class EmbeddingRoutes(BaseModelRoutes):
         """Initialize Embedding routes with Embedding service"""
         # Service will be initialized later via setup_routes
         self.service = None
-        self.civitai_client = None
         self.template_name = "embeddings.html"

     async def initialize_services(self):
         """Initialize services from ServiceRegistry"""
         embedding_scanner = await ServiceRegistry.get_embedding_scanner()
         self.service = EmbeddingService(embedding_scanner)
-        self.civitai_client = await ServiceRegistry.get_civitai_client()

         # Initialize parent with the service
         super().__init__(self.service)
@@ -61,7 +60,8 @@ class EmbeddingRoutes(BaseModelRoutes):
         """Get available versions for a Civitai embedding model with local availability info"""
         try:
             model_id = request.match_info['model_id']
-            response = await self.civitai_client.get_model_versions(model_id)
+            metadata_provider = await get_default_metadata_provider()
+            response = await metadata_provider.get_model_versions(model_id)
             if not response or not response.get('modelVersions'):
                 return web.Response(status=404, text="Model not found")

@@ -2,6 +2,7 @@ import logging
 from ..utils.example_images_download_manager import DownloadManager
 from ..utils.example_images_processor import ExampleImagesProcessor
 from ..utils.example_images_file_manager import ExampleImagesFileManager
+from ..services.websocket_manager import ws_manager

 logger = logging.getLogger(__name__)

@@ -20,6 +21,7 @@ class ExampleImagesRoutes:
         app.router.add_get('/api/example-image-files', ExampleImagesRoutes.get_example_image_files)
         app.router.add_get('/api/has-example-images', ExampleImagesRoutes.has_example_images)
         app.router.add_post('/api/delete-example-image', ExampleImagesRoutes.delete_example_image)
+        app.router.add_post('/api/force-download-example-images', ExampleImagesRoutes.force_download_example_images)

     @staticmethod
     async def download_example_images(request):
@@ -64,4 +66,9 @@ class ExampleImagesRoutes:
     @staticmethod
     async def delete_example_image(request):
         """Delete a custom example image for a model"""
-        return await ExampleImagesProcessor.delete_custom_image(request)
+        return await ExampleImagesProcessor.delete_custom_image(request)
+
+    @staticmethod
+    async def force_download_example_images(request):
+        """Force download example images for specific models"""
+        return await DownloadManager.start_force_download(request)

@@ -7,7 +7,7 @@ from server import PromptServer # type: ignore
 from .base_model_routes import BaseModelRoutes
 from ..services.lora_service import LoraService
 from ..services.service_registry import ServiceRegistry
-from ..utils.routes_common import ModelRouteUtils
+from ..services.metadata_service import get_default_metadata_provider
 from ..utils.utils import get_lora_info

 logger = logging.getLogger(__name__)
@@ -19,14 +19,12 @@ class LoraRoutes(BaseModelRoutes):
         """Initialize LoRA routes with LoRA service"""
         # Service will be initialized later via setup_routes
         self.service = None
-        self.civitai_client = None
         self.template_name = "loras.html"

     async def initialize_services(self):
         """Initialize services from ServiceRegistry"""
         lora_scanner = await ServiceRegistry.get_lora_scanner()
         self.service = LoraService(lora_scanner)
-        self.civitai_client = await ServiceRegistry.get_civitai_client()

         # Initialize parent with the service
         super().__init__(self.service)
@@ -43,15 +41,8 @@ class LoraRoutes(BaseModelRoutes):
         """Setup LoRA-specific routes"""
         # LoRA-specific query routes
         app.router.add_get(f'/api/{prefix}/letter-counts', self.get_letter_counts)
-        app.router.add_get(f'/api/{prefix}/get-notes', self.get_lora_notes)
-        app.router.add_get(f'/api/{prefix}/get-trigger-words', self.get_lora_trigger_words)
-        app.router.add_get(f'/api/{prefix}/preview-url', self.get_lora_preview_url)
-        app.router.add_get(f'/api/{prefix}/civitai-url', self.get_lora_civitai_url)
-        app.router.add_get(f'/api/{prefix}/model-description', self.get_lora_model_description)

         # LoRA-specific management routes
-        app.router.add_post(f'/api/{prefix}/move_model', self.move_model)
-        app.router.add_post(f'/api/{prefix}/move_models_bulk', self.move_models_bulk)
+        app.router.add_get(f'/api/{prefix}/usage-tips-by-path', self.get_lora_usage_tips_by_path)

         # CivitAI integration with LoRA-specific validation
         app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_lora)
@@ -147,6 +138,26 @@ class LoraRoutes(BaseModelRoutes):
                 'error': str(e)
             }, status=500)

+    async def get_lora_usage_tips_by_path(self, request: web.Request) -> web.Response:
+        """Get usage tips for a LoRA by its relative path"""
+        try:
+            relative_path = request.query.get('relative_path')
+            if not relative_path:
+                return web.Response(text='Relative path is required', status=400)
+
+            usage_tips = await self.service.get_lora_usage_tips_by_relative_path(relative_path)
+            return web.json_response({
+                'success': True,
+                'usage_tips': usage_tips or ''
+            })
+
+        except Exception as e:
+            logger.error(f"Error getting lora usage tips by path: {e}", exc_info=True)
+            return web.json_response({
+                'success': False,
+                'error': str(e)
+            }, status=500)
+
     async def get_lora_preview_url(self, request: web.Request) -> web.Response:
         """Get the static preview URL for a LoRA file"""
         try:
@@ -204,7 +215,8 @@ class LoraRoutes(BaseModelRoutes):
         """Get available versions for a Civitai LoRA model with local availability info"""
         try:
             model_id = request.match_info['model_id']
-            response = await self.civitai_client.get_model_versions(model_id)
+            metadata_provider = await get_default_metadata_provider()
+            response = await metadata_provider.get_model_versions(model_id)
             if not response or not response.get('modelVersions'):
                 return web.Response(status=404, text="Model not found")

@@ -248,9 +260,10 @@ class LoraRoutes(BaseModelRoutes):
         try:
             model_version_id = request.match_info.get('modelVersionId')

-            # Get model details from Civitai API
-            model, error_msg = await self.civitai_client.get_model_version_info(model_version_id)
+            # Get model details from metadata provider
+            metadata_provider = await get_default_metadata_provider()
+            model, error_msg = await metadata_provider.get_model_version_info(model_version_id)

             if not model:
                 # Log warning for failed model retrieval
                 logger.warning(f"Failed to fetch model version {model_version_id}: {error_msg}")
@@ -275,7 +288,8 @@ class LoraRoutes(BaseModelRoutes):
         """Get CivitAI model details by hash"""
         try:
             hash = request.match_info.get('hash')
-            model = await self.civitai_client.get_model_by_hash(hash)
+            metadata_provider = await get_default_metadata_provider()
+            model = await metadata_provider.get_model_by_hash(hash)
             return web.json_response(model)
         except Exception as e:
             logger.error(f"Error fetching model details by hash: {e}")
@@ -284,173 +298,6 @@ class LoraRoutes(BaseModelRoutes):
                 "error": str(e)
             }, status=500)

-    # Model management methods
-    async def move_model(self, request: web.Request) -> web.Response:
-        """Handle model move request"""
-        try:
-            data = await request.json()
-            file_path = data.get('file_path')  # full path of the model file
-            target_path = data.get('target_path')  # folder path to move the model to
-
-            if not file_path or not target_path:
-                return web.Response(text='File path and target path are required', status=400)
-
-            # Check if source and destination are the same
-            import os
-            source_dir = os.path.dirname(file_path)
-            if os.path.normpath(source_dir) == os.path.normpath(target_path):
-                logger.info(f"Source and target directories are the same: {source_dir}")
-                return web.json_response({'success': True, 'message': 'Source and target directories are the same'})
-
-            # Check if target file already exists
-            file_name = os.path.basename(file_path)
-            target_file_path = os.path.join(target_path, file_name).replace(os.sep, '/')
-
-            if os.path.exists(target_file_path):
-                return web.json_response({
-                    'success': False,
-                    'error': f"Target file already exists: {target_file_path}"
-                }, status=409)  # 409 Conflict
-
-            # Call scanner to handle the move operation
-            success = await self.service.scanner.move_model(file_path, target_path)
-
-            if success:
-                return web.json_response({'success': True, 'new_file_path': target_file_path})
-            else:
-                return web.Response(text='Failed to move model', status=500)
-
-        except Exception as e:
-            logger.error(f"Error moving model: {e}", exc_info=True)
-            return web.Response(text=str(e), status=500)
-
-    async def move_models_bulk(self, request: web.Request) -> web.Response:
-        """Handle bulk model move request"""
-        try:
-            data = await request.json()
-            file_paths = data.get('file_paths', [])  # list of full paths of the model files
-            target_path = data.get('target_path')  # folder path to move the models to
-
-            if not file_paths or not target_path:
-                return web.Response(text='File paths and target path are required', status=400)
-
-            results = []
-            import os
-            for file_path in file_paths:
-                # Check if source and destination are the same
-                source_dir = os.path.dirname(file_path)
-                if os.path.normpath(source_dir) == os.path.normpath(target_path):
-                    results.append({
-                        "path": file_path,
-                        "success": True,
-                        "message": "Source and target directories are the same"
-                    })
-                    continue
-
-                # Check if target file already exists
-                file_name = os.path.basename(file_path)
-                target_file_path = os.path.join(target_path, file_name).replace(os.sep, '/')
-
-                if os.path.exists(target_file_path):
-                    results.append({
-                        "path": file_path,
-                        "success": False,
-                        "message": f"Target file already exists: {target_file_path}"
-                    })
-                    continue
-
-                # Try to move the model
-                success = await self.service.scanner.move_model(file_path, target_path)
-                results.append({
-                    "path": file_path,
-                    "success": success,
-                    "message": "Success" if success else "Failed to move model"
-                })
-
-            # Count successes and failures
-            success_count = sum(1 for r in results if r["success"])
-            failure_count = len(results) - success_count
-
-            return web.json_response({
-                'success': True,
-                'message': f'Moved {success_count} of {len(file_paths)} models',
-                'results': results,
-                'success_count': success_count,
-                'failure_count': failure_count
-            })
-
-        except Exception as e:
-            logger.error(f"Error moving models in bulk: {e}", exc_info=True)
-            return web.Response(text=str(e), status=500)
-
-    async def get_lora_model_description(self, request: web.Request) -> web.Response:
-        """Get model description for a Lora model"""
-        try:
-            # Get parameters
-            model_id = request.query.get('model_id')
-            file_path = request.query.get('file_path')
-
-            if not model_id:
-                return web.json_response({
-                    'success': False,
-                    'error': 'Model ID is required'
-                }, status=400)
-
-            # Check if we already have the description stored in metadata
-            description = None
-            tags = []
-            creator = {}
-            if file_path:
-                import os
-                from ..utils.metadata_manager import MetadataManager
-                metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
-                metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
-                description = metadata.get('modelDescription')
-                tags = metadata.get('tags', [])
-                creator = metadata.get('creator', {})
-
-            # If description is not in metadata, fetch from CivitAI
-            if not description:
-                logger.info(f"Fetching model metadata for model ID: {model_id}")
-                model_metadata, _ = await self.civitai_client.get_model_metadata(model_id)
-
-                if model_metadata:
-                    description = model_metadata.get('description')
-                    tags = model_metadata.get('tags', [])
-                    creator = model_metadata.get('creator', {})
-
-                # Save the metadata to file if we have a file path and got metadata
-                if file_path:
-                    try:
-                        metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
-                        metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
-
-                        metadata['modelDescription'] = description
-                        metadata['tags'] = tags
-                        # Ensure the civitai dict exists
-                        if 'civitai' not in metadata:
-                            metadata['civitai'] = {}
-                        # Store creator in the civitai nested structure
-                        metadata['civitai']['creator'] = creator
-
-                        await MetadataManager.save_metadata(file_path, metadata, True)
-                    except Exception as e:
-                        logger.error(f"Error saving model metadata: {e}")
-
-            return web.json_response({
-                'success': True,
-                'description': description or "<p>No model description available.</p>",
-                'tags': tags,
-                'creator': creator
-            })
-
-        except Exception as e:
-            logger.error(f"Error getting model metadata: {e}")
-            return web.json_response({
-                'success': False,
-                'error': str(e)
-            }, status=500)
-
     async def get_trigger_words(self, request: web.Request) -> web.Response:
         """Get trigger words for specified LoRA models"""
         try:
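The per-route move logic removed above now sits behind model_move_service, but the two safety checks it performed are the interesting part of the old code. This standalone sketch reproduces them; the function name is illustrative only, not taken from the service.

import os

def validate_move(file_path: str, target_path: str) -> tuple:
    source_dir = os.path.dirname(file_path)
    # No-op move: normpath makes 'a/b' and 'a\\b/' compare equal across platforms.
    if os.path.normpath(source_dir) == os.path.normpath(target_path):
        return True, "Source and target directories are the same"
    target_file = os.path.join(target_path, os.path.basename(file_path)).replace(os.sep, '/')
    # Refuse to clobber an existing file; the removed route mapped this to HTTP 409.
    if os.path.exists(target_file):
        return False, f"Target file already exists: {target_file}"
    return True, target_file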
@@ -1,9 +1,9 @@
 import json
 import logging
 import os
 import sys
-import threading
 import asyncio
+import subprocess
 from server import PromptServer # type: ignore
 from aiohttp import web
 from ..services.settings_manager import settings
@@ -12,8 +13,9 @@ from ..utils.lora_metadata import extract_trained_words
 from ..config import config
 from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS, NODE_TYPES, DEFAULT_NODE_COLOR
 from ..services.service_registry import ServiceRegistry
 import re

 from ..services.metadata_service import get_metadata_archive_manager, update_metadata_providers
 from ..services.websocket_manager import ws_manager
+from ..services.downloader import get_downloader
 logger = logging.getLogger(__name__)

 standalone_mode = 'nodes' not in sys.modules
@@ -87,13 +88,13 @@ class MiscRoutes:
     @staticmethod
     def setup_routes(app):
         """Register miscellaneous routes"""
-        app.router.add_post('/api/settings', MiscRoutes.update_settings)
-
-        # Add new route for clearing cache
-        app.router.add_post('/api/clear-cache', MiscRoutes.clear_cache)
+        app.router.add_get('/api/lm/settings', MiscRoutes.get_settings)
+        app.router.add_post('/api/lm/settings', MiscRoutes.update_settings)

         app.router.add_get('/api/health-check', lambda request: web.json_response({'status': 'ok'}))

+        app.router.add_post('/api/open-file-location', MiscRoutes.open_file_location)
+
         # Usage stats routes
         app.router.add_post('/api/update-usage-stats', MiscRoutes.update_usage_stats)
         app.router.add_get('/api/get-usage-stats', MiscRoutes.get_usage_stats)
@@ -113,47 +114,51 @@ class MiscRoutes:

         # Add new route for checking if a model exists in the library
         app.router.add_get('/api/check-model-exists', MiscRoutes.check_model_exists)

+        # Add routes for metadata archive database management
+        app.router.add_post('/api/download-metadata-archive', MiscRoutes.download_metadata_archive)
+        app.router.add_post('/api/remove-metadata-archive', MiscRoutes.remove_metadata_archive)
+        app.router.add_get('/api/metadata-archive-status', MiscRoutes.get_metadata_archive_status)
+
     @staticmethod
-    async def clear_cache(request):
-        """Clear all cache files from the cache folder"""
+    async def get_settings(request):
+        """Get application settings that should be synced to frontend"""
         try:
-            # Get the cache folder path (relative to project directory)
-            project_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-            cache_folder = os.path.join(project_dir, 'cache')
-
-            # Check if cache folder exists
-            if not os.path.exists(cache_folder):
-                logger.info("Cache folder does not exist, nothing to clear")
-                return web.json_response({'success': True, 'message': 'No cache folder found'})
-
-            # Get list of cache files before deleting for reporting
-            cache_files = [f for f in os.listdir(cache_folder) if os.path.isfile(os.path.join(cache_folder, f))]
-            deleted_files = []
-
-            # Delete each .msgpack file in the cache folder
-            for filename in cache_files:
-                if filename.endswith('.msgpack'):
-                    file_path = os.path.join(cache_folder, filename)
-                    try:
-                        os.remove(file_path)
-                        deleted_files.append(filename)
-                        logger.info(f"Deleted cache file: {filename}")
-                    except Exception as e:
-                        logger.error(f"Failed to delete {filename}: {e}")
-                        return web.json_response({
-                            'success': False,
-                            'error': f"Failed to delete {filename}: {str(e)}"
-                        }, status=500)
+            # Define keys that should be synced from backend to frontend
+            sync_keys = [
+                'civitai_api_key',
+                'default_lora_root',
+                'default_checkpoint_root',
+                'default_embedding_root',
+                'base_model_path_mappings',
+                'download_path_templates',
+                'enable_metadata_archive_db',
+                'language',
+                'proxy_enabled',
+                'proxy_type',
+                'proxy_host',
+                'proxy_port',
+                'proxy_username',
+                'proxy_password',
+                'example_images_path',
+                'optimizeExampleImages',
+                'autoDownloadExampleImages'
+            ]
+
+            # Build response with only the keys that should be synced
+            response_data = {}
+            for key in sync_keys:
+                value = settings.get(key)
+                if value is not None:
+                    response_data[key] = value

             return web.json_response({
                 'success': True,
-                'message': f"Successfully cleared {len(deleted_files)} cache files",
-                'deleted_files': deleted_files
+                'settings': response_data
             })

         except Exception as e:
-            logger.error(f"Error clearing cache files: {e}", exc_info=True)
+            logger.error(f"Error getting settings: {e}", exc_info=True)
             return web.json_response({
                 'success': False,
                 'error': str(e)
@@ -164,6 +169,8 @@ class MiscRoutes:
         """Update application settings"""
         try:
             data = await request.json()
+            proxy_keys = {'proxy_enabled', 'proxy_host', 'proxy_port', 'proxy_username', 'proxy_password', 'proxy_type'}
+            proxy_changed = False

             # Validate and update settings
             for key, value in data.items():
@@ -182,20 +189,24 @@ class MiscRoutes:
                    old_path = settings.get('example_images_path')
                    if old_path != value:
                        logger.info(f"Example images path changed to {value} - server restart required")

                # Special handling for base_model_path_mappings - parse JSON string
                if key == 'base_model_path_mappings' and value:
                    try:
                        value = json.loads(value)
                    except json.JSONDecodeError:
                        return web.json_response({
                            'success': False,
                            'error': f"Invalid JSON format for base_model_path_mappings: {value}"
                        })

-                # Save to settings
-                settings.set(key, value)
+                # Handle deletion for proxy credentials
+                if value == '__DELETE__' and key in ('proxy_username', 'proxy_password'):
+                    settings.delete(key)
+                else:
+                    # Save to settings
+                    settings.set(key, value)

                if key == 'enable_metadata_archive_db':
                    await update_metadata_providers()

+                if key in proxy_keys:
+                    proxy_changed = True
+
+            if proxy_changed:
+                downloader = await get_downloader()
+                await downloader.refresh_session()
+
            return web.json_response({'success': True})
        except Exception as e:
            logger.error(f"Error updating settings: {e}", exc_info=True)
@@ -654,13 +665,13 @@ class MiscRoutes:
             exists = False
             model_type = None

-            if await lora_scanner.check_model_version_exists(model_id, model_version_id):
+            if await lora_scanner.check_model_version_exists(model_version_id):
                 exists = True
                 model_type = 'lora'
-            elif checkpoint_scanner and await checkpoint_scanner.check_model_version_exists(model_id, model_version_id):
+            elif checkpoint_scanner and await checkpoint_scanner.check_model_version_exists(model_version_id):
                 exists = True
                 model_type = 'checkpoint'
-            elif embedding_scanner and await embedding_scanner.check_model_version_exists(model_id, model_version_id):
+            elif embedding_scanner and await embedding_scanner.check_model_version_exists(model_version_id):
                 exists = True
                 model_type = 'embedding'

@@ -708,3 +719,167 @@ class MiscRoutes:
                 'success': False,
                 'error': str(e)
             }, status=500)
+
+    @staticmethod
+    async def download_metadata_archive(request):
+        """Download and extract the metadata archive database"""
+        try:
+            archive_manager = await get_metadata_archive_manager()
+
+            # Get the download_id from query parameters if provided
+            download_id = request.query.get('download_id')
+
+            # Progress callback to send updates via WebSocket
+            def progress_callback(stage, message):
+                data = {
+                    'stage': stage,
+                    'message': message,
+                    'type': 'metadata_archive_download'
+                }
+
+                if download_id:
+                    # Send to specific download WebSocket if download_id is provided
+                    asyncio.create_task(ws_manager.broadcast_download_progress(download_id, data))
+                else:
+                    # Fallback to general broadcast
+                    asyncio.create_task(ws_manager.broadcast(data))
+
+            # Download and extract in background
+            success = await archive_manager.download_and_extract_database(progress_callback)
+
+            if success:
+                # Update settings to enable metadata archive
+                settings.set('enable_metadata_archive_db', True)
+
+                # Update metadata providers
+                await update_metadata_providers()
+
+                return web.json_response({
+                    'success': True,
+                    'message': 'Metadata archive database downloaded and extracted successfully'
+                })
+            else:
+                return web.json_response({
+                    'success': False,
+                    'error': 'Failed to download and extract metadata archive database'
+                }, status=500)
+
+        except Exception as e:
+            logger.error(f"Error downloading metadata archive: {e}", exc_info=True)
+            return web.json_response({
+                'success': False,
+                'error': str(e)
+            }, status=500)
+
+    @staticmethod
+    async def remove_metadata_archive(request):
+        """Remove the metadata archive database"""
+        try:
+            archive_manager = await get_metadata_archive_manager()
+
+            success = await archive_manager.remove_database()
+
+            if success:
+                # Update settings to disable metadata archive
+                settings.set('enable_metadata_archive_db', False)
+
+                # Update metadata providers
+                await update_metadata_providers()
+
+                return web.json_response({
+                    'success': True,
+                    'message': 'Metadata archive database removed successfully'
+                })
+            else:
+                return web.json_response({
+                    'success': False,
+                    'error': 'Failed to remove metadata archive database'
+                }, status=500)
+
+        except Exception as e:
+            logger.error(f"Error removing metadata archive: {e}", exc_info=True)
+            return web.json_response({
+                'success': False,
+                'error': str(e)
+            }, status=500)
+
+    @staticmethod
+    async def get_metadata_archive_status(request):
+        """Get the status of metadata archive database"""
+        try:
+            archive_manager = await get_metadata_archive_manager()
+
+            is_available = archive_manager.is_database_available()
+            is_enabled = settings.get('enable_metadata_archive_db', False)
+
+            db_size = 0
+            if is_available:
+                db_path = archive_manager.get_database_path()
+                if db_path and os.path.exists(db_path):
+                    db_size = os.path.getsize(db_path)
+
+            return web.json_response({
+                'success': True,
+                'isAvailable': is_available,
+                'isEnabled': is_enabled,
+                'databaseSize': db_size,
+                'databasePath': archive_manager.get_database_path() if is_available else None
+            })
+
+        except Exception as e:
+            logger.error(f"Error getting metadata archive status: {e}", exc_info=True)
+            return web.json_response({
+                'success': False,
+                'error': str(e)
+            }, status=500)
+
+    @staticmethod
+    async def open_file_location(request):
+        """
+        Open the folder containing the specified file and select the file in the file explorer.
+
+        Expects a JSON request body with:
+        {
+            "file_path": "absolute/path/to/file"
+        }
+        """
+        try:
+            data = await request.json()
+            file_path = data.get('file_path')
+
+            if not file_path:
+                return web.json_response({
+                    'success': False,
+                    'error': 'Missing file_path parameter'
+                }, status=400)
+
+            file_path = os.path.abspath(file_path)
+
+            if not os.path.isfile(file_path):
+                return web.json_response({
+                    'success': False,
+                    'error': 'File does not exist'
+                }, status=404)
+
+            # Open the folder and select the file
+            if os.name == 'nt':  # Windows
+                # explorer /select,"C:\path\to\file"
+                subprocess.Popen(['explorer', '/select,', file_path])
+            elif os.name == 'posix':
+                if sys.platform == 'darwin':  # macOS
+                    subprocess.Popen(['open', '-R', file_path])
+                else:  # Linux (selecting file is not standard, just open folder)
+                    folder = os.path.dirname(file_path)
+                    subprocess.Popen(['xdg-open', folder])
+
+            return web.json_response({
+                'success': True,
+                'message': f'Opened folder and selected file: {file_path}'
+            })
+
+        except Exception as e:
+            logger.error(f"Failed to open file location: {e}", exc_info=True)
+            return web.json_response({
+                'success': False,
+                'error': str(e)
+            }, status=500)
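A minimal client call for the new endpoint, sketched with requests (the server address is an assumption):

import requests

resp = requests.post(
    "http://127.0.0.1:8188/api/open-file-location",
    json={"file_path": "/absolute/path/to/model.safetensors"},
)
# 400 without file_path, 404 if the file is missing; otherwise the OS file
# manager opens with the file selected (Explorer/Finder), or the containing
# folder opens via xdg-open on Linux.
print(resp.status_code, resp.json())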
@@ -17,13 +17,14 @@ from ..recipes import RecipeParserFactory
 from ..utils.constants import CARD_PREVIEW_WIDTH

 from ..services.settings_manager import settings
+from ..services.server_i18n import server_i18n
 from ..config import config

 # Check if running in standalone mode
 standalone_mode = 'nodes' not in sys.modules

 from ..utils.utils import download_civitai_image
 from ..services.service_registry import ServiceRegistry # Add ServiceRegistry import
+from ..services.downloader import get_downloader

 # Only import MetadataRegistry in non-standalone mode
 if not standalone_mode:
@@ -128,6 +129,17 @@ class RecipeRoutes:
         # Ensure services are initialized
         await self.init_services()

+        # Get the user's language setting
+        user_language = settings.get('language', 'en')
+
+        # Set the server-side i18n locale
+        server_i18n.set_locale(user_language)
+
+        # Add the i18n filter to the template environment
+        if not hasattr(self.template_env, '_i18n_filter_added'):
+            self.template_env.filters['t'] = server_i18n.create_template_filter()
+            self.template_env._i18n_filter_added = True
+
         # Skip initialization check and directly try to get cached data
         try:
             # Recipe scanner will initialize cache if needed
@@ -137,7 +149,9 @@ class RecipeRoutes:
                 recipes=[],  # Frontend will load recipes via API
                 is_initializing=False,
                 settings=settings,
-                request=request
+                request=request,
+                # Add the server-side translation function
+                t=server_i18n.get_translation,
             )
         except Exception as cache_error:
             logger.error(f"Error loading recipe cache data: {cache_error}")
@@ -146,7 +160,9 @@ class RecipeRoutes:
             rendered = template.render(
                 is_initializing=True,
                 settings=settings,
-                request=request
+                request=request,
+                # Add the server-side translation function
+                t=server_i18n.get_translation,
             )
             logger.info("Recipe cache error, returning initialization page")
@@ -357,35 +373,27 @@ class RecipeRoutes:
                         "loras": []
                     }, status=400)

-                # Download image directly from URL
-                session = await self.civitai_client.session
+                # Download image using unified downloader
+                downloader = await get_downloader()
                 # Create a temporary file to save the downloaded image
                 with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
                     temp_path = temp_file.name

-                async with session.get(image_url) as response:
-                    if response.status != 200:
-                        return web.json_response({
-                            "error": f"Failed to download image from URL: HTTP {response.status}",
-                            "loras": []
-                        }, status=400)
-
-                    with open(temp_path, 'wb') as f:
-                        f.write(await response.read())
+                success, result = await downloader.download_file(
+                    image_url,
+                    temp_path,
+                    use_auth=False  # Image downloads typically don't need auth
+                )
+
+                if not success:
+                    return web.json_response({
+                        "error": f"Failed to download image from URL: {result}",
+                        "loras": []
+                    }, status=400)

                 # Use meta field from image_info as metadata
                 if 'meta' in image_info:
                     metadata = image_info['meta']

             else:
                 # Not a Civitai image URL, use the original download method
                 temp_path = download_civitai_image(url)

                 if not temp_path:
                     return web.json_response({
                         "error": "Failed to download image from URL",
                         "loras": []
                     }, status=400)

             # If metadata wasn't obtained from Civitai API, extract it from the image
             if metadata is None:
@@ -425,8 +433,7 @@ class RecipeRoutes:
             # Parse the metadata
             result = await parser.parse_metadata(
                 metadata,
-                recipe_scanner=self.recipe_scanner,
-                civitai_client=self.civitai_client
+                recipe_scanner=self.recipe_scanner
             )

             # For URL mode, include the image data as base64
@@ -527,8 +534,7 @@ class RecipeRoutes:
             # Parse the metadata
             result = await parser.parse_metadata(
                 metadata,
-                recipe_scanner=self.recipe_scanner,
-                civitai_client=self.civitai_client
+                recipe_scanner=self.recipe_scanner
             )

             # Add base64 image data to result
@@ -638,21 +644,6 @@ class RecipeRoutes:
                 image = base64.b64decode(image_base64)
             except Exception as e:
                 return web.json_response({"error": f"Invalid base64 image data: {str(e)}"}, status=400)
-        elif image_url:
-            # Download image from URL
-            temp_path = download_civitai_image(image_url)
-            if not temp_path:
-                return web.json_response({"error": "Failed to download image from URL"}, status=400)
-
-            # Read the downloaded image
-            with open(temp_path, 'rb') as f:
-                image = f.read()
-
-            # Clean up temp file
-            try:
-                os.unlink(temp_path)
-            except:
-                pass
         else:
            return web.json_response({"error": "No image data provided"}, status=400)
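The recipe route now funnels image fetches through the shared downloader and branches on a (success, result) pair. Below is a sketch of the contract those calls appear to rely on; the shape is inferred from the diff, not taken from the real Downloader class, and a real implementation would also honor use_auth, allow_resume, and the configured proxy.

import aiohttp

class Downloader:
    async def download_file(self, url: str, save_path: str,
                            use_auth: bool = True,
                            allow_resume: bool = True) -> tuple:
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as resp:
                    if resp.status != 200:
                        return False, f"HTTP {resp.status}"
                    with open(save_path, "wb") as f:
                        f.write(await resp.read())
            return True, save_path
        except aiohttp.ClientError as e:
            # Callers branch on the boolean and surface `result` as the error text
            return False, str(e)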
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ from typing import Dict, List, Any
|
||||
|
||||
from ..config import config
|
||||
from ..services.settings_manager import settings
|
||||
from ..services.server_i18n import server_i18n
|
||||
from ..services.service_registry import ServiceRegistry
|
||||
from ..utils.usage_stats import UsageStats
|
||||
|
||||
@@ -32,7 +33,13 @@ class StatsRoutes:
|
||||
self.lora_scanner = await ServiceRegistry.get_lora_scanner()
|
||||
self.checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
|
||||
self.embedding_scanner = await ServiceRegistry.get_embedding_scanner()
|
||||
self.usage_stats = UsageStats()
|
||||
|
||||
# Only initialize usage stats if we have valid paths configured
|
||||
try:
|
||||
self.usage_stats = UsageStats()
|
||||
except RuntimeError as e:
|
||||
logger.warning(f"Could not initialize usage statistics: {e}")
|
||||
self.usage_stats = None
|
||||
|
||||
async def handle_stats_page(self, request: web.Request) -> web.Response:
|
||||
"""Handle GET /statistics request"""
|
||||
@@ -58,11 +65,23 @@ class StatsRoutes:
|
||||
|
||||
is_initializing = lora_initializing or checkpoint_initializing or embedding_initializing
|
||||
|
||||
# 获取用户语言设置
|
||||
user_language = settings.get('language', 'en')
|
||||
|
||||
# 设置服务端i18n语言
|
||||
server_i18n.set_locale(user_language)
|
||||
|
||||
# 为模板环境添加i18n过滤器
|
||||
if not hasattr(self.template_env, '_i18n_filter_added'):
|
||||
self.template_env.filters['t'] = server_i18n.create_template_filter()
|
||||
self.template_env._i18n_filter_added = True
|
||||
|
||||
template = self.template_env.get_template('statistics.html')
|
||||
rendered = template.render(
|
||||
is_initializing=is_initializing,
|
||||
settings=settings,
|
||||
request=request
|
||||
request=request,
|
||||
t=server_i18n.get_translation,
|
||||
)
|
||||
|
||||
return web.Response(
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
import os
|
||||
import subprocess
|
||||
import aiohttp
|
||||
import logging
|
||||
import toml
|
||||
import git
|
||||
import zipfile
|
||||
import shutil
|
||||
import tempfile
|
||||
from datetime import datetime
|
||||
from aiohttp import web
|
||||
from typing import Dict, List
|
||||
|
||||
from ..services.downloader import get_downloader, Downloader
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -157,57 +154,89 @@ class UpdateRoutes:
|
||||
async def _download_and_replace_zip(plugin_root: str) -> tuple[bool, str]:
|
||||
"""
|
||||
Download latest release ZIP from GitHub and replace plugin files.
|
||||
Skips settings.json.
|
||||
Skips settings.json and civitai folder. Writes extracted file list to .tracking.
|
||||
"""
|
||||
repo_owner = "willmiao"
|
||||
repo_name = "ComfyUI-Lora-Manager"
|
||||
github_api = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(github_api) as resp:
|
||||
if resp.status != 200:
|
||||
logger.error(f"Failed to fetch release info: {resp.status}")
|
||||
return False, ""
|
||||
data = await resp.json()
|
||||
zip_url = data.get("zipball_url")
|
||||
version = data.get("tag_name", "unknown")
|
||||
downloader = await get_downloader()
|
||||
|
||||
# Get release info
|
||||
success, data = await downloader.make_request(
|
||||
'GET',
|
||||
github_api,
|
||||
use_auth=False
|
||||
)
|
||||
if not success:
|
||||
logger.error(f"Failed to fetch release info: {data}")
|
||||
return False, ""
|
||||
|
||||
zip_url = data.get("zipball_url")
|
||||
version = data.get("tag_name", "unknown")
|
||||
|
||||
# Download ZIP
|
||||
async with session.get(zip_url) as zip_resp:
|
||||
if zip_resp.status != 200:
|
||||
logger.error(f"Failed to download ZIP: {zip_resp.status}")
|
||||
return False, ""
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_zip:
|
||||
tmp_zip.write(await zip_resp.read())
|
||||
zip_path = tmp_zip.name
|
||||
# Download ZIP to temporary file
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_zip:
|
||||
tmp_zip_path = tmp_zip.name
|
||||
|
||||
success, result = await downloader.download_file(
|
||||
url=zip_url,
|
||||
save_path=tmp_zip_path,
|
||||
use_auth=False,
|
||||
allow_resume=False
|
||||
)
|
||||
|
||||
if not success:
|
||||
logger.error(f"Failed to download ZIP: {result}")
|
||||
return False, ""
|
||||
|
||||
UpdateRoutes._clean_plugin_folder(plugin_root, skip_files=['settings.json'])
|
||||
zip_path = tmp_zip_path
|
||||
|
||||
# Extract ZIP to temp dir
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
# Skip both settings.json and civitai folder
|
||||
UpdateRoutes._clean_plugin_folder(plugin_root, skip_files=['settings.json', 'civitai'])
|
||||
|
||||
# Extract ZIP to temp dir
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
|
||||
zip_ref.extractall(tmp_dir)
|
||||
# Find extracted folder (GitHub ZIP contains a root folder)
|
||||
extracted_root = next(os.scandir(tmp_dir)).path
|
||||
|
||||
# Copy files, skipping settings.json
|
||||
# Copy files, skipping settings.json and civitai folder
|
||||
for item in os.listdir(extracted_root):
|
||||
if item == 'settings.json' or item == 'civitai':
|
||||
continue
|
||||
src = os.path.join(extracted_root, item)
|
||||
dst = os.path.join(plugin_root, item)
|
||||
if os.path.isdir(src):
|
||||
# Remove old folder, then copy
|
||||
if os.path.exists(dst):
|
||||
shutil.rmtree(dst)
|
||||
shutil.copytree(src, dst, ignore=shutil.ignore_patterns('settings.json'))
|
||||
shutil.copytree(src, dst, ignore=shutil.ignore_patterns('settings.json', 'civitai'))
|
||||
else:
|
||||
if item == 'settings.json':
|
||||
continue
|
||||
shutil.copy2(src, dst)
|
||||
|
||||
os.remove(zip_path)
|
||||
logger.info(f"Updated plugin via ZIP to {version}")
|
||||
return True, version
|
||||
# Write .tracking file: list all files under extracted_root, relative to extracted_root
|
||||
# for ComfyUI Manager to work properly
|
||||
tracking_info_file = os.path.join(plugin_root, '.tracking')
|
||||
tracking_files = []
|
||||
for root, dirs, files in os.walk(extracted_root):
|
||||
# Skip civitai folder and its contents
|
||||
rel_root = os.path.relpath(root, extracted_root)
|
||||
if rel_root == 'civitai' or rel_root.startswith('civitai' + os.sep):
|
||||
continue
|
||||
for file in files:
|
||||
rel_path = os.path.relpath(os.path.join(root, file), extracted_root)
|
||||
# Skip settings.json and any file under civitai
|
||||
if rel_path == 'settings.json' or rel_path.startswith('civitai' + os.sep):
|
||||
continue
|
||||
tracking_files.append(rel_path.replace("\\", "/"))
|
||||
with open(tracking_info_file, "w", encoding='utf-8') as file:
|
||||
file.write('\n'.join(tracking_files))
|
||||
|
||||
os.remove(zip_path)
|
||||
logger.info(f"Updated plugin via ZIP to {version}")
|
||||
return True, version
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"ZIP update failed: {e}", exc_info=True)
|
||||
@@ -236,23 +265,23 @@ class UpdateRoutes:
|
||||
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/commits/main"
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(github_url, headers={'Accept': 'application/vnd.github+json'}) as response:
|
||||
if response.status != 200:
|
||||
logger.warning(f"Failed to fetch GitHub commit: {response.status}")
|
||||
return "main", []
|
||||
|
||||
data = await response.json()
|
||||
commit_sha = data.get('sha', '')[:7] # Short hash
|
||||
commit_message = data.get('commit', {}).get('message', '')
|
||||
|
||||
# Format as "main-{short_hash}"
|
||||
version = f"main-{commit_sha}"
|
||||
|
||||
# Use commit message as changelog
|
||||
changelog = [commit_message] if commit_message else []
|
||||
|
||||
return version, changelog
|
||||
downloader = await Downloader.get_instance()
|
||||
success, data = await downloader.make_request('GET', github_url, custom_headers={'Accept': 'application/vnd.github+json'})
|
||||
|
||||
if not success:
|
||||
logger.warning(f"Failed to fetch GitHub commit: {data}")
|
||||
return "main", []
|
||||
|
||||
commit_sha = data.get('sha', '')[:7] # Short hash
|
||||
commit_message = data.get('commit', {}).get('message', '')
|
||||
|
||||
# Format as "main-{short_hash}"
|
||||
version = f"main-{commit_sha}"
|
||||
|
||||
# Use commit message as changelog
|
||||
changelog = [commit_message] if commit_message else []
|
||||
|
||||
return version, changelog
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching nightly version: {e}", exc_info=True)
|
||||
@@ -364,65 +393,28 @@ class UpdateRoutes:
|
||||
"""Get Git repository information"""
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
plugin_root = os.path.dirname(os.path.dirname(current_dir))
|
||||
|
||||
|
||||
git_info = {
|
||||
'commit_hash': 'unknown',
|
||||
'short_hash': 'stable',
|
||||
'branch': 'unknown',
|
||||
'commit_date': 'unknown'
|
||||
}
|
||||
|
||||
|
||||
try:
|
||||
# Check if we're in a git repository
|
||||
if not os.path.exists(os.path.join(plugin_root, '.git')):
|
||||
return git_info
|
||||
|
||||
# Get current commit hash
|
||||
result = subprocess.run(
|
||||
['git', 'rev-parse', 'HEAD'],
|
||||
cwd=plugin_root,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
check=False
|
||||
)
|
||||
if result.returncode == 0:
|
||||
git_info['commit_hash'] = result.stdout.strip()
|
||||
git_info['short_hash'] = git_info['commit_hash'][:7]
|
||||
|
||||
# Get current branch name
|
||||
result = subprocess.run(
|
||||
['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
|
||||
cwd=plugin_root,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
check=False
|
||||
)
|
||||
if result.returncode == 0:
|
||||
git_info['branch'] = result.stdout.strip()
|
||||
|
||||
# Get commit date
|
||||
result = subprocess.run(
|
||||
['git', 'show', '-s', '--format=%ci', 'HEAD'],
|
||||
cwd=plugin_root,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
check=False
|
||||
)
|
||||
if result.returncode == 0:
|
||||
commit_date = result.stdout.strip()
|
||||
# Format the date nicely if possible
|
||||
try:
|
||||
date_obj = datetime.strptime(commit_date, '%Y-%m-%d %H:%M:%S %z')
|
||||
git_info['commit_date'] = date_obj.strftime('%Y-%m-%d')
|
||||
except:
|
||||
git_info['commit_date'] = commit_date
|
||||
|
||||
|
||||
repo = git.Repo(plugin_root)
|
||||
commit = repo.head.commit
|
||||
git_info['commit_hash'] = commit.hexsha
|
||||
git_info['short_hash'] = commit.hexsha[:7]
|
||||
git_info['branch'] = repo.active_branch.name if not repo.head.is_detached else 'detached'
|
||||
git_info['commit_date'] = commit.committed_datetime.strftime('%Y-%m-%d')
|
||||
except Exception as e:
|
||||
logger.warning(f"Error getting git info: {e}")
|
||||
|
||||
|
||||
return git_info
|
||||
|
||||
@staticmethod
|
||||
@@ -439,22 +431,22 @@ class UpdateRoutes:
|
||||
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
|
||||
|
||||
try:
|
||||
async with aiohttp.ClientSession() as session:
|
||||
async with session.get(github_url, headers={'Accept': 'application/vnd.github+json'}) as response:
|
||||
if response.status != 200:
|
||||
logger.warning(f"Failed to fetch GitHub release: {response.status}")
|
||||
return "v0.0.0", []
|
||||
|
||||
data = await response.json()
|
||||
version = data.get('tag_name', '')
|
||||
if not version.startswith('v'):
|
||||
version = f"v{version}"
|
||||
|
||||
# Extract changelog from release notes
|
||||
body = data.get('body', '')
|
||||
changelog = UpdateRoutes._parse_changelog(body)
|
||||
|
||||
return version, changelog
|
||||
downloader = await Downloader.get_instance()
|
||||
success, data = await downloader.make_request('GET', github_url, custom_headers={'Accept': 'application/vnd.github+json'})
|
||||
|
||||
if not success:
|
||||
logger.warning(f"Failed to fetch GitHub release: {data}")
|
||||
return "v0.0.0", []
|
||||
|
||||
version = data.get('tag_name', '')
|
||||
if not version.startswith('v'):
|
||||
version = f"v{version}"
|
||||
|
||||
# Extract changelog from release notes
|
||||
body = data.get('body', '')
|
||||
changelog = UpdateRoutes._parse_changelog(body)
|
||||
|
||||
return version, changelog
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching remote version: {e}", exc_info=True)
@@ -1,8 +1,10 @@
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Type
import logging
import os

from ..utils.models import BaseModelMetadata
from ..utils.routes_common import ModelRouteUtils
from ..utils.constants import NSFW_LEVELS
from .settings_manager import settings
from ..utils.utils import fuzzy_match
@@ -67,7 +69,7 @@ class BaseModelService(ABC):
'filename': True,
'modelname': True,
'tags': False,
'recursive': False,
'recursive': True,
}

# Get the base data set using new sort logic
@@ -138,12 +140,20 @@ class BaseModelService(ABC):

# Apply folder filtering
if folder is not None:
if search_options and search_options.get('recursive', False):
if search_options and search_options.get('recursive', True):
# Recursive folder filtering - include all subfolders
data = [
item for item in data
if item['folder'].startswith(folder)
]
# Ensure we match exact folder or its subfolders by checking path boundaries
if folder == "":
# Empty folder means root - include all items
pass # Don't filter anything
else:
# Add trailing slash to ensure we match folder boundaries correctly
folder_with_separator = folder + "/"
data = [
item for item in data
if (item['folder'] == folder or
item['folder'].startswith(folder_with_separator))
]
else:
# Exact folder filtering
data = [
@@ -199,6 +209,22 @@ class BaseModelService(ABC):
for tag in item['tags']):
search_results.append(item)
continue

# Search by creator
civitai = item.get('civitai')
creator_username = ''
if civitai and isinstance(civitai, dict):
creator = civitai.get('creator')
if creator and isinstance(creator, dict):
creator_username = creator.get('username', '')
if search_options.get('creator', False) and creator_username:
if fuzzy_search:
if fuzzy_match(creator_username, search):
search_results.append(item)
continue
elif search.lower() in creator_username.lower():
search_results.append(item)
continue

return search_results
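The creator-search branch above only fires when the 'creator' flag is set in search_options; a small illustration of the non-fuzzy path (the sample item and options are hypothetical):

# Hypothetical item demonstrating the creator-matching branch added above.
item = {'civitai': {'creator': {'username': 'ExampleArtist'}}}
search_options = {'creator': True}
search = 'artist'

creator_username = ''
civitai = item.get('civitai')
if isinstance(civitai, dict) and isinstance(civitai.get('creator'), dict):
    creator_username = civitai['creator'].get('username', '')

# With fuzzy search disabled, a case-insensitive substring test decides the match
matched = search_options.get('creator', False) and search.lower() in creator_username.lower()
print(matched)  # True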
@@ -256,4 +282,170 @@ class BaseModelService(ABC):

def get_model_roots(self) -> List[str]:
"""Get model root directories"""
return self.scanner.get_model_roots()
return self.scanner.get_model_roots()

async def get_folder_tree(self, model_root: str) -> Dict:
"""Get hierarchical folder tree for a specific model root"""
cache = await self.scanner.get_cached_data()

# Build tree structure from folders
tree = {}

for folder in cache.folders:
# Check if this folder belongs to the specified model root
folder_belongs_to_root = False
for root in self.scanner.get_model_roots():
if root == model_root:
folder_belongs_to_root = True
break

if not folder_belongs_to_root:
continue

# Split folder path into components
parts = folder.split('/') if folder else []
current_level = tree

for part in parts:
if part not in current_level:
current_level[part] = {}
current_level = current_level[part]

return tree

async def get_unified_folder_tree(self) -> Dict:
"""Get unified folder tree across all model roots"""
cache = await self.scanner.get_cached_data()

# Build unified tree structure by analyzing all relative paths
unified_tree = {}

# Get all model roots for path normalization
model_roots = self.scanner.get_model_roots()

for folder in cache.folders:
if not folder: # Skip empty folders
continue

# Find which root this folder belongs to by checking the actual file paths
# This is a simplified approach - we'll use the folder as-is since it should already be relative
relative_path = folder

# Split folder path into components
parts = relative_path.split('/')
current_level = unified_tree

for part in parts:
if part not in current_level:
current_level[part] = {}
current_level = current_level[part]

return unified_tree

async def get_model_notes(self, model_name: str) -> Optional[str]:
"""Get notes for a specific model file"""
cache = await self.scanner.get_cached_data()

for model in cache.raw_data:
if model['file_name'] == model_name:
return model.get('notes', '')

return None

async def get_model_preview_url(self, model_name: str) -> Optional[str]:
"""Get the static preview URL for a model file"""
cache = await self.scanner.get_cached_data()

for model in cache.raw_data:
if model['file_name'] == model_name:
preview_url = model.get('preview_url')
if preview_url:
from ..config import config
return config.get_preview_static_url(preview_url)

return '/loras_static/images/no-preview.png'

async def get_model_civitai_url(self, model_name: str) -> Dict[str, Optional[str]]:
"""Get the Civitai URL for a model file"""
cache = await self.scanner.get_cached_data()

for model in cache.raw_data:
if model['file_name'] == model_name:
civitai_data = model.get('civitai', {})
model_id = civitai_data.get('modelId')
version_id = civitai_data.get('id')

if model_id:
civitai_url = f"https://civitai.com/models/{model_id}"
if version_id:
civitai_url += f"?modelVersionId={version_id}"

return {
'civitai_url': civitai_url,
'model_id': str(model_id),
'version_id': str(version_id) if version_id else None
}

return {'civitai_url': None, 'model_id': None, 'version_id': None}

async def get_model_metadata(self, file_path: str) -> Optional[Dict]:
"""Get filtered CivitAI metadata for a model by file path"""
cache = await self.scanner.get_cached_data()

for model in cache.raw_data:
if model.get('file_path') == file_path:
return ModelRouteUtils.filter_civitai_data(model.get("civitai", {}))

return None

async def get_model_description(self, file_path: str) -> Optional[str]:
"""Get model description by file path"""
cache = await self.scanner.get_cached_data()

for model in cache.raw_data:
if model.get('file_path') == file_path:
return model.get('modelDescription', '')

return None

async def search_relative_paths(self, search_term: str, limit: int = 15) -> List[str]:
"""Search model relative file paths for autocomplete functionality"""
cache = await self.scanner.get_cached_data()

matching_paths = []
search_lower = search_term.lower()

# Get model roots for path calculation
model_roots = self.scanner.get_model_roots()

for model in cache.raw_data:
file_path = model.get('file_path', '')
if not file_path:
continue

# Calculate relative path from model root
relative_path = None
for root in model_roots:
# Normalize paths for comparison
normalized_root = os.path.normpath(root)
normalized_file = os.path.normpath(file_path)

if normalized_file.startswith(normalized_root):
# Remove root and leading separator to get relative path
relative_path = normalized_file[len(normalized_root):].lstrip(os.sep)
break

if relative_path and search_lower in relative_path.lower():
matching_paths.append(relative_path)

if len(matching_paths) >= limit * 2: # Get more for better sorting
break

# Sort by relevance (exact matches first, then by length)
matching_paths.sort(key=lambda x: (
not x.lower().startswith(search_lower), # Exact prefix matches first
len(x), # Then by length (shorter first)
x.lower() # Then alphabetically
))

return matching_paths[:limit]
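The three-part sort key deserves a second look: Python sorts False before True, so paths whose lowercase form starts with the query come first, then shorter paths, then alphabetical order. A quick demonstration with hypothetical paths:

# Hypothetical paths; the key mirrors the sort used in search_relative_paths.
paths = ['characters/mylora.safetensors', 'lora-styles/b.safetensors', 'lora/a.safetensors']
search_lower = 'lora'
paths.sort(key=lambda x: (
    not x.lower().startswith(search_lower),  # prefix matches first (False < True)
    len(x),                                  # shorter paths next
    x.lower()                                # alphabetical tie-break
))
print(paths)
# ['lora/a.safetensors', 'lora-styles/b.safetensors', 'characters/mylora.safetensors']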
@@ -13,7 +13,7 @@ class CheckpointScanner(ModelScanner):

def __init__(self):
# Define supported file extensions
file_extensions = {'.safetensors', '.ckpt', '.pt', '.pth', '.sft', '.gguf'}
file_extensions = {'.ckpt', '.pt', '.pt2', '.bin', '.pth', '.safetensors', '.pkl', '.sft', '.gguf'}
super().__init__(
model_type="checkpoint",
model_class=CheckpointMetadata,
@@ -21,6 +21,14 @@ class CheckpointScanner(ModelScanner):
hash_index=ModelHashIndex()
)

def adjust_metadata(self, metadata, file_path, root_path):
if hasattr(metadata, "model_type"):
if root_path in config.checkpoints_roots:
metadata.model_type = "checkpoint"
elif root_path in config.unet_roots:
metadata.model_type = "diffusion_model"
return metadata

def get_model_roots(self) -> List[str]:
"""Get checkpoint root directories"""
return config.base_models_roots
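The new adjust_metadata hook classifies a scanned file by which configured root it sits under. A sketch of the effect, with hypothetical root lists standing in for the real config; note the fall-through behavior is modeled after the hook above, which leaves model_type unchanged when neither root matches:

# Hypothetical roots; mirrors the adjust_metadata branching above.
checkpoints_roots = ['/models/checkpoints']
unet_roots = ['/models/unet']

def classify(root_path, current='checkpoint'):
    if root_path in checkpoints_roots:
        return 'checkpoint'
    if root_path in unet_roots:
        return 'diffusion_model'
    return current  # unchanged when neither root matches

print(classify('/models/unet'))  # diffusion_model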
@@ -34,12 +34,11 @@ class CheckpointService(BaseModelService):
"file_size": checkpoint_data.get("size", 0),
"modified": checkpoint_data.get("modified", ""),
"tags": checkpoint_data.get("tags", []),
"modelDescription": checkpoint_data.get("modelDescription", ""),
"from_civitai": checkpoint_data.get("from_civitai", True),
"notes": checkpoint_data.get("notes", ""),
"model_type": checkpoint_data.get("model_type", "checkpoint"),
"favorite": checkpoint_data.get("favorite", False),
"civitai": ModelRouteUtils.filter_civitai_data(checkpoint_data.get("civitai", {}))
"civitai": ModelRouteUtils.filter_civitai_data(checkpoint_data.get("civitai", {}), minimal=True)
}

def find_duplicate_hashes(self) -> Dict:

@@ -1,11 +1,10 @@
from datetime import datetime
import aiohttp
import os
import logging
import asyncio
from email.parser import Parser
from typing import Optional, Dict, Tuple, List
from urllib.parse import unquote
from .model_metadata_provider import CivitaiModelMetadataProvider, ModelMetadataProviderManager
from .downloader import get_downloader

logger = logging.getLogger(__name__)

@@ -19,6 +18,11 @@ class CivitaiClient:
async with cls._lock:
if cls._instance is None:
cls._instance = cls()

# Register this client as a metadata provider
provider_manager = await ModelMetadataProviderManager.get_instance()
provider_manager.register_provider('civitai', CivitaiModelMetadataProvider(cls._instance), True)

return cls._instance

def __init__(self):
@@ -28,81 +32,9 @@ class CivitaiClient:
self._initialized = True

self.base_url = "https://civitai.com/api/v1"
self.headers = {
'User-Agent': 'ComfyUI-LoRA-Manager/1.0'
}
self._session = None
self._session_created_at = None
# Set default buffer size to 1MB for higher throughput
self.chunk_size = 1024 * 1024

@property
async def session(self) -> aiohttp.ClientSession:
"""Lazy initialize the session"""
if self._session is None:
# Optimize TCP connection parameters
connector = aiohttp.TCPConnector(
ssl=True,
limit=8, # Increase from 3 to 8 for better parallelism
ttl_dns_cache=300, # Enable DNS caching with reasonable timeout
force_close=False, # Keep connections for reuse
enable_cleanup_closed=True
)
trust_env = True # Allow using system environment proxy settings
# Configure timeout parameters - increase read timeout for large files
timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=120)
self._session = aiohttp.ClientSession(
connector=connector,
trust_env=trust_env,
timeout=timeout
)
self._session_created_at = datetime.now()
return self._session

async def _ensure_fresh_session(self):
"""Refresh session if it's been open too long"""
if self._session is not None:
if not hasattr(self, '_session_created_at') or \
(datetime.now() - self._session_created_at).total_seconds() > 300: # 5 minutes
await self.close()
self._session = None

return await self.session

def _parse_content_disposition(self, header: str) -> str:
"""Parse filename from content-disposition header"""
if not header:
return None

# Handle quoted filenames
if 'filename="' in header:
start = header.index('filename="') + 10
end = header.index('"', start)
return unquote(header[start:end])

# Fallback to original parsing
disposition = Parser().parsestr(f'Content-Disposition: {header}')
filename = disposition.get_param('filename')
if filename:
return unquote(filename)
return None

def _get_request_headers(self) -> dict:
"""Get request headers with optional API key"""
headers = {
'User-Agent': 'ComfyUI-LoRA-Manager/1.0',
'Content-Type': 'application/json'
}

from .settings_manager import settings
api_key = settings.get('civitai_api_key')
if (api_key):
headers['Authorization'] = f'Bearer {api_key}'

return headers

async def _download_file(self, url: str, save_dir: str, default_filename: str, progress_callback=None) -> Tuple[bool, str]:
"""Download file with content-disposition support and progress tracking
async def download_file(self, url: str, save_dir: str, default_filename: str, progress_callback=None) -> Tuple[bool, str]:
"""Download file with resumable downloads and retry mechanism

Args:
url: Download URL
@@ -113,135 +45,144 @@ class CivitaiClient:
Returns:
Tuple[bool, str]: (success, save_path or error message)
"""
logger.debug(f"Resolving DNS for: {url}")
session = await self._ensure_fresh_session()
try:
headers = self._get_request_headers()

# Add Range header to allow resumable downloads
headers['Accept-Encoding'] = 'identity' # Disable compression for better chunked downloads

logger.debug(f"Starting download from: {url}")
async with session.get(url, headers=headers, allow_redirects=True) as response:
if response.status != 200:
# Handle 401 unauthorized responses
if response.status == 401:
logger.warning(f"Unauthorized access to resource: {url} (Status 401)")

return False, "Invalid or missing CivitAI API key, or early access restriction."

# Handle other client errors that might be permission-related
if response.status == 403:
logger.warning(f"Forbidden access to resource: {url} (Status 403)")
return False, "Access forbidden: You don't have permission to download this file."

# Generic error response for other status codes
logger.error(f"Download failed for {url} with status {response.status}")
return False, f"Download failed with status {response.status}"

# Get filename from content-disposition header
content_disposition = response.headers.get('Content-Disposition')
filename = self._parse_content_disposition(content_disposition)
if not filename:
filename = default_filename

save_path = os.path.join(save_dir, filename)

# Get total file size for progress calculation
total_size = int(response.headers.get('content-length', 0))
current_size = 0
last_progress_report_time = datetime.now()

# Stream download to file with progress updates using larger buffer
with open(save_path, 'wb') as f:
async for chunk in response.content.iter_chunked(self.chunk_size):
if chunk:
f.write(chunk)
current_size += len(chunk)

# Limit progress update frequency to reduce overhead
now = datetime.now()
time_diff = (now - last_progress_report_time).total_seconds()

if progress_callback and total_size and time_diff >= 1.0:
progress = (current_size / total_size) * 100
await progress_callback(progress)
last_progress_report_time = now

# Ensure 100% progress is reported
if progress_callback:
await progress_callback(100)

return True, save_path

except aiohttp.ClientError as e:
logger.error(f"Network error during download: {e}")
return False, f"Network error: {str(e)}"
except Exception as e:
logger.error(f"Download error: {e}")
return False, str(e)
downloader = await get_downloader()
save_path = os.path.join(save_dir, default_filename)

# Use unified downloader with CivitAI authentication
success, result = await downloader.download_file(
url=url,
save_path=save_path,
progress_callback=progress_callback,
use_auth=True, # Enable CivitAI authentication
allow_resume=True
)

return success, result

async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
try:
session = await self._ensure_fresh_session()
async with session.get(f"{self.base_url}/model-versions/by-hash/{model_hash}") as response:
if response.status == 200:
return await response.json()
return None
downloader = await get_downloader()
success, version = await downloader.make_request(
'GET',
f"{self.base_url}/model-versions/by-hash/{model_hash}",
use_auth=True
)
if success:
# Get model ID from version data
model_id = version.get('modelId')
if model_id:
# Fetch additional model metadata
success_model, data = await downloader.make_request(
'GET',
f"{self.base_url}/models/{model_id}",
use_auth=True
)
if success_model:
# Enrich version_info with model data
version['model']['description'] = data.get("description")
version['model']['tags'] = data.get("tags", [])

# Add creator from model data
version['creator'] = data.get("creator")

return version
return None
except Exception as e:
logger.error(f"API Error: {str(e)}")
return None

async def download_preview_image(self, image_url: str, save_path: str):
try:
session = await self._ensure_fresh_session()
async with session.get(image_url) as response:
if response.status == 200:
content = await response.read()
with open(save_path, 'wb') as f:
f.write(content)
return True
return False
downloader = await get_downloader()
success, content, headers = await downloader.download_to_memory(
image_url,
use_auth=False # Preview images don't need auth
)
if success:
# Ensure directory exists
os.makedirs(os.path.dirname(save_path), exist_ok=True)
with open(save_path, 'wb') as f:
f.write(content)
return True
return False
except Exception as e:
print(f"Download Error: {str(e)}")
logger.error(f"Download Error: {str(e)}")
return False
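A detail of the rewrite above: download_to_memory appears to return a (success, content, headers) triple, leaving directory creation and the disk write to the caller. A minimal sketch of that contract (fetch_to_path is an illustrative helper, not part of the diff):

import os

# Assumes the (success, content, headers) contract seen in the call above.
async def fetch_to_path(downloader, url: str, save_path: str) -> bool:
    success, content, headers = await downloader.download_to_memory(url, use_auth=False)
    if not success:
        return False
    os.makedirs(os.path.dirname(save_path), exist_ok=True)  # caller owns dir creation
    with open(save_path, 'wb') as f:
        f.write(content)
    return True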
async def get_model_versions(self, model_id: str) -> List[Dict]:
"""Get all versions of a model with local availability info"""
try:
session = await self._ensure_fresh_session() # Use fresh session
async with session.get(f"{self.base_url}/models/{model_id}") as response:
if response.status != 200:
return None
data = await response.json()
downloader = await get_downloader()
success, result = await downloader.make_request(
'GET',
f"{self.base_url}/models/{model_id}",
use_auth=True
)
if success:
# Also return model type along with versions
return {
'modelVersions': data.get('modelVersions', []),
'type': data.get('type', '')
'modelVersions': result.get('modelVersions', []),
'type': result.get('type', '')
}
return None
except Exception as e:
logger.error(f"Error fetching model versions: {e}")
return None

async def get_model_version(self, model_id: int, version_id: int = None) -> Optional[Dict]:
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
"""Get specific model version with additional metadata

Args:
model_id: The Civitai model ID
model_id: The Civitai model ID (optional if version_id is provided)
version_id: Optional specific version ID to retrieve

Returns:
Optional[Dict]: The model version data with additional fields or None if not found
"""
try:
session = await self._ensure_fresh_session()
downloader = await get_downloader()

# Step 1: Get model data to find version_id if not provided and get additional metadata
async with session.get(f"{self.base_url}/models/{model_id}") as response:
if response.status != 200:
# Case 1: Only version_id is provided
if model_id is None and version_id is not None:
# First get the version info to extract model_id
success, version = await downloader.make_request(
'GET',
f"{self.base_url}/model-versions/{version_id}",
use_auth=True
)
if not success:
return None

model_id = version.get('modelId')
if not model_id:
logger.error(f"No modelId found in version {version_id}")
return None

# Now get the model data for additional metadata
success, model_data = await downloader.make_request(
'GET',
f"{self.base_url}/models/{model_id}",
use_auth=True
)
if success:
# Enrich version with model data
version['model']['description'] = model_data.get("description")
version['model']['tags'] = model_data.get("tags", [])
version['creator'] = model_data.get("creator")

return version

# Case 2: model_id is provided (with or without version_id)
elif model_id is not None:
# Step 1: Get model data to find version_id if not provided and get additional metadata
success, data = await downloader.make_request(
'GET',
f"{self.base_url}/models/{model_id}",
use_auth=True
)
if not success:
return None

data = await response.json()
model_versions = data.get('modelVersions', [])

# Step 2: Determine the version_id to use
@@ -249,14 +190,15 @@ class CivitaiClient:
if target_version_id is None:
target_version_id = model_versions[0].get('id')

# Step 3: Get detailed version info using the version_id
headers = self._get_request_headers()
async with session.get(f"{self.base_url}/model-versions/{target_version_id}", headers=headers) as response:
if response.status != 200:
# Step 3: Get detailed version info using the version_id
success, version = await downloader.make_request(
'GET',
f"{self.base_url}/model-versions/{target_version_id}",
use_auth=True
)
if not success:
return None

version = await response.json()

# Step 4: Enrich version_info with model data
# Add description and tags from model data
version['model']['description'] = data.get("description")
@@ -266,6 +208,11 @@ class CivitaiClient:
version['creator'] = data.get("creator")

return version

# Case 3: Neither model_id nor version_id provided
else:
logger.error("Either model_id or version_id must be provided")
return None

except Exception as e:
logger.error(f"Error fetching model version: {e}")
@@ -283,30 +230,29 @@ class CivitaiClient:
- An error message if there was an error, or None on success
"""
try:
session = await self._ensure_fresh_session()
downloader = await get_downloader()
url = f"{self.base_url}/model-versions/{version_id}"
headers = self._get_request_headers()

logger.debug(f"Resolving DNS for model version info: {url}")
async with session.get(url, headers=headers) as response:
if response.status == 200:
logger.debug(f"Successfully fetched model version info for: {version_id}")
return await response.json(), None

# Handle specific error cases
if response.status == 404:
# Try to parse the error message
try:
error_data = await response.json()
error_msg = error_data.get('error', f"Model not found (status 404)")
logger.warning(f"Model version not found: {version_id} - {error_msg}")
return None, error_msg
except:
return None, "Model not found (status 404)"

# Other error cases
logger.error(f"Failed to fetch model info for {version_id} (status {response.status})")
return None, f"Failed to fetch model info (status {response.status})"
success, result = await downloader.make_request(
'GET',
url,
use_auth=True
)

if success:
logger.debug(f"Successfully fetched model version info for: {version_id}")
return result, None

# Handle specific error cases
if "404" in str(result):
error_msg = f"Model not found (status 404)"
logger.warning(f"Model version not found: {version_id} - {error_msg}")
return None, error_msg

# Other error cases
logger.error(f"Failed to fetch model info for {version_id}: {result}")
return None, str(result)
except Exception as e:
error_msg = f"Error fetching model version info: {e}"
logger.error(error_msg)
@@ -321,78 +267,50 @@ class CivitaiClient:
Returns:
Tuple[Optional[Dict], int]: A tuple containing:
- A dictionary with model metadata or None if not found
- The HTTP status code from the request
- The HTTP status code from the request (0 for exceptions)
"""
try:
session = await self._ensure_fresh_session()
headers = self._get_request_headers()
downloader = await get_downloader()
url = f"{self.base_url}/models/{model_id}"

async with session.get(url, headers=headers) as response:
status_code = response.status

if status_code != 200:
logger.warning(f"Failed to fetch model metadata: Status {status_code}")
return None, status_code

data = await response.json()

# Extract relevant metadata
metadata = {
"description": data.get("description") or "No model description available",
"tags": data.get("tags", []),
"creator": {
"username": data.get("creator", {}).get("username"),
"image": data.get("creator", {}).get("image")
}
success, result = await downloader.make_request(
'GET',
url,
use_auth=True
)

if not success:
# Try to extract status code from error message
status_code = 0
if "404" in str(result):
status_code = 404
elif "401" in str(result):
status_code = 401
elif "403" in str(result):
status_code = 403
logger.warning(f"Failed to fetch model metadata: {result}")
return None, status_code

# Extract relevant metadata
metadata = {
"description": result.get("description") or "No model description available",
"tags": result.get("tags", []),
"creator": {
"username": result.get("creator", {}).get("username"),
"image": result.get("creator", {}).get("image")
}

if metadata["description"] or metadata["tags"] or metadata["creator"]["username"]:
return metadata, status_code
else:
logger.warning(f"No metadata found for model {model_id}")
return None, status_code
}

if metadata["description"] or metadata["tags"] or metadata["creator"]["username"]:
return metadata, 200
else:
logger.warning(f"No metadata found for model {model_id}")
return None, 200

except Exception as e:
logger.error(f"Error fetching model metadata: {e}", exc_info=True)
return None, 0

# Keep old method for backward compatibility, delegating to the new one
async def get_model_description(self, model_id: str) -> Optional[str]:
"""Fetch the model description from Civitai API (Legacy method)"""
metadata, _ = await self.get_model_metadata(model_id)
return metadata.get("description") if metadata else None

async def close(self):
"""Close the session if it exists"""
if self._session is not None:
await self._session.close()
self._session = None

async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]:
"""Get hash from Civitai API"""
try:
session = await self._ensure_fresh_session()
if not session:
return None

version_info = await session.get(f"{self.base_url}/model-versions/{model_version_id}")

if not version_info or not version_info.json().get('files'):
return None

# Get hash from the first file
for file_info in version_info.json().get('files', []):
if file_info.get('hashes', {}).get('SHA256'):
# Convert hash to lowercase to standardize
hash_value = file_info['hashes']['SHA256'].lower()
return hash_value

return None
except Exception as e:
logger.error(f"Error getting hash from Civitai: {e}")
return None

async def get_image_info(self, image_id: str) -> Optional[Dict]:
"""Fetch image information from Civitai API

@@ -403,22 +321,25 @@ class CivitaiClient:
Optional[Dict]: The image data or None if not found
"""
try:
session = await self._ensure_fresh_session()
headers = self._get_request_headers()
downloader = await get_downloader()
url = f"{self.base_url}/images?imageId={image_id}&nsfw=X"

logger.debug(f"Fetching image info for ID: {image_id}")
async with session.get(url, headers=headers) as response:
if response.status == 200:
data = await response.json()
if data and "items" in data and len(data["items"]) > 0:
logger.debug(f"Successfully fetched image info for ID: {image_id}")
return data["items"][0]
logger.warning(f"No image found with ID: {image_id}")
return None

logger.error(f"Failed to fetch image info for ID: {image_id} (status {response.status})")
success, result = await downloader.make_request(
'GET',
url,
use_auth=True
)

if success:
if result and "items" in result and len(result["items"]) > 0:
logger.debug(f"Successfully fetched image info for ID: {image_id}")
return result["items"][0]
logger.warning(f"No image found with ID: {image_id}")
return None

logger.error(f"Failed to fetch image info for ID: {image_id}: {result}")
return None
except Exception as e:
error_msg = f"Error fetching image info: {e}"
logger.error(error_msg)
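Since get_model_version now accepts either identifier, the supported call shapes are worth spelling out; client and the IDs here are hypothetical, and the body assumes an async context:

# Illustrative call shapes for the reworked get_model_version:
async def demo(client):
    v1 = await client.get_model_version(model_id=12345)                  # newest version of the model
    v2 = await client.get_model_version(model_id=12345, version_id=678)  # a specific version
    v3 = await client.get_model_version(version_id=678)                  # model_id resolved from the version
    v4 = await client.get_model_version()                                # logs an error and returns None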
@@ -10,6 +10,8 @@ from ..utils.exif_utils import ExifUtils
from ..utils.metadata_manager import MetadataManager
from .service_registry import ServiceRegistry
from .settings_manager import settings
from .metadata_service import get_default_metadata_provider
from .downloader import get_downloader

# Download to temporary file first
import tempfile
@@ -34,17 +36,10 @@ class DownloadManager:
return
self._initialized = True

self._civitai_client = None # Will be lazily initialized
# Add download management
self._active_downloads = OrderedDict() # download_id -> download_info
self._download_semaphore = asyncio.Semaphore(5) # Limit concurrent downloads
self._download_tasks = {} # download_id -> asyncio.Task

async def _get_civitai_client(self):
"""Lazily initialize CivitaiClient from registry"""
if self._civitai_client is None:
self._civitai_client = await ServiceRegistry.get_civitai_client()
return self._civitai_client

async def _get_lora_scanner(self):
"""Get the lora scanner from registry"""
@@ -54,24 +49,29 @@ class DownloadManager:
"""Get the checkpoint scanner from registry"""
return await ServiceRegistry.get_checkpoint_scanner()

async def download_from_civitai(self, model_id: int, model_version_id: int,
async def download_from_civitai(self, model_id: int = None, model_version_id: int = None,
save_dir: str = None, relative_path: str = '',
progress_callback=None, use_default_paths: bool = False,
download_id: str = None) -> Dict:
download_id: str = None, source: str = None) -> Dict:
"""Download model from Civitai with task tracking and concurrency control

Args:
model_id: Civitai model ID
model_version_id: Civitai model version ID
model_id: Civitai model ID (optional if model_version_id is provided)
model_version_id: Civitai model version ID (optional if model_id is provided)
save_dir: Directory to save the model
relative_path: Relative path within save_dir
progress_callback: Callback function for progress updates
use_default_paths: Flag to use default paths
download_id: Unique identifier for this download task
source: Optional source parameter to specify metadata provider

Returns:
Dict with download result
"""
# Validate that at least one identifier is provided
if not model_id and not model_version_id:
return {'success': False, 'error': 'Either model_id or model_version_id must be provided'}

# Use provided download_id or generate new one
task_id = download_id or str(uuid.uuid4())

@@ -87,7 +87,7 @@ class DownloadManager:
download_task = asyncio.create_task(
self._download_with_semaphore(
task_id, model_id, model_version_id, save_dir,
relative_path, progress_callback, use_default_paths
relative_path, progress_callback, use_default_paths, source
)
)

@@ -108,7 +108,8 @@ class DownloadManager:

async def _download_with_semaphore(self, task_id: str, model_id: int, model_version_id: int,
save_dir: str, relative_path: str,
progress_callback=None, use_default_paths: bool = False):
progress_callback=None, use_default_paths: bool = False,
source: str = None):
"""Execute download with semaphore to limit concurrency"""
# Update status to waiting
if task_id in self._active_downloads:
@@ -138,7 +139,7 @@ class DownloadManager:
result = await self._execute_original_download(
model_id, model_version_id, save_dir,
relative_path, tracking_callback, use_default_paths,
task_id
task_id, source
)

# Update status based on result
@@ -173,7 +174,7 @@ class DownloadManager:

async def _execute_original_download(self, model_id, model_version_id, save_dir,
relative_path, progress_callback, use_default_paths,
download_id=None):
download_id=None, source=None):
"""Wrapper for original download_from_civitai implementation"""
try:
# Check if model version already exists in library
@@ -181,20 +182,29 @@ class DownloadManager:
# Check both scanners
lora_scanner = await self._get_lora_scanner()
checkpoint_scanner = await self._get_checkpoint_scanner()
embedding_scanner = await ServiceRegistry.get_embedding_scanner()

# Check lora scanner first
if await lora_scanner.check_model_version_exists(model_id, model_version_id):
if await lora_scanner.check_model_version_exists(model_version_id):
return {'success': False, 'error': 'Model version already exists in lora library'}

# Check checkpoint scanner
if await checkpoint_scanner.check_model_version_exists(model_id, model_version_id):
if await checkpoint_scanner.check_model_version_exists(model_version_id):
return {'success': False, 'error': 'Model version already exists in checkpoint library'}

# Check embedding scanner
if await embedding_scanner.check_model_version_exists(model_version_id):
return {'success': False, 'error': 'Model version already exists in embedding library'}

# Get civitai client
civitai_client = await self._get_civitai_client()
# Get metadata provider based on source parameter
if source == 'civarchive':
from .metadata_service import get_metadata_provider
metadata_provider = await get_metadata_provider('civarchive')
else:
metadata_provider = await get_default_metadata_provider()

# Get version info based on the provided identifier
version_info = await civitai_client.get_model_version(model_id, model_version_id)
version_info = await metadata_provider.get_model_version(model_id, model_version_id)

if not version_info:
return {'success': False, 'error': 'Failed to fetch model metadata'}
@@ -211,23 +221,22 @@ class DownloadManager:

# Case 2: model_version_id was None, check after getting version_info
if model_version_id is None:
version_model_id = version_info.get('modelId')
version_id = version_info.get('id')

if model_type == 'lora':
# Check lora scanner
lora_scanner = await self._get_lora_scanner()
if await lora_scanner.check_model_version_exists(version_model_id, version_id):
if await lora_scanner.check_model_version_exists(version_id):
return {'success': False, 'error': 'Model version already exists in lora library'}
elif model_type == 'checkpoint':
# Check checkpoint scanner
checkpoint_scanner = await self._get_checkpoint_scanner()
if await checkpoint_scanner.check_model_version_exists(version_model_id, version_id):
if await checkpoint_scanner.check_model_version_exists(version_id):
return {'success': False, 'error': 'Model version already exists in checkpoint library'}
elif model_type == 'embedding':
# Embeddings are not checked in scanners, but we can still check if it exists
embedding_scanner = await ServiceRegistry.get_embedding_scanner()
if await embedding_scanner.check_model_version_exists(version_model_id, version_id):
if await embedding_scanner.check_model_version_exists(version_id):
return {'success': False, 'error': 'Model version already exists in embedding library'}

# Handle use_default_paths
@@ -250,7 +259,7 @@ class DownloadManager:
save_dir = default_path

# Calculate relative path using template
relative_path = self._calculate_relative_path(version_info)
relative_path = self._calculate_relative_path(version_info, model_type)

# Update save directory with relative path if provided
if relative_path:
@@ -266,9 +275,9 @@ class DownloadManager:
from datetime import datetime
date_obj = datetime.fromisoformat(early_access_date.replace('Z', '+00:00'))
formatted_date = date_obj.strftime('%Y-%m-%d')
early_access_msg = f"This model requires early access payment (until {formatted_date}). "
early_access_msg = f"This model requires payment (until {formatted_date}). "
except:
early_access_msg = "This model requires early access payment. "
early_access_msg = "This model requires payment. "

early_access_msg += "Please ensure you have purchased early access and are logged in to Civitai."
logger.warning(f"Early access model detected: {version_info.get('name', 'Unknown')}")
@@ -285,6 +294,8 @@ class DownloadManager:
file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
if not file_info:
return {'success': False, 'error': 'No primary file found in metadata'}
if not file_info.get('downloadUrl'):
return {'success': False, 'error': 'No download URL found for primary file'}

# 3. Prepare download
file_name = file_info['name']
@@ -313,6 +324,10 @@ class DownloadManager:
download_id=download_id
)

# If early_access_msg exists and download failed, replace error message
if 'early_access_msg' in locals() and not result.get('success', False):
result['error'] = early_access_msg

return result

except Exception as e:
@@ -323,17 +338,18 @@ class DownloadManager:
return {'success': False, 'error': f"Early access restriction: {str(e)}. Please ensure you have purchased early access and are logged in to Civitai."}
return {'success': False, 'error': str(e)}

def _calculate_relative_path(self, version_info: Dict) -> str:
def _calculate_relative_path(self, version_info: Dict, model_type: str = 'lora') -> str:
"""Calculate relative path using template from settings

Args:
version_info: Version info from Civitai API
model_type: Type of model ('lora', 'checkpoint', 'embedding')

Returns:
Relative path string
"""
# Get path template from settings, default to '{base_model}/{first_tag}'
path_template = settings.get('download_path_template', '{base_model}/{first_tag}')
# Get path template from settings for specific model type
path_template = settings.get_download_path_template(model_type)

# If template is empty, return empty path (flat structure)
if not path_template:
@@ -342,6 +358,13 @@ class DownloadManager:
# Get base model name
base_model = version_info.get('baseModel', '')

# Get author from creator data
creator_info = version_info.get('creator')
if creator_info and isinstance(creator_info, dict):
author = creator_info.get('username') or 'Anonymous'
else:
author = 'Anonymous'

# Apply mapping if available
base_model_mappings = settings.get('base_model_path_mappings', {})
mapped_base_model = base_model_mappings.get(base_model, base_model)
@@ -364,22 +387,49 @@ class DownloadManager:
formatted_path = path_template
formatted_path = formatted_path.replace('{base_model}', mapped_base_model)
formatted_path = formatted_path.replace('{first_tag}', first_tag)
formatted_path = formatted_path.replace('{author}', author)

return formatted_path
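The template substitution above is plain string replacement over the three placeholders; a worked example with a hypothetical version_info already reduced to its inputs:

# Hypothetical inputs; mirrors the substitution logic above.
path_template = '{base_model}/{first_tag}'
mapped_base_model = 'SDXL'        # after base_model_path_mappings is applied
first_tag = 'style'
author = 'ExampleCreator'

formatted_path = path_template
formatted_path = formatted_path.replace('{base_model}', mapped_base_model)
formatted_path = formatted_path.replace('{first_tag}', first_tag)
formatted_path = formatted_path.replace('{author}', author)
print(formatted_path)  # SDXL/style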
async def _execute_download(self, download_url: str, save_dir: str,
metadata, version_info: Dict,
relative_path: str, progress_callback=None,
model_type: str = "lora", download_id: str = None) -> Dict:
metadata, version_info: Dict,
relative_path: str, progress_callback=None,
model_type: str = "lora", download_id: str = None) -> Dict:
"""Execute the actual download process including preview images and model files"""
try:
civitai_client = await self._get_civitai_client()
save_path = metadata.file_path
# Extract original filename details
original_filename = os.path.basename(metadata.file_path)
base_name, extension = os.path.splitext(original_filename)

# Check for filename conflicts and generate unique filename if needed
# Use the hash from metadata for conflict resolution
def hash_provider():
return metadata.sha256

unique_filename = metadata.generate_unique_filename(
save_dir,
base_name,
extension,
hash_provider=hash_provider
)

# Update paths if filename changed
if unique_filename != original_filename:
logger.info(f"Filename conflict detected. Changing '{original_filename}' to '{unique_filename}'")
save_path = os.path.join(save_dir, unique_filename)
# Update metadata with new file path and name
metadata.file_path = save_path.replace(os.sep, '/')
metadata.file_name = os.path.splitext(unique_filename)[0]
else:
save_path = metadata.file_path

part_path = save_path + '.part'
metadata_path = os.path.splitext(save_path)[0] + '.metadata.json'

# Store file path in active_downloads for potential cleanup
# Store file paths in active_downloads for potential cleanup
if download_id and download_id in self._active_downloads:
self._active_downloads[download_id]['file_path'] = save_path
self._active_downloads[download_id]['part_path'] = part_path

# Download preview image if available
images = version_info.get('images', [])
@@ -396,8 +446,14 @@ class DownloadManager:
preview_ext = '.mp4'
preview_path = os.path.splitext(save_path)[0] + preview_ext

# Download video directly
if await civitai_client.download_preview_image(images[0]['url'], preview_path):
# Download video directly using downloader
downloader = await get_downloader()
success, result = await downloader.download_file(
images[0]['url'],
preview_path,
use_auth=False # Preview images typically don't need auth
)
if success:
metadata.preview_url = preview_path.replace(os.sep, '/')
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
else:
@@ -405,8 +461,16 @@ class DownloadManager:
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
temp_path = temp_file.name

# Download the original image to temp path
if await civitai_client.download_preview_image(images[0]['url'], temp_path):
# Download the original image to temp path using downloader
downloader = await get_downloader()
success, content, headers = await downloader.download_to_memory(
images[0]['url'],
use_auth=False
)
if success:
# Save to temp file
with open(temp_path, 'wb') as f:
f.write(content)
# Optimize and convert to WebP
preview_path = os.path.splitext(save_path)[0] + '.webp'

@@ -437,26 +501,41 @@ class DownloadManager:
if progress_callback:
await progress_callback(3) # 3% progress after preview download

# Download model file with progress tracking
success, result = await civitai_client._download_file(
# Download model file with progress tracking using downloader
downloader = await get_downloader()
# Determine if the download URL is from Civitai
use_auth = download_url.startswith("https://civitai.com/api/download/")
success, result = await downloader.download_file(
download_url,
save_dir,
os.path.basename(save_path),
progress_callback=lambda p: self._handle_download_progress(p, progress_callback)
save_path, # Use full path instead of separate dir and filename
progress_callback=lambda p: self._handle_download_progress(p, progress_callback),
use_auth=use_auth # Only use authentication for Civitai downloads
)

if not success:
# Clean up files on failure
for path in [save_path, metadata_path, metadata.preview_url]:
# Clean up files on failure, but preserve .part file for resume
cleanup_files = [metadata_path]
if metadata.preview_url and os.path.exists(metadata.preview_url):
cleanup_files.append(metadata.preview_url)

for path in cleanup_files:
if path and os.path.exists(path):
os.remove(path)
try:
os.remove(path)
except Exception as e:
logger.warning(f"Failed to cleanup file {path}: {e}")

# Log but don't remove .part file to allow resume
if os.path.exists(part_path):
logger.info(f"Preserving partial download for resume: {part_path}")

return {'success': False, 'error': result}

# 4. Update file information (size and modified time)
metadata.update_file_info(save_path)

# 5. Final metadata update
await MetadataManager.save_metadata(save_path, metadata, True)
await MetadataManager.save_metadata(save_path, metadata)

# 6. Update cache based on model type
if model_type == "checkpoint":
@@ -485,10 +564,18 @@ class DownloadManager:

except Exception as e:
logger.error(f"Error in _execute_download: {e}", exc_info=True)
# Clean up partial downloads
for path in [save_path, metadata_path]:
# Clean up partial downloads except .part file
cleanup_files = [metadata_path]
if hasattr(metadata, 'preview_url') and metadata.preview_url and os.path.exists(metadata.preview_url):
cleanup_files.append(metadata.preview_url)

for path in cleanup_files:
if path and os.path.exists(path):
os.remove(path)
try:
os.remove(path)
except Exception as e:
logger.warning(f"Failed to cleanup file {path}: {e}")

return {'success': False, 'error': str(e)}

async def _handle_download_progress(self, file_progress: float, progress_callback):
@@ -530,35 +617,48 @@ class DownloadManager:
except (asyncio.CancelledError, asyncio.TimeoutError):
pass

# Clean up partial downloads
# Clean up ALL files including .part when user cancels
download_info = self._active_downloads.get(download_id)
if download_info and 'file_path' in download_info:
# Delete the partial file
file_path = download_info['file_path']
if os.path.exists(file_path):
try:
os.unlink(file_path)
logger.debug(f"Deleted partial download: {file_path}")
except Exception as e:
logger.error(f"Error deleting partial file: {e}")
if download_info:
# Delete the main file
if 'file_path' in download_info:
file_path = download_info['file_path']
if os.path.exists(file_path):
try:
os.unlink(file_path)
logger.debug(f"Deleted cancelled download: {file_path}")
except Exception as e:
logger.error(f"Error deleting file: {e}")

# Delete the .part file (only on user cancellation)
if 'part_path' in download_info:
part_path = download_info['part_path']
if os.path.exists(part_path):
try:
os.unlink(part_path)
logger.debug(f"Deleted partial download: {part_path}")
except Exception as e:
logger.error(f"Error deleting part file: {e}")

# Delete metadata file if exists
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
if os.path.exists(metadata_path):
try:
os.unlink(metadata_path)
except Exception as e:
logger.error(f"Error deleting metadata file: {e}")

# Delete preview file if exists (.webp or .mp4)
for preview_ext in ['.webp', '.mp4']:
preview_path = os.path.splitext(file_path)[0] + preview_ext
if os.path.exists(preview_path):
if 'file_path' in download_info:
file_path = download_info['file_path']
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
if os.path.exists(metadata_path):
try:
os.unlink(preview_path)
logger.debug(f"Deleted preview file: {preview_path}")
os.unlink(metadata_path)
except Exception as e:
logger.error(f"Error deleting preview file: {e}")
logger.error(f"Error deleting metadata file: {e}")

# Delete preview file if exists (.webp or .mp4)
for preview_ext in ['.webp', '.mp4']:
preview_path = os.path.splitext(file_path)[0] + preview_ext
if os.path.exists(preview_path):
try:
os.unlink(preview_path)
logger.debug(f"Deleted preview file: {preview_path}")
except Exception as e:
logger.error(f"Error deleting preview file: {e}")

return {'success': True, 'message': 'Download cancelled successfully'}
except Exception as e:
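The new py/services/downloader.py that follows centralizes the HTTP plumbing replaced throughout this diff. A minimal usage sketch, assuming only the public surface visible below (get_instance, download_file) and an async context; the URL and destination path are placeholders:

from py.services.downloader import Downloader  # import path assumed from the diff

async def grab(url: str, dest: str) -> None:
    downloader = await Downloader.get_instance()

    async def on_progress(pct: float):
        print(f"{pct:.0f}%")  # 0-100, per the download_file docstring

    success, result = await downloader.download_file(
        url,
        dest,                  # full save path
        progress_callback=on_progress,
        use_auth=False,        # set True for CivitAI-authenticated downloads
        allow_resume=True,     # keeps a .part file for retry/resume
    )
    if not success:
        print(f"download failed: {result}")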
py/services/downloader.py (new file, 539 lines)
@@ -0,0 +1,539 @@
"""
Unified download manager for all HTTP/HTTPS downloads in the application.

This module provides a centralized download service with:
- Singleton pattern for global session management
- Support for authenticated downloads (e.g., CivitAI API key)
- Resumable downloads with automatic retry
- Progress tracking and callbacks
- Optimized connection pooling and timeouts
- Unified error handling and logging
"""

import os
import logging
import asyncio
import aiohttp
from datetime import datetime
from typing import Optional, Dict, Tuple, Callable, Union
from ..services.settings_manager import settings

logger = logging.getLogger(__name__)

class Downloader:
"""Unified downloader for all HTTP/HTTPS downloads in the application."""

_instance = None
_lock = asyncio.Lock()

@classmethod
async def get_instance(cls):
"""Get singleton instance of Downloader"""
async with cls._lock:
if cls._instance is None:
cls._instance = cls()
return cls._instance

def __init__(self):
"""Initialize the downloader with optimal settings"""
# Check if already initialized for singleton pattern
if hasattr(self, '_initialized'):
return
self._initialized = True

# Session management
self._session = None
self._session_created_at = None
self._proxy_url = None # Store proxy URL for current session

# Configuration
self.chunk_size = 4 * 1024 * 1024 # 4MB chunks for better throughput
self.max_retries = 5
self.base_delay = 2.0 # Base delay for exponential backoff
self.session_timeout = 300 # 5 minutes

# Default headers
self.default_headers = {
'User-Agent': 'ComfyUI-LoRA-Manager/1.0'
}

@property
async def session(self) -> aiohttp.ClientSession:
"""Get or create the global aiohttp session with optimized settings"""
if self._session is None or self._should_refresh_session():
await self._create_session()
return self._session

@property
def proxy_url(self) -> Optional[str]:
"""Get the current proxy URL (initialize if needed)"""
if not hasattr(self, '_proxy_url'):
self._proxy_url = None
return self._proxy_url

def _should_refresh_session(self) -> bool:
"""Check if session should be refreshed"""
if self._session is None:
return True

if not hasattr(self, '_session_created_at') or self._session_created_at is None:
return True

# Refresh if session is older than timeout
if (datetime.now() - self._session_created_at).total_seconds() > self.session_timeout:
return True

return False

async def _create_session(self):
"""Create a new aiohttp session with optimized settings"""
# Close existing session if any
if self._session is not None:
await self._session.close()

# Check for app-level proxy settings
proxy_url = None
if settings.get('proxy_enabled', False):
proxy_host = settings.get('proxy_host', '').strip()
proxy_port = settings.get('proxy_port', '').strip()
proxy_type = settings.get('proxy_type', 'http').lower()
proxy_username = settings.get('proxy_username', '').strip()
proxy_password = settings.get('proxy_password', '').strip()

if proxy_host and proxy_port:
# Build proxy URL
if proxy_username and proxy_password:
proxy_url = f"{proxy_type}://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}"
else:
proxy_url = f"{proxy_type}://{proxy_host}:{proxy_port}"

logger.debug(f"Using app-level proxy: {proxy_type}://{proxy_host}:{proxy_port}")
logger.debug("Proxy mode: app-level proxy is active.")
else:
logger.debug("Proxy mode: system-level proxy (trust_env) will be used if configured in environment.")
# Optimize TCP connection parameters
connector = aiohttp.TCPConnector(
ssl=True,
limit=8, # Concurrent connections
ttl_dns_cache=300, # DNS cache timeout
force_close=False, # Keep connections for reuse
enable_cleanup_closed=True
)

# Configure timeout parameters
timeout = aiohttp.ClientTimeout(
total=None, # No total timeout for large downloads
connect=60, # Connection timeout
sock_read=300 # 5 minute socket read timeout
)

self._session = aiohttp.ClientSession(
connector=connector,
trust_env=proxy_url is None, # Only use system proxy if no app-level proxy is set
timeout=timeout
)

# Store proxy URL for use in requests
self._proxy_url = proxy_url
self._session_created_at = datetime.now()

logger.debug("Created new HTTP session with proxy settings. App-level proxy: %s, System-level proxy (trust_env): %s", bool(proxy_url), proxy_url is None)

def _get_auth_headers(self, use_auth: bool = False) -> Dict[str, str]:
"""Get headers with optional authentication"""
headers = self.default_headers.copy()

if use_auth:
# Add CivitAI API key if available
api_key = settings.get('civitai_api_key')
if api_key:
headers['Authorization'] = f'Bearer {api_key}'
headers['Content-Type'] = 'application/json'

return headers

async def download_file(
self,
url: str,
save_path: str,
progress_callback: Optional[Callable[[float], None]] = None,
use_auth: bool = False,
custom_headers: Optional[Dict[str, str]] = None,
allow_resume: bool = True
) -> Tuple[bool, str]:
"""
Download a file with resumable downloads and retry mechanism

Args:
url: Download URL
save_path: Full path where the file should be saved
progress_callback: Optional callback for progress updates (0-100)
use_auth: Whether to include authentication headers (e.g., CivitAI API key)
custom_headers: Additional headers to include in request
allow_resume: Whether to support resumable downloads

Returns:
Tuple[bool, str]: (success, save_path or error message)
"""
retry_count = 0
part_path = save_path + '.part' if allow_resume else save_path

# Prepare headers
headers = self._get_auth_headers(use_auth)
if custom_headers:
headers.update(custom_headers)

# Get existing file size for resume
resume_offset = 0
if allow_resume and os.path.exists(part_path):
resume_offset = os.path.getsize(part_path)
logger.info(f"Resuming download from offset {resume_offset} bytes")

total_size = 0

while retry_count <= self.max_retries:
try:
session = await self.session
# Debug log for proxy mode at request time
if self.proxy_url:
logger.debug(f"[download_file] Using app-level proxy: {self.proxy_url}")
else:
logger.debug("[download_file] Using system-level proxy (trust_env) if configured.")

# Add Range header for resume if we have partial data
request_headers = headers.copy()
if allow_resume and resume_offset > 0:
request_headers['Range'] = f'bytes={resume_offset}-'

# Disable compression for better chunked downloads
request_headers['Accept-Encoding'] = 'identity'

logger.debug(f"Download attempt {retry_count + 1}/{self.max_retries + 1} from: {url}")
if resume_offset > 0:
logger.debug(f"Requesting range from byte {resume_offset}")

async with session.get(url, headers=request_headers, allow_redirects=True, proxy=self.proxy_url) as response:
|
||||
# Handle different response codes
|
||||
if response.status == 200:
|
||||
# Full content response
|
||||
if resume_offset > 0:
|
||||
# Server doesn't support ranges, restart from beginning
|
||||
logger.warning("Server doesn't support range requests, restarting download")
|
||||
resume_offset = 0
|
||||
if os.path.exists(part_path):
|
||||
os.remove(part_path)
|
||||
elif response.status == 206:
|
||||
# Partial content response (resume successful)
|
||||
content_range = response.headers.get('Content-Range')
|
||||
if content_range:
|
||||
# Parse total size from Content-Range header (e.g., "bytes 1024-2047/2048")
|
||||
range_parts = content_range.split('/')
|
||||
if len(range_parts) == 2:
|
||||
total_size = int(range_parts[1])
|
||||
logger.info(f"Successfully resumed download from byte {resume_offset}")
|
||||
elif response.status == 416:
|
||||
# Range not satisfiable - file might be complete or corrupted
|
||||
if allow_resume and os.path.exists(part_path):
|
||||
part_size = os.path.getsize(part_path)
|
||||
logger.warning(f"Range not satisfiable. Part file size: {part_size}")
|
||||
# Try to get actual file size
|
||||
head_response = await session.head(url, headers=headers, proxy=self.proxy_url)
|
||||
if head_response.status == 200:
|
||||
actual_size = int(head_response.headers.get('content-length', 0))
|
||||
if part_size == actual_size:
|
||||
# File is complete, just rename it
|
||||
if allow_resume:
|
||||
os.rename(part_path, save_path)
|
||||
if progress_callback:
|
||||
await progress_callback(100)
|
||||
return True, save_path
|
||||
# Remove corrupted part file and restart
|
||||
os.remove(part_path)
|
||||
resume_offset = 0
|
||||
continue
|
||||
elif response.status == 401:
|
||||
logger.warning(f"Unauthorized access to resource: {url} (Status 401)")
|
||||
return False, "Invalid or missing API key, or early access restriction."
|
||||
elif response.status == 403:
|
||||
logger.warning(f"Forbidden access to resource: {url} (Status 403)")
|
||||
return False, "Access forbidden: You don't have permission to download this file."
|
||||
elif response.status == 404:
|
||||
logger.warning(f"Resource not found: {url} (Status 404)")
|
||||
return False, "File not found - the download link may be invalid or expired."
|
||||
else:
|
||||
logger.error(f"Download failed for {url} with status {response.status}")
|
||||
return False, f"Download failed with status {response.status}"
|
||||
|
||||
# Get total file size for progress calculation (if not set from Content-Range)
|
||||
if total_size == 0:
|
||||
total_size = int(response.headers.get('content-length', 0))
|
||||
if response.status == 206:
|
||||
# For partial content, add the offset to get total file size
|
||||
total_size += resume_offset
|
||||
|
||||
current_size = resume_offset
|
||||
last_progress_report_time = datetime.now()
|
||||
|
||||
# Ensure directory exists
|
||||
os.makedirs(os.path.dirname(save_path), exist_ok=True)
|
||||
|
||||
# Stream download to file with progress updates
|
||||
loop = asyncio.get_running_loop()
|
||||
mode = 'ab' if (allow_resume and resume_offset > 0) else 'wb'
|
||||
with open(part_path, mode) as f:
|
||||
async for chunk in response.content.iter_chunked(self.chunk_size):
|
||||
if chunk:
|
||||
# Run blocking file write in executor
|
||||
await loop.run_in_executor(None, f.write, chunk)
|
||||
current_size += len(chunk)
|
||||
|
||||
# Limit progress update frequency to reduce overhead
|
||||
now = datetime.now()
|
||||
time_diff = (now - last_progress_report_time).total_seconds()
|
||||
|
||||
if progress_callback and total_size and time_diff >= 1.0:
|
||||
progress = (current_size / total_size) * 100
|
||||
await progress_callback(progress)
|
||||
last_progress_report_time = now
|
||||
|
||||
# Download completed successfully
|
||||
# Verify file size if total_size was provided
|
||||
final_size = os.path.getsize(part_path)
|
||||
if total_size > 0 and final_size != total_size:
|
||||
logger.warning(f"File size mismatch. Expected: {total_size}, Got: {final_size}")
|
||||
# Don't treat this as fatal error, continue anyway
|
||||
|
||||
# Atomically rename .part to final file (only if using resume)
|
||||
if allow_resume and part_path != save_path:
|
||||
max_rename_attempts = 5
|
||||
rename_attempt = 0
|
||||
rename_success = False
|
||||
|
||||
while rename_attempt < max_rename_attempts and not rename_success:
|
||||
try:
|
||||
# If the destination file exists, remove it first (Windows safe)
|
||||
if os.path.exists(save_path):
|
||||
os.remove(save_path)
|
||||
|
||||
os.rename(part_path, save_path)
|
||||
rename_success = True
|
||||
except PermissionError as e:
|
||||
rename_attempt += 1
|
||||
if rename_attempt < max_rename_attempts:
|
||||
logger.info(f"File still in use, retrying rename in 2 seconds (attempt {rename_attempt}/{max_rename_attempts})")
|
||||
await asyncio.sleep(2)
|
||||
else:
|
||||
logger.error(f"Failed to rename file after {max_rename_attempts} attempts: {e}")
|
||||
return False, f"Failed to finalize download: {str(e)}"
|
||||
|
||||
# Ensure 100% progress is reported
|
||||
if progress_callback:
|
||||
await progress_callback(100)
|
||||
|
||||
return True, save_path
|
||||
|
||||
except (aiohttp.ClientError, aiohttp.ClientPayloadError,
|
||||
aiohttp.ServerDisconnectedError, asyncio.TimeoutError) as e:
|
||||
retry_count += 1
|
||||
logger.warning(f"Network error during download (attempt {retry_count}/{self.max_retries + 1}): {e}")
|
||||
|
||||
if retry_count <= self.max_retries:
|
||||
# Calculate delay with exponential backoff
|
||||
delay = self.base_delay * (2 ** (retry_count - 1))
|
||||
logger.info(f"Retrying in {delay} seconds...")
|
||||
await asyncio.sleep(delay)
|
||||
|
||||
# Update resume offset for next attempt
|
||||
if allow_resume and os.path.exists(part_path):
|
||||
resume_offset = os.path.getsize(part_path)
|
||||
logger.info(f"Will resume from byte {resume_offset}")
|
||||
|
||||
# Refresh session to get new connection
|
||||
await self._create_session()
|
||||
continue
|
||||
else:
|
||||
logger.error(f"Max retries exceeded for download: {e}")
|
||||
return False, f"Network error after {self.max_retries + 1} attempts: {str(e)}"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected download error: {e}")
|
||||
return False, str(e)
|
||||
|
||||
return False, f"Download failed after {self.max_retries + 1} attempts"
|
||||
|
||||
async def download_to_memory(
|
||||
self,
|
||||
url: str,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None,
|
||||
return_headers: bool = False
|
||||
) -> Tuple[bool, Union[bytes, str], Optional[Dict]]:
|
||||
"""
|
||||
Download a file to memory (for small files like preview images)
|
||||
|
||||
Args:
|
||||
url: Download URL
|
||||
use_auth: Whether to include authentication headers
|
||||
custom_headers: Additional headers to include in request
|
||||
return_headers: Whether to return response headers along with content
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Union[bytes, str], Optional[Dict]]: (success, content or error message, response headers if requested)
|
||||
"""
|
||||
try:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[download_to_memory] Using app-level proxy: {self.proxy_url}")
|
||||
else:
|
||||
logger.debug("[download_to_memory] Using system-level proxy (trust_env) if configured.")
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
if custom_headers:
|
||||
headers.update(custom_headers)
|
||||
|
||||
async with session.get(url, headers=headers, proxy=self.proxy_url) as response:
|
||||
if response.status == 200:
|
||||
content = await response.read()
|
||||
if return_headers:
|
||||
return True, content, dict(response.headers)
|
||||
else:
|
||||
return True, content, None
|
||||
elif response.status == 401:
|
||||
error_msg = "Unauthorized access - invalid or missing API key"
|
||||
return False, error_msg, None
|
||||
elif response.status == 403:
|
||||
error_msg = "Access forbidden"
|
||||
return False, error_msg, None
|
||||
elif response.status == 404:
|
||||
error_msg = "File not found"
|
||||
return False, error_msg, None
|
||||
else:
|
||||
error_msg = f"Download failed with status {response.status}"
|
||||
return False, error_msg, None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error downloading to memory from {url}: {e}")
|
||||
return False, str(e), None
|
||||
|
||||
async def get_response_headers(
|
||||
self,
|
||||
url: str,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None
|
||||
) -> Tuple[bool, Union[Dict, str]]:
|
||||
"""
|
||||
Get response headers without downloading the full content
|
||||
|
||||
Args:
|
||||
url: URL to check
|
||||
use_auth: Whether to include authentication headers
|
||||
custom_headers: Additional headers to include in request
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Union[Dict, str]]: (success, headers dict or error message)
|
||||
"""
|
||||
try:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[get_response_headers] Using app-level proxy: {self.proxy_url}")
|
||||
else:
|
||||
logger.debug("[get_response_headers] Using system-level proxy (trust_env) if configured.")
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
if custom_headers:
|
||||
headers.update(custom_headers)
|
||||
|
||||
async with session.head(url, headers=headers, proxy=self.proxy_url) as response:
|
||||
if response.status == 200:
|
||||
return True, dict(response.headers)
|
||||
else:
|
||||
return False, f"Head request failed with status {response.status}"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting headers from {url}: {e}")
|
||||
return False, str(e)
|
||||
|
||||
async def make_request(
|
||||
self,
|
||||
method: str,
|
||||
url: str,
|
||||
use_auth: bool = False,
|
||||
custom_headers: Optional[Dict[str, str]] = None,
|
||||
**kwargs
|
||||
) -> Tuple[bool, Union[Dict, str]]:
|
||||
"""
|
||||
Make a generic HTTP request and return JSON response
|
||||
|
||||
Args:
|
||||
method: HTTP method (GET, POST, etc.)
|
||||
url: Request URL
|
||||
use_auth: Whether to include authentication headers
|
||||
custom_headers: Additional headers to include in request
|
||||
**kwargs: Additional arguments for aiohttp request
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Union[Dict, str]]: (success, response data or error message)
|
||||
"""
|
||||
try:
|
||||
session = await self.session
|
||||
# Debug log for proxy mode at request time
|
||||
if self.proxy_url:
|
||||
logger.debug(f"[make_request] Using app-level proxy: {self.proxy_url}")
|
||||
else:
|
||||
logger.debug("[make_request] Using system-level proxy (trust_env) if configured.")
|
||||
|
||||
# Prepare headers
|
||||
headers = self._get_auth_headers(use_auth)
|
||||
if custom_headers:
|
||||
headers.update(custom_headers)
|
||||
|
||||
# Add proxy to kwargs if not already present
|
||||
if 'proxy' not in kwargs:
|
||||
kwargs['proxy'] = self.proxy_url
|
||||
|
||||
async with session.request(method, url, headers=headers, **kwargs) as response:
|
||||
if response.status == 200:
|
||||
# Try to parse as JSON, fall back to text
|
||||
try:
|
||||
data = await response.json()
|
||||
return True, data
|
||||
except:
|
||||
text = await response.text()
|
||||
return True, text
|
||||
elif response.status == 401:
|
||||
return False, "Unauthorized access - invalid or missing API key"
|
||||
elif response.status == 403:
|
||||
return False, "Access forbidden"
|
||||
elif response.status == 404:
|
||||
return False, "Resource not found"
|
||||
else:
|
||||
return False, f"Request failed with status {response.status}"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error making {method} request to {url}: {e}")
|
||||
return False, str(e)
|
||||
|
||||
async def close(self):
|
||||
"""Close the HTTP session"""
|
||||
if self._session is not None:
|
||||
await self._session.close()
|
||||
self._session = None
|
||||
self._session_created_at = None
|
||||
self._proxy_url = None
|
||||
logger.debug("Closed HTTP session")
|
||||
|
||||
async def refresh_session(self):
|
||||
"""Force refresh the HTTP session (useful when proxy settings change)"""
|
||||
await self._create_session()
|
||||
logger.info("HTTP session refreshed due to settings change")
|
||||
|
||||
|
||||
# Global instance accessor
|
||||
async def get_downloader() -> Downloader:
|
||||
"""Get the global downloader instance"""
|
||||
return await Downloader.get_instance()
|
||||
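To make the retry behavior above concrete: with max_retries = 5 and base_delay = 2.0, the exponential backoff sleeps 2, 4, 8, 16, and 32 seconds between network-error attempts, resuming from the .part file each time. Below is a minimal usage sketch based only on the API shown above; the URL, the save path, and the assumption that this runs where get_downloader is importable are all placeholders, not part of the repository.

# Hedged usage sketch for the Downloader API above; URL and save path are placeholders.
import asyncio

async def fetch_example():
    downloader = await get_downloader()

    async def on_progress(percent: float):
        # download_file awaits this callback with a 0-100 float
        print(f"progress: {percent:.1f}%")

    ok, result = await downloader.download_file(
        url="https://example.com/model.safetensors",  # placeholder URL
        save_path="/tmp/model.safetensors",           # placeholder path
        progress_callback=on_progress,
        use_auth=True,       # attaches the configured CivitAI API key, if any
        allow_resume=True,   # streams into model.safetensors.part, then renames
    )
    print(ok, result)  # (True, save_path) on success, (False, error message) otherwise

    # Small payloads such as preview images can be fetched straight to memory
    ok, content, headers = await downloader.download_to_memory(
        "https://example.com/preview.png", return_headers=True)

asyncio.run(fetch_example())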
@@ -34,12 +34,11 @@ class EmbeddingService(BaseModelService):
             "file_size": embedding_data.get("size", 0),
             "modified": embedding_data.get("modified", ""),
             "tags": embedding_data.get("tags", []),
             "modelDescription": embedding_data.get("modelDescription", ""),
             "from_civitai": embedding_data.get("from_civitai", True),
             "notes": embedding_data.get("notes", ""),
             "model_type": embedding_data.get("model_type", "embedding"),
             "favorite": embedding_data.get("favorite", False),
-            "civitai": ModelRouteUtils.filter_civitai_data(embedding_data.get("civitai", {}))
+            "civitai": ModelRouteUtils.filter_civitai_data(embedding_data.get("civitai", {}), minimal=True)
         }

     def find_duplicate_hashes(self) -> Dict:
@@ -34,12 +34,11 @@ class LoraService(BaseModelService):
             "file_size": lora_data.get("size", 0),
             "modified": lora_data.get("modified", ""),
             "tags": lora_data.get("tags", []),
             "modelDescription": lora_data.get("modelDescription", ""),
             "from_civitai": lora_data.get("from_civitai", True),
             "usage_tips": lora_data.get("usage_tips", ""),
             "notes": lora_data.get("notes", ""),
             "favorite": lora_data.get("favorite", False),
-            "civitai": ModelRouteUtils.filter_civitai_data(lora_data.get("civitai", {}))
+            "civitai": ModelRouteUtils.filter_civitai_data(lora_data.get("civitai", {}), minimal=True)
         }

     async def _apply_specific_filters(self, data: List[Dict], **kwargs) -> List[Dict]:
@@ -147,16 +146,6 @@ class LoraService(BaseModelService):

         return letters

-    async def get_lora_notes(self, lora_name: str) -> Optional[str]:
-        """Get notes for a specific LoRA file"""
-        cache = await self.scanner.get_cached_data()
-
-        for lora in cache.raw_data:
-            if lora['file_name'] == lora_name:
-                return lora.get('notes', '')
-
-        return None
-
     async def get_lora_trigger_words(self, lora_name: str) -> List[str]:
         """Get trigger words for a specific LoRA file"""
         cache = await self.scanner.get_cached_data()
@@ -168,41 +157,22 @@ class LoraService(BaseModelService):

         return []

-    async def get_lora_preview_url(self, lora_name: str) -> Optional[str]:
-        """Get the static preview URL for a LoRA file"""
+    async def get_lora_usage_tips_by_relative_path(self, relative_path: str) -> Optional[str]:
+        """Get usage tips for a LoRA by its relative path"""
         cache = await self.scanner.get_cached_data()

         for lora in cache.raw_data:
-            if lora['file_name'] == lora_name:
-                preview_url = lora.get('preview_url')
-                if preview_url:
-                    return config.get_preview_static_url(preview_url)
+            file_path = lora.get('file_path', '')
+            if file_path:
+                # Convert to forward slashes and extract relative path
+                file_path_normalized = file_path.replace('\\', '/')
+                relative_path = relative_path.replace('\\', '/')
+                # Find the relative path part by looking for the relative_path in the full path
+                if file_path_normalized.endswith(relative_path) or relative_path in file_path_normalized:
+                    return lora.get('usage_tips', '')

         return None

-    async def get_lora_civitai_url(self, lora_name: str) -> Dict[str, Optional[str]]:
-        """Get the Civitai URL for a LoRA file"""
-        cache = await self.scanner.get_cached_data()
-
-        for lora in cache.raw_data:
-            if lora['file_name'] == lora_name:
-                civitai_data = lora.get('civitai', {})
-                model_id = civitai_data.get('modelId')
-                version_id = civitai_data.get('id')
-
-                if model_id:
-                    civitai_url = f"https://civitai.com/models/{model_id}"
-                    if version_id:
-                        civitai_url += f"?modelVersionId={version_id}"
-
-                    return {
-                        'civitai_url': civitai_url,
-                        'model_id': str(model_id),
-                        'version_id': str(version_id) if version_id else None
-                    }
-
-        return {'civitai_url': None, 'model_id': None, 'version_id': None}
-
     def find_duplicate_hashes(self) -> Dict:
         """Find LoRAs with duplicate SHA256 hashes"""
         return self.scanner._hash_index.get_duplicate_hashes()
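As a brief, hedged illustration of the lookup added above (the service instance and the path fragment are placeholders), the new method matches a relative path against each cached file_path rather than a bare file name:

# Hypothetical call site; lora_service is assumed to be an existing LoraService instance.
async def show_usage_tips(lora_service):
    tips = await lora_service.get_lora_usage_tips_by_relative_path(
        "flux/style/my_lora.safetensors")  # backslashes are normalized to forward slashes
    print(tips or "no usage tips recorded")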
py/services/metadata_archive_manager.py (new file, 151 lines)
@@ -0,0 +1,151 @@
import zipfile
import logging
import asyncio
from pathlib import Path
from typing import Optional
from .downloader import get_downloader

logger = logging.getLogger(__name__)

class MetadataArchiveManager:
    """Manages downloading and extracting Civitai metadata archive database"""

    DOWNLOAD_URLS = [
        "https://github.com/willmiao/civitai-metadata-archive-db/releases/download/db-2025-08-08/civitai.zip",
        "https://huggingface.co/datasets/willmiao/civitai-metadata-archive-db/blob/main/civitai.zip"
    ]

    def __init__(self, base_path: str):
        """Initialize with base path where files will be stored"""
        self.base_path = Path(base_path)
        self.civitai_folder = self.base_path / "civitai"
        self.archive_path = self.base_path / "civitai.zip"
        self.db_path = self.civitai_folder / "civitai.sqlite"

    def is_database_available(self) -> bool:
        """Check if the SQLite database is available and valid"""
        return self.db_path.exists() and self.db_path.stat().st_size > 0

    def get_database_path(self) -> Optional[str]:
        """Get the path to the SQLite database if available"""
        if self.is_database_available():
            return str(self.db_path)
        return None

    async def download_and_extract_database(self, progress_callback=None) -> bool:
        """Download and extract the metadata archive database

        Args:
            progress_callback: Optional callback function to report progress

        Returns:
            bool: True if successful, False otherwise
        """
        try:
            # Create directories if they don't exist
            self.base_path.mkdir(parents=True, exist_ok=True)
            self.civitai_folder.mkdir(parents=True, exist_ok=True)

            # Download the archive
            if not await self._download_archive(progress_callback):
                return False

            # Extract the archive
            if not await self._extract_archive(progress_callback):
                return False

            # Clean up the archive file
            if self.archive_path.exists():
                self.archive_path.unlink()

            logger.info(f"Successfully downloaded and extracted metadata database to {self.db_path}")
            return True

        except Exception as e:
            logger.error(f"Error downloading and extracting metadata database: {e}", exc_info=True)
            return False

    async def _download_archive(self, progress_callback=None) -> bool:
        """Download the zip archive from one of the available URLs"""
        downloader = await get_downloader()

        for url in self.DOWNLOAD_URLS:
            try:
                logger.info(f"Attempting to download from {url}")

                if progress_callback:
                    progress_callback("download", f"Downloading from {url}")

                # Custom progress callback to report download progress
                async def download_progress(progress):
                    if progress_callback:
                        progress_callback("download", f"Downloading archive... {progress:.1f}%")

                success, result = await downloader.download_file(
                    url=url,
                    save_path=str(self.archive_path),
                    progress_callback=download_progress,
                    use_auth=False,  # Public download, no auth needed
                    allow_resume=True
                )

                if success:
                    logger.info(f"Successfully downloaded archive from {url}")
                    return True
                else:
                    logger.warning(f"Failed to download from {url}: {result}")
                    continue

            except Exception as e:
                logger.warning(f"Error downloading from {url}: {e}")
                continue

        logger.error("Failed to download archive from any URL")
        return False

    async def _extract_archive(self, progress_callback=None) -> bool:
        """Extract the zip archive to the civitai folder"""
        try:
            if progress_callback:
                progress_callback("extract", "Extracting archive...")

            # Run extraction in thread pool to avoid blocking
            loop = asyncio.get_event_loop()
            await loop.run_in_executor(None, self._extract_zip_sync)

            if progress_callback:
                progress_callback("extract", "Extraction completed")

            return True

        except Exception as e:
            logger.error(f"Error extracting archive: {e}", exc_info=True)
            return False

    def _extract_zip_sync(self):
        """Synchronous zip extraction (runs in thread pool)"""
        with zipfile.ZipFile(self.archive_path, 'r') as archive:
            archive.extractall(path=self.base_path)

    async def remove_database(self) -> bool:
        """Remove the metadata database and folder"""
        try:
            if self.civitai_folder.exists():
                # Remove all files in the civitai folder
                for file_path in self.civitai_folder.iterdir():
                    if file_path.is_file():
                        file_path.unlink()

                # Remove the folder itself
                self.civitai_folder.rmdir()

            # Also remove the archive file if it exists
            if self.archive_path.exists():
                self.archive_path.unlink()

            logger.info("Successfully removed metadata database")
            return True

        except Exception as e:
            logger.error(f"Error removing metadata database: {e}", exc_info=True)
            return False
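A minimal consumer sketch for the manager above, assuming a placeholder data directory. Note that download_and_extract_database reports progress through a synchronous (stage, message) callback, unlike the float-percentage callback awaited by Downloader.download_file.

# Hedged setup sketch; the base path is a placeholder.
async def ensure_archive_db():
    manager = MetadataArchiveManager("/path/to/data")
    if not manager.is_database_available():
        ok = await manager.download_and_extract_database(
            progress_callback=lambda stage, msg: print(f"[{stage}] {msg}"))
        if not ok:
            return None
    return manager.get_database_path()  # .../civitai/civitai.sqlite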
py/services/metadata_service.py (new file, 117 lines)
@@ -0,0 +1,117 @@
import os
import logging
from .model_metadata_provider import (
    ModelMetadataProviderManager,
    SQLiteModelMetadataProvider,
    CivitaiModelMetadataProvider,
    FallbackMetadataProvider
)
from .settings_manager import settings
from .metadata_archive_manager import MetadataArchiveManager
from .service_registry import ServiceRegistry

logger = logging.getLogger(__name__)

async def initialize_metadata_providers():
    """Initialize and configure all metadata providers based on settings"""
    provider_manager = await ModelMetadataProviderManager.get_instance()

    # Clear existing providers to allow reinitialization
    provider_manager.providers.clear()
    provider_manager.default_provider = None

    # Get settings
    enable_archive_db = settings.get('enable_metadata_archive_db', False)

    providers = []

    # Initialize archive database provider if enabled
    if enable_archive_db:
        try:
            # Initialize archive manager
            base_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            archive_manager = MetadataArchiveManager(base_path)

            db_path = archive_manager.get_database_path()
            if db_path and os.path.exists(db_path):
                sqlite_provider = SQLiteModelMetadataProvider(db_path)
                provider_manager.register_provider('sqlite', sqlite_provider)
                providers.append(('sqlite', sqlite_provider))
                logger.info(f"SQLite metadata provider registered with database: {db_path}")
            else:
                logger.warning("Metadata archive database is enabled but database file not found")
        except Exception as e:
            logger.error(f"Failed to initialize SQLite metadata provider: {e}")

    # Initialize Civitai API provider (always available as fallback)
    try:
        civitai_client = await ServiceRegistry.get_civitai_client()
        civitai_provider = CivitaiModelMetadataProvider(civitai_client)
        provider_manager.register_provider('civitai_api', civitai_provider)
        providers.append(('civitai_api', civitai_provider))
        logger.debug("Civitai API metadata provider registered")
    except Exception as e:
        logger.error(f"Failed to initialize Civitai API metadata provider: {e}")

    # Register CivArchive provider, but do NOT add to fallback providers
    try:
        from .model_metadata_provider import CivArchiveModelMetadataProvider
        civarchive_provider = CivArchiveModelMetadataProvider()
        provider_manager.register_provider('civarchive', civarchive_provider)
        logger.debug("CivArchive metadata provider registered (not included in fallback)")
    except Exception as e:
        logger.error(f"Failed to initialize CivArchive metadata provider: {e}")

    # Set up fallback provider based on available providers
    if len(providers) > 1:
        # Always use Civitai API first, then Archive DB
        ordered_providers = []
        ordered_providers.extend([p[1] for p in providers if p[0] == 'civitai_api'])
        ordered_providers.extend([p[1] for p in providers if p[0] == 'sqlite'])

        if ordered_providers:
            fallback_provider = FallbackMetadataProvider(ordered_providers)
            provider_manager.register_provider('fallback', fallback_provider, is_default=True)
            logger.info(f"Fallback metadata provider registered with {len(ordered_providers)} providers, Civitai API first")
    elif len(providers) == 1:
        # Only one provider available, set it as default
        provider_name, provider = providers[0]
        provider_manager.register_provider(provider_name, provider, is_default=True)
        logger.debug(f"Single metadata provider registered as default: {provider_name}")
    else:
        logger.warning("No metadata providers available - this may cause metadata lookup failures")

    return provider_manager

async def update_metadata_providers():
    """Update metadata providers based on current settings"""
    try:
        # Get current settings
        enable_archive_db = settings.get('enable_metadata_archive_db', False)

        # Reinitialize all providers with new settings
        provider_manager = await initialize_metadata_providers()

        logger.info(f"Updated metadata providers, archive_db enabled: {enable_archive_db}")
        return provider_manager
    except Exception as e:
        logger.error(f"Failed to update metadata providers: {e}")
        return await ModelMetadataProviderManager.get_instance()

async def get_metadata_archive_manager():
    """Get metadata archive manager instance"""
    base_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    return MetadataArchiveManager(base_path)

async def get_metadata_provider(provider_name: str = None):
    """Get a specific metadata provider or default provider"""
    provider_manager = await ModelMetadataProviderManager.get_instance()

    if provider_name:
        return provider_manager._get_provider(provider_name)

    return provider_manager._get_provider()

async def get_default_metadata_provider():
    """Get the default metadata provider (fallback or single provider)"""
    return await get_metadata_provider()
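To show how these helpers fit together at startup, here is a hedged sketch: initialize_metadata_providers registers the sqlite, civitai_api, and civarchive providers (building a fallback chain when more than one is available), after which the default provider can serve hash lookups. The SHA256 value is a placeholder.

# Hedged startup sketch; the hash value is a placeholder.
async def lookup_by_hash(sha256: str):
    await initialize_metadata_providers()  # registers providers per current settings
    provider = await get_default_metadata_provider()
    return await provider.get_model_by_hash(sha256)  # Civitai-API-shaped dict or None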
py/services/model_file_service.py (new file, 463 lines)
@@ -0,0 +1,463 @@
import asyncio
import os
import logging
from typing import List, Dict, Optional, Any, Set
from abc import ABC, abstractmethod

from ..utils.utils import calculate_relative_path_for_model, remove_empty_dirs
from ..utils.constants import AUTO_ORGANIZE_BATCH_SIZE
from ..services.settings_manager import settings

logger = logging.getLogger(__name__)


class ProgressCallback(ABC):
    """Abstract callback interface for progress reporting"""

    @abstractmethod
    async def on_progress(self, progress_data: Dict[str, Any]) -> None:
        """Called when progress is updated"""
        pass


class AutoOrganizeResult:
    """Result object for auto-organize operations"""

    def __init__(self):
        self.total: int = 0
        self.processed: int = 0
        self.success_count: int = 0
        self.failure_count: int = 0
        self.skipped_count: int = 0
        self.operation_type: str = 'unknown'
        self.cleanup_counts: Dict[str, int] = {}
        self.results: List[Dict[str, Any]] = []
        self.results_truncated: bool = False
        self.sample_results: List[Dict[str, Any]] = []
        self.is_flat_structure: bool = False

    def to_dict(self) -> Dict[str, Any]:
        """Convert result to dictionary"""
        result = {
            'success': True,
            'message': f'Auto-organize {self.operation_type} completed: {self.success_count} moved, {self.skipped_count} skipped, {self.failure_count} failed out of {self.total} total',
            'summary': {
                'total': self.total,
                'success': self.success_count,
                'skipped': self.skipped_count,
                'failures': self.failure_count,
                'organization_type': 'flat' if self.is_flat_structure else 'structured',
                'cleaned_dirs': self.cleanup_counts,
                'operation_type': self.operation_type
            }
        }

        if self.results_truncated:
            result['results_truncated'] = True
            result['sample_results'] = self.sample_results
        else:
            result['results'] = self.results

        return result


class ModelFileService:
    """Service for handling model file operations and organization"""

    def __init__(self, scanner, model_type: str):
        """Initialize the service

        Args:
            scanner: Model scanner instance
            model_type: Type of model (e.g., 'lora', 'checkpoint')
        """
        self.scanner = scanner
        self.model_type = model_type

    def get_model_roots(self) -> List[str]:
        """Get model root directories"""
        return self.scanner.get_model_roots()

    async def auto_organize_models(
        self,
        file_paths: Optional[List[str]] = None,
        progress_callback: Optional[ProgressCallback] = None
    ) -> AutoOrganizeResult:
        """Auto-organize models based on current settings

        Args:
            file_paths: Optional list of specific file paths to organize.
                If None, organizes all models.
            progress_callback: Optional callback for progress updates

        Returns:
            AutoOrganizeResult object with operation results
        """
        result = AutoOrganizeResult()
        source_directories: Set[str] = set()

        try:
            # Get all models from cache
            cache = await self.scanner.get_cached_data()
            all_models = cache.raw_data

            # Filter models if specific file paths are provided
            if file_paths:
                all_models = [model for model in all_models if model.get('file_path') in file_paths]
                result.operation_type = 'bulk'
            else:
                result.operation_type = 'all'

            # Get model roots for this scanner
            model_roots = self.get_model_roots()
            if not model_roots:
                raise ValueError('No model roots configured')

            # Check if flat structure is configured for this model type
            path_template = settings.get_download_path_template(self.model_type)
            result.is_flat_structure = not path_template

            # Initialize tracking
            result.total = len(all_models)

            # Send initial progress
            if progress_callback:
                await progress_callback.on_progress({
                    'type': 'auto_organize_progress',
                    'status': 'started',
                    'total': result.total,
                    'processed': 0,
                    'success': 0,
                    'failures': 0,
                    'skipped': 0,
                    'operation_type': result.operation_type
                })

            # Process models in batches
            await self._process_models_in_batches(
                all_models,
                model_roots,
                result,
                progress_callback,
                source_directories  # Pass the set to track source directories
            )

            # Send cleanup progress
            if progress_callback:
                await progress_callback.on_progress({
                    'type': 'auto_organize_progress',
                    'status': 'cleaning',
                    'total': result.total,
                    'processed': result.processed,
                    'success': result.success_count,
                    'failures': result.failure_count,
                    'skipped': result.skipped_count,
                    'message': 'Cleaning up empty directories...',
                    'operation_type': result.operation_type
                })

            # Clean up empty directories - only in affected directories for bulk operations
            cleanup_paths = list(source_directories) if result.operation_type == 'bulk' else model_roots
            result.cleanup_counts = await self._cleanup_empty_directories(cleanup_paths)

            # Send completion message
            if progress_callback:
                await progress_callback.on_progress({
                    'type': 'auto_organize_progress',
                    'status': 'completed',
                    'total': result.total,
                    'processed': result.processed,
                    'success': result.success_count,
                    'failures': result.failure_count,
                    'skipped': result.skipped_count,
                    'cleanup': result.cleanup_counts,
                    'operation_type': result.operation_type
                })

            return result

        except Exception as e:
            logger.error(f"Error in auto_organize_models: {e}", exc_info=True)

            # Send error message
            if progress_callback:
                await progress_callback.on_progress({
                    'type': 'auto_organize_progress',
                    'status': 'error',
                    'error': str(e),
                    'operation_type': result.operation_type
                })

            raise e

    async def _process_models_in_batches(
        self,
        all_models: List[Dict[str, Any]],
        model_roots: List[str],
        result: AutoOrganizeResult,
        progress_callback: Optional[ProgressCallback],
        source_directories: Optional[Set[str]] = None
    ) -> None:
        """Process models in batches to avoid overwhelming the system"""

        for i in range(0, result.total, AUTO_ORGANIZE_BATCH_SIZE):
            batch = all_models[i:i + AUTO_ORGANIZE_BATCH_SIZE]

            for model in batch:
                await self._process_single_model(model, model_roots, result, source_directories)
                result.processed += 1

            # Send progress update after each batch
            if progress_callback:
                await progress_callback.on_progress({
                    'type': 'auto_organize_progress',
                    'status': 'processing',
                    'total': result.total,
                    'processed': result.processed,
                    'success': result.success_count,
                    'failures': result.failure_count,
                    'skipped': result.skipped_count,
                    'operation_type': result.operation_type
                })

            # Small delay between batches
            await asyncio.sleep(0.1)

    async def _process_single_model(
        self,
        model: Dict[str, Any],
        model_roots: List[str],
        result: AutoOrganizeResult,
        source_directories: Optional[Set[str]] = None
    ) -> None:
        """Process a single model for organization"""
        try:
            file_path = model.get('file_path')
            model_name = model.get('model_name', 'Unknown')

            if not file_path:
                self._add_result(result, model_name, False, "No file path found")
                result.failure_count += 1
                return

            # Find which model root this file belongs to
            current_root = self._find_model_root(file_path, model_roots)
            if not current_root:
                self._add_result(result, model_name, False,
                                 "Model file not found in any configured root directory")
                result.failure_count += 1
                return

            # Determine target directory
            target_dir = await self._calculate_target_directory(
                model, current_root, result.is_flat_structure
            )

            if target_dir is None:
                self._add_result(result, model_name, False,
                                 "Skipped - insufficient metadata for organization")
                result.skipped_count += 1
                return

            current_dir = os.path.dirname(file_path)

            # Skip if already in correct location
            if current_dir.replace(os.sep, '/') == target_dir.replace(os.sep, '/'):
                result.skipped_count += 1
                return

            # Check for conflicts
            file_name = os.path.basename(file_path)
            target_file_path = os.path.join(target_dir, file_name)

            if os.path.exists(target_file_path):
                self._add_result(result, model_name, False,
                                 f"Target file already exists: {target_file_path}")
                result.failure_count += 1
                return

            # Store the source directory for potential cleanup
            if source_directories is not None:
                source_directories.add(current_dir)

            # Perform the move
            success = await self.scanner.move_model(file_path, target_dir)

            if success:
                result.success_count += 1
            else:
                self._add_result(result, model_name, False, "Failed to move model")
                result.failure_count += 1

        except Exception as e:
            logger.error(f"Error processing model {model.get('model_name', 'Unknown')}: {e}", exc_info=True)
            self._add_result(result, model.get('model_name', 'Unknown'), False, f"Error: {str(e)}")
            result.failure_count += 1

    def _find_model_root(self, file_path: str, model_roots: List[str]) -> Optional[str]:
        """Find which model root the file belongs to"""
        for root in model_roots:
            # Normalize paths for comparison
            normalized_root = os.path.normpath(root).replace(os.sep, '/')
            normalized_file = os.path.normpath(file_path).replace(os.sep, '/')

            if normalized_file.startswith(normalized_root):
                return root
        return None

    async def _calculate_target_directory(
        self,
        model: Dict[str, Any],
        current_root: str,
        is_flat_structure: bool
    ) -> Optional[str]:
        """Calculate the target directory for a model"""
        if is_flat_structure:
            file_path = model.get('file_path')
            current_dir = os.path.dirname(file_path)

            # Check if already in root directory
            if os.path.normpath(current_dir) == os.path.normpath(current_root):
                return None  # Signal to skip

            return current_root
        else:
            # Calculate new relative path based on settings
            new_relative_path = calculate_relative_path_for_model(model, self.model_type)

            if not new_relative_path:
                return None  # Signal to skip

            return os.path.join(current_root, new_relative_path).replace(os.sep, '/')

    def _add_result(
        self,
        result: AutoOrganizeResult,
        model_name: str,
        success: bool,
        message: str
    ) -> None:
        """Add a result entry if under the limit"""
        if len(result.results) < 100:  # Limit detailed results
            result.results.append({
                "model": model_name,
                "success": success,
                "message": message
            })
        elif len(result.results) == 100:
            # Mark as truncated and save sample
            result.results_truncated = True
            result.sample_results = result.results[:50]

    async def _cleanup_empty_directories(self, paths: List[str]) -> Dict[str, int]:
        """Clean up empty directories after organizing

        Args:
            paths: List of paths to check for empty directories

        Returns:
            Dictionary with counts of removed directories by root path
        """
        cleanup_counts = {}
        for path in paths:
            removed = remove_empty_dirs(path)
            cleanup_counts[path] = removed
        return cleanup_counts


class ModelMoveService:
    """Service for handling individual model moves"""

    def __init__(self, scanner):
        """Initialize the service

        Args:
            scanner: Model scanner instance
        """
        self.scanner = scanner

    async def move_model(self, file_path: str, target_path: str) -> Dict[str, Any]:
        """Move a single model file

        Args:
            file_path: Source file path
            target_path: Target directory path

        Returns:
            Dictionary with move result
        """
        try:
            source_dir = os.path.dirname(file_path)
            if os.path.normpath(source_dir) == os.path.normpath(target_path):
                logger.info(f"Source and target directories are the same: {source_dir}")
                return {
                    'success': True,
                    'message': 'Source and target directories are the same',
                    'original_file_path': file_path,
                    'new_file_path': file_path
                }

            new_file_path = await self.scanner.move_model(file_path, target_path)
            if new_file_path:
                return {
                    'success': True,
                    'original_file_path': file_path,
                    'new_file_path': new_file_path
                }
            else:
                return {
                    'success': False,
                    'error': 'Failed to move model',
                    'original_file_path': file_path,
                    'new_file_path': None
                }
        except Exception as e:
            logger.error(f"Error moving model: {e}", exc_info=True)
            return {
                'success': False,
                'error': str(e),
                'original_file_path': file_path,
                'new_file_path': None
            }

    async def move_models_bulk(self, file_paths: List[str], target_path: str) -> Dict[str, Any]:
        """Move multiple model files

        Args:
            file_paths: List of source file paths
            target_path: Target directory path

        Returns:
            Dictionary with bulk move results
        """
        try:
            results = []

            for file_path in file_paths:
                result = await self.move_model(file_path, target_path)
                results.append({
                    "original_file_path": file_path,
                    "new_file_path": result.get('new_file_path'),
                    "success": result['success'],
                    "message": result.get('message', result.get('error', 'Unknown'))
                })

            success_count = sum(1 for r in results if r["success"])
            failure_count = len(results) - success_count

            return {
                'success': True,
                'message': f'Moved {success_count} of {len(file_paths)} models',
                'results': results,
                'success_count': success_count,
                'failure_count': failure_count
            }
        except Exception as e:
            logger.error(f"Error moving models in bulk: {e}", exc_info=True)
            return {
                'success': False,
                'error': str(e),
                'results': [],
                'success_count': 0,
                'failure_count': len(file_paths)
            }
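A hedged caller sketch for the services above: the scanner instance is assumed to come from elsewhere in the application, and the payloads printed here are the progress dictionaries emitted by auto_organize_models.

# Hypothetical caller; `scanner` is assumed to be an existing model scanner.
class PrintProgress(ProgressCallback):
    async def on_progress(self, progress_data):
        print(progress_data['status'],
              progress_data.get('processed'), '/', progress_data.get('total'))

async def organize_all(scanner):
    service = ModelFileService(scanner, model_type='lora')
    result = await service.auto_organize_models(progress_callback=PrintProgress())
    return result.to_dict()  # summary with success/skipped/failure counts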
@@ -31,29 +31,34 @@ class ModelHashIndex:
         if file_path not in self._duplicate_hashes.get(sha256, []):
             self._duplicate_hashes.setdefault(sha256, []).append(file_path)

-        # Track duplicates by filename
+        # Track duplicates by filename - FIXED LOGIC
         if filename in self._filename_to_hash:
-            old_hash = self._filename_to_hash[filename]
-            if old_hash != sha256:  # Different models with the same name
-                old_path = self._hash_to_path.get(old_hash)
-                if old_path:
-                    if filename not in self._duplicate_filenames:
-                        self._duplicate_filenames[filename] = [old_path]
-                    if file_path not in self._duplicate_filenames.get(filename, []):
-                        self._duplicate_filenames.setdefault(filename, []).append(file_path)
+            existing_hash = self._filename_to_hash[filename]
+            existing_path = self._hash_to_path.get(existing_hash)
+
+            # If this is a different file with the same filename
+            if existing_path and existing_path != file_path:
+                # Initialize duplicates tracking if needed
+                if filename not in self._duplicate_filenames:
+                    self._duplicate_filenames[filename] = [existing_path]
+
+                # Add current file to duplicates if not already present
+                if file_path not in self._duplicate_filenames[filename]:
+                    self._duplicate_filenames[filename].append(file_path)

         # Remove old path mapping if hash exists
         if sha256 in self._hash_to_path:
             old_path = self._hash_to_path[sha256]
             old_filename = self._get_filename_from_path(old_path)
-            if old_filename in self._filename_to_hash:
+            if old_filename in self._filename_to_hash and self._filename_to_hash[old_filename] == sha256:
                 del self._filename_to_hash[old_filename]

-        # Remove old hash mapping if filename exists
+        # Remove old hash mapping if filename exists and points to different hash
         if filename in self._filename_to_hash:
             old_hash = self._filename_to_hash[filename]
-            if old_hash in self._hash_to_path:
-                del self._hash_to_path[old_hash]
+            if old_hash != sha256 and old_hash in self._hash_to_path:
+                # Don't delete the old hash mapping, just update filename mapping
+                pass

         # Add new mappings
         self._hash_to_path[sha256] = file_path
@@ -199,8 +204,6 @@ class ModelHashIndex:

     def get_hash_by_filename(self, filename: str) -> Optional[str]:
         """Get hash for a filename without extension"""
-        # Strip extension if present to make the function more flexible
-        filename = os.path.splitext(filename)[0]
         return self._filename_to_hash.get(filename)

     def clear(self) -> None:
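For context on the fixed duplicate tracking, a hedged inspection sketch; the index instance is a placeholder, and the shape returned by get_duplicate_hashes is inferred from the _duplicate_hashes mapping used above.

# Hypothetical inspection of a ModelHashIndex instance.
def report_duplicates(index):
    # After this change the key is looked up as stored; the caller-side
    # extension stripping removed in the hunk above no longer happens.
    h = index.get_hash_by_filename("my_lora")
    # Assumed shape {sha256: [file_path, ...]}, matching _duplicate_hashes
    for sha256, paths in index.get_duplicate_hashes().items():
        print(sha256, "->", paths)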
py/services/model_metadata_provider.py (new file, 519 lines)
@@ -0,0 +1,519 @@
from abc import ABC, abstractmethod
import json
import aiosqlite
import logging
import aiohttp
from bs4 import BeautifulSoup
from typing import Optional, Dict, Tuple
from .downloader import get_downloader

logger = logging.getLogger(__name__)

class ModelMetadataProvider(ABC):
    """Base abstract class for all model metadata providers"""

    @abstractmethod
    async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
        """Find model by hash value"""
        pass

    @abstractmethod
    async def get_model_versions(self, model_id: str) -> Optional[Dict]:
        """Get all versions of a model with their details"""
        pass

    @abstractmethod
    async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
        """Get specific model version with additional metadata"""
        pass

    @abstractmethod
    async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
        """Fetch model version metadata"""
        pass

    @abstractmethod
    async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
        """Fetch model metadata (description, tags, and creator info)"""
        pass

class CivitaiModelMetadataProvider(ModelMetadataProvider):
    """Provider that uses Civitai API for metadata"""

    def __init__(self, civitai_client):
        self.client = civitai_client

    async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
        return await self.client.get_model_by_hash(model_hash)

    async def get_model_versions(self, model_id: str) -> Optional[Dict]:
        return await self.client.get_model_versions(model_id)

    async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
        return await self.client.get_model_version(model_id, version_id)

    async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
        return await self.client.get_model_version_info(version_id)

    async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
        return await self.client.get_model_metadata(model_id)

class CivArchiveModelMetadataProvider(ModelMetadataProvider):
    """Provider that uses CivArchive HTML page parsing for metadata"""

    async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
        """Not supported by CivArchive provider"""
        return None

    async def get_model_versions(self, model_id: str) -> Optional[Dict]:
        """Not supported by CivArchive provider"""
        return None

    async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
        """Get specific model version by parsing CivArchive HTML page"""
        if model_id is None or version_id is None:
            return None

        try:
            # Construct CivArchive URL
            url = f"https://civarchive.com/models/{model_id}?modelVersionId={version_id}"

            downloader = await get_downloader()
            session = await downloader.session
            async with session.get(url) as response:
                if response.status != 200:
                    return None

                html_content = await response.text()

                # Parse HTML to extract JSON data
                soup = BeautifulSoup(html_content, 'html.parser')
                script_tag = soup.find('script', {'id': '__NEXT_DATA__', 'type': 'application/json'})

                if not script_tag:
                    return None

                # Parse JSON content
                json_data = json.loads(script_tag.string)
                model_data = json_data.get('props', {}).get('pageProps', {}).get('model')

                if not model_data or 'version' not in model_data:
                    return None

                # Extract version data as base
                version = model_data['version'].copy()

                # Restructure stats
                if 'downloadCount' in version and 'ratingCount' in version and 'rating' in version:
                    version['stats'] = {
                        'downloadCount': version.pop('downloadCount'),
                        'ratingCount': version.pop('ratingCount'),
                        'rating': version.pop('rating')
                    }

                # Rename trigger to trainedWords
                if 'trigger' in version:
                    version['trainedWords'] = version.pop('trigger')

                # Transform files data to expected format
                if 'files' in version:
                    transformed_files = []
                    for file_data in version['files']:
                        # Find first available mirror (deletedAt is null)
                        available_mirror = None
                        for mirror in file_data.get('mirrors', []):
                            if mirror.get('deletedAt') is None:
                                available_mirror = mirror
                                break

                        # Create transformed file entry
                        transformed_file = {
                            'id': file_data.get('id'),
                            'sizeKB': file_data.get('sizeKB'),
                            'name': available_mirror.get('filename', file_data.get('name')) if available_mirror else file_data.get('name'),
                            'type': file_data.get('type'),
                            'downloadUrl': available_mirror.get('url') if available_mirror else None,
                            'primary': True,
                            'mirrors': file_data.get('mirrors', [])
                        }

                        # Transform hash format
                        if 'sha256' in file_data:
                            transformed_file['hashes'] = {
                                'SHA256': file_data['sha256'].upper()
                            }

                        transformed_files.append(transformed_file)

                    version['files'] = transformed_files

                # Add model information
                version['model'] = {
                    'name': model_data.get('name'),
                    'type': model_data.get('type'),
                    'nsfw': model_data.get('is_nsfw', False),
                    'description': model_data.get('description'),
                    'tags': model_data.get('tags', [])
                }

                version['creator'] = {
                    'username': model_data.get('username'),
                    'image': ''
                }

                # Add source identifier
                version['source'] = 'civarchive'
                version['is_deleted'] = json_data.get('query', {}).get('is_deleted', False)

                return version

        except Exception as e:
            logger.error(f"Error fetching CivArchive model version {model_id}/{version_id}: {e}")
            return None

    async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
        """Not supported by CivArchive provider - requires both model_id and version_id"""
        return None, "CivArchive provider requires both model_id and version_id"

    async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
        """Not supported by CivArchive provider"""
        return None, 404
class SQLiteModelMetadataProvider(ModelMetadataProvider):
    """Provider that uses SQLite database for metadata"""

    def __init__(self, db_path: str):
        self.db_path = db_path

    async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
        """Find model by hash value from SQLite database"""
        async with aiosqlite.connect(self.db_path) as db:
            # Look up in model_files table to get model_id and version_id
            query = """
                SELECT model_id, version_id
                FROM model_files
                WHERE sha256 = ?
                LIMIT 1
            """
            db.row_factory = aiosqlite.Row
            cursor = await db.execute(query, (model_hash.upper(),))
            file_row = await cursor.fetchone()

            if not file_row:
                return None

            # Get version details
            model_id = file_row['model_id']
            version_id = file_row['version_id']

            # Build response in the same format as Civitai API
            return await self._get_version_with_model_data(db, model_id, version_id)
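    # The queries in this provider imply roughly this archive-database layout;
    # a sketch inferred from the SQL above, column lists are not exhaustive.
    ASSUMED_ARCHIVE_SCHEMA = """
    CREATE TABLE models         (id INTEGER PRIMARY KEY, name TEXT, type TEXT,
                                 username TEXT, data TEXT);          -- data: JSON blob
    CREATE TABLE model_versions (id INTEGER PRIMARY KEY, model_id INTEGER,
                                 name TEXT, base_model TEXT, position INTEGER,
                                 published_at TEXT, data TEXT);      -- data: JSON blob
    CREATE TABLE model_files    (sha256 TEXT, model_id INTEGER, version_id INTEGER);
    """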
    async def get_model_versions(self, model_id: str) -> Optional[Dict]:
        """Get all versions of a model from SQLite database"""
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row

            # First check if model exists
            model_query = "SELECT * FROM models WHERE id = ?"
            cursor = await db.execute(model_query, (model_id,))
            model_row = await cursor.fetchone()

            if not model_row:
                return None

            model_data = json.loads(model_row['data'])
            model_type = model_row['type']

            # Get all versions for this model
            versions_query = """
                SELECT id, name, base_model, data, position, published_at
                FROM model_versions
                WHERE model_id = ?
                ORDER BY position ASC
            """
            cursor = await db.execute(versions_query, (model_id,))
            version_rows = await cursor.fetchall()

            if not version_rows:
                return {'modelVersions': [], 'type': model_type}

            # Format versions similar to Civitai API
            model_versions = []
            for row in version_rows:
                version_data = json.loads(row['data'])
                # Add fields from the row to ensure we have the basic fields
                version_entry = {
                    'id': row['id'],
                    'modelId': int(model_id),
                    'name': row['name'],
                    'baseModel': row['base_model'],
                    'model': {
                        'name': model_row['name'],
                        'type': model_type,
                    },
                    'source': 'archive_db'
                }
                # Update with any additional data
                version_entry.update(version_data)
                model_versions.append(version_entry)

            return {
                'modelVersions': model_versions,
                'type': model_type
            }
    async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
        """Get specific model version with additional metadata from SQLite database"""
        if not model_id and not version_id:
            return None

        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row

            # Case 1: Only version_id is provided
            if model_id is None and version_id is not None:
                # First get the version info to extract model_id
                version_query = "SELECT model_id FROM model_versions WHERE id = ?"
                cursor = await db.execute(version_query, (version_id,))
                version_row = await cursor.fetchone()

                if not version_row:
                    return None

                model_id = version_row['model_id']

            # Case 2: model_id is provided but version_id is not
            elif model_id is not None and version_id is None:
                # Find the latest version
                version_query = """
                    SELECT id FROM model_versions
                    WHERE model_id = ?
                    ORDER BY position ASC
                    LIMIT 1
                """
                cursor = await db.execute(version_query, (model_id,))
                version_row = await cursor.fetchone()

                if not version_row:
                    return None

                version_id = version_row['id']

            # Now we have both model_id and version_id, get the full data
            return await self._get_version_with_model_data(db, model_id, version_id)
    async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
        """Fetch model version metadata from SQLite database"""
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row

            # Get version details
            version_query = "SELECT model_id FROM model_versions WHERE id = ?"
            cursor = await db.execute(version_query, (version_id,))
            version_row = await cursor.fetchone()

            if not version_row:
                return None, "Model version not found"

            model_id = version_row['model_id']

            # Build complete version data with model info
            version_data = await self._get_version_with_model_data(db, model_id, version_id)
            return version_data, None
    async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
        """Fetch model metadata from SQLite database"""
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = aiosqlite.Row

            # Get model details
            model_query = "SELECT name, type, data, username FROM models WHERE id = ?"
            cursor = await db.execute(model_query, (model_id,))
            model_row = await cursor.fetchone()

            if not model_row:
                return None, 404

            # Parse data JSON
            try:
                model_data = json.loads(model_row['data'])

                # Extract relevant metadata
                metadata = {
                    "description": model_data.get("description", "No model description available"),
                    "tags": model_data.get("tags", []),
                    "creator": {
                        "username": model_row['username'] or model_data.get("creator", {}).get("username"),
                        "image": model_data.get("creator", {}).get("image")
                    }
                }

                return metadata, 200
            except json.JSONDecodeError:
                return None, 500
    async def _get_version_with_model_data(self, db, model_id, version_id) -> Optional[Dict]:
        """Helper to build version data with model information"""
        # Get version details
        version_query = "SELECT name, base_model, data FROM model_versions WHERE id = ? AND model_id = ?"
        cursor = await db.execute(version_query, (version_id, model_id))
        version_row = await cursor.fetchone()

        if not version_row:
            return None

        # Get model details
        model_query = "SELECT name, type, data, username FROM models WHERE id = ?"
        cursor = await db.execute(model_query, (model_id,))
        model_row = await cursor.fetchone()

        if not model_row:
            return None

        # Parse JSON data
        try:
            version_data = json.loads(version_row['data'])
            model_data = json.loads(model_row['data'])

            # Build response
            result = {
                "id": int(version_id),
                "modelId": int(model_id),
                "name": version_row['name'],
                "baseModel": version_row['base_model'],
                "model": {
                    "name": model_row['name'],
                    "description": model_data.get("description"),
                    "type": model_row['type'],
                    "tags": model_data.get("tags", [])
                },
                "creator": {
                    "username": model_row['username'] or model_data.get("creator", {}).get("username"),
                    "image": model_data.get("creator", {}).get("image")
                },
                "source": "archive_db"
            }

            # Add any additional fields from version data
            result.update(version_data)

            return result
        except json.JSONDecodeError:
            return None

class FallbackMetadataProvider(ModelMetadataProvider):
    """Try providers in order, return first successful result."""

    def __init__(self, providers: list):
        self.providers = providers

    async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
        for provider in self.providers:
            try:
                result = await provider.get_model_by_hash(model_hash)
                if result:
                    return result
            except Exception:
                continue
        return None

    async def get_model_versions(self, model_id: str) -> Optional[Dict]:
        for provider in self.providers:
            try:
                result = await provider.get_model_versions(model_id)
                if result:
                    return result
            except Exception as e:
                logger.debug(f"Provider failed for get_model_versions: {e}")
                continue
        return None

    async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
        for provider in self.providers:
            try:
                result = await provider.get_model_version(model_id, version_id)
                if result:
                    return result
            except Exception as e:
                logger.debug(f"Provider failed for get_model_version: {e}")
                continue
        return None

    async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
        for provider in self.providers:
            try:
                result, error = await provider.get_model_version_info(version_id)
                if result:
                    return result, error
            except Exception as e:
                logger.debug(f"Provider failed for get_model_version_info: {e}")
                continue
        return None, "No provider could retrieve the data"

    async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
        for provider in self.providers:
            try:
                result, status = await provider.get_model_metadata(model_id)
                if result:
                    return result, status
            except Exception as e:
                logger.debug(f"Provider failed for get_model_metadata: {e}")
                continue
        return None, 404

class ModelMetadataProviderManager:
    """Manager for selecting and using model metadata providers"""

    _instance = None

    @classmethod
    async def get_instance(cls):
        """Get singleton instance of ModelMetadataProviderManager"""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        self.providers = {}
        self.default_provider = None

    def register_provider(self, name: str, provider: ModelMetadataProvider, is_default: bool = False):
        """Register a metadata provider"""
        self.providers[name] = provider
        if is_default or self.default_provider is None:
            self.default_provider = name

    async def get_model_by_hash(self, model_hash: str, provider_name: str = None) -> Optional[Dict]:
        """Find model by hash using specified or default provider"""
        provider = self._get_provider(provider_name)
        return await provider.get_model_by_hash(model_hash)

    async def get_model_versions(self, model_id: str, provider_name: str = None) -> Optional[Dict]:
        """Get model versions using specified or default provider"""
        provider = self._get_provider(provider_name)
        return await provider.get_model_versions(model_id)

    async def get_model_version(self, model_id: int = None, version_id: int = None, provider_name: str = None) -> Optional[Dict]:
        """Get specific model version using specified or default provider"""
        provider = self._get_provider(provider_name)
        return await provider.get_model_version(model_id, version_id)

    async def get_model_version_info(self, version_id: str, provider_name: str = None) -> Tuple[Optional[Dict], Optional[str]]:
        """Fetch model version info using specified or default provider"""
        provider = self._get_provider(provider_name)
        return await provider.get_model_version_info(version_id)

    async def get_model_metadata(self, model_id: str, provider_name: str = None) -> Tuple[Optional[Dict], int]:
        """Fetch model metadata using specified or default provider"""
        provider = self._get_provider(provider_name)
        return await provider.get_model_metadata(model_id)

    def _get_provider(self, provider_name: str = None) -> ModelMetadataProvider:
        """Get provider by name or default provider"""
        if provider_name and provider_name in self.providers:
            return self.providers[provider_name]

        if self.default_provider is None:
            raise ValueError("No default provider set and no valid provider specified")

        return self.providers[self.default_provider]
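# Illustrative wiring sketch (not part of the diff): the provider names and the
# archive DB path below are hypothetical; only the APIs defined above are assumed.
async def _example_setup():
    manager = await ModelMetadataProviderManager.get_instance()
    archive = SQLiteModelMetadataProvider('/path/to/archive.db')  # hypothetical path
    manager.register_provider('archive_db', archive)
    manager.register_provider('fallback', FallbackMetadataProvider([archive]), is_default=True)
    # Resolve a version through whichever provider answers first
    return await manager.get_model_version(version_id=12345)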
@@ -8,7 +8,7 @@ from typing import List, Dict, Optional, Type, Set

from ..utils.models import BaseModelMetadata
from ..config import config
-from ..utils.file_utils import find_preview_file
+from ..utils.file_utils import find_preview_file, get_preview_extension
from ..utils.metadata_manager import MetadataManager
from .model_cache import ModelCache
from .model_hash_index import ModelHashIndex
@@ -302,6 +302,13 @@ class ModelScanner:
                for tag in model_data['tags']:
                    self._tags_count[tag] = self._tags_count.get(tag, 0) + 1

+            # Log duplicate filename warnings after building the index
+            # duplicate_filenames = self._hash_index.get_duplicate_filenames()
+            # if duplicate_filenames:
+            #     logger.warning(f"Found {len(duplicate_filenames)} filename(s) with duplicates during {self.model_type} cache build:")
+            #     for filename, paths in duplicate_filenames.items():
+            #         logger.warning(f"  Duplicate filename '{filename}': {paths}")
+
            # Update cache
            self._cache.raw_data = raw_data
            loop.run_until_complete(self._cache.resort())
@@ -367,6 +374,13 @@ class ModelScanner:
                for tag in model_data['tags']:
                    self._tags_count[tag] = self._tags_count.get(tag, 0) + 1

+            # Log duplicate filename warnings after building the index
+            # duplicate_filenames = self._hash_index.get_duplicate_filenames()
+            # if duplicate_filenames:
+            #     logger.warning(f"Found {len(duplicate_filenames)} filename(s) with duplicates during {self.model_type} cache build:")
+            #     for filename, paths in duplicate_filenames.items():
+            #         logger.warning(f"  Duplicate filename '{filename}': {paths}")
+
            # Update cache
            self._cache = ModelCache(
                raw_data=raw_data,
@@ -569,12 +583,13 @@ class ModelScanner:
                        for entry in entries:
                            try:
                                if entry.is_file(follow_symlinks=True) and any(entry.name.endswith(ext) for ext in self.file_extensions):
                                    # Use original path instead of real path
                                    file_path = entry.path.replace(os.sep, "/")
-                                    await self._process_single_file(file_path, original_root, models)
+                                    result = await self._process_model_file(file_path, original_root)
+                                    # Only add to models if result is not None (skip corrupted metadata)
+                                    if result:
+                                        models.append(result)
                                    await asyncio.sleep(0)
                                elif entry.is_dir(follow_symlinks=True):
                                    # For directories, continue scanning with original path
                                    await scan_recursive(entry.path, visited_paths)
                            except Exception as e:
                                logger.error(f"Error processing entry {entry.path}: {e}")
@@ -583,15 +598,6 @@ class ModelScanner:

        await scan_recursive(root_path, set())
        return models

-    async def _process_single_file(self, file_path: str, root_path: str, models: list):
-        """Process a single file and add to results list"""
-        try:
-            result = await self._process_model_file(file_path, root_path)
-            if result:
-                models.append(result)
-        except Exception as e:
-            logger.error(f"Error processing {file_path}: {e}")
-
    def is_initializing(self) -> bool:
        """Check if the scanner is currently initializing"""
@@ -613,10 +619,18 @@ class ModelScanner:
            return os.path.dirname(rel_path).replace(os.path.sep, '/')
        return ''

    # Common methods shared between scanners
+    def adjust_metadata(self, metadata, file_path, root_path):
+        """Hook for subclasses: adjust metadata during scanning"""
+        return metadata
+
    async def _process_model_file(self, file_path: str, root_path: str) -> Dict:
        """Process a single model file and return its metadata"""
-        metadata = await MetadataManager.load_metadata(file_path, self.model_class)
+        metadata, should_skip = await MetadataManager.load_metadata(file_path, self.model_class)
+
+        if should_skip:
+            # Metadata file exists but cannot be parsed - skip this model
+            logger.warning(f"Skipping model {file_path} due to corrupted metadata file")
+            return None

        if metadata is None:
            civitai_info_path = f"{os.path.splitext(file_path)[0]}.civitai.info"
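# Illustrative use of the new hook (hypothetical subclass, not from the repository):
# a concrete scanner can normalize a field without touching the base scan loop.
class MyScanner(ModelScanner):
    def adjust_metadata(self, metadata, file_path, root_path):
        # e.g. trim whitespace from the base model name before it is cached
        if getattr(metadata, 'base_model', None):
            metadata.base_model = metadata.base_model.strip()
        return metadata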
@@ -632,7 +646,7 @@ class ModelScanner:

                    metadata = self.model_class.from_civitai_info(version_info, file_info, file_path)
                    metadata.preview_url = find_preview_file(file_name, os.path.dirname(file_path))
-                    await MetadataManager.save_metadata(file_path, metadata, True)
+                    await MetadataManager.save_metadata(file_path, metadata)
                    logger.debug(f"Created metadata from .civitai.info for {file_path}")
                except Exception as e:
                    logger.error(f"Error creating metadata from .civitai.info for {file_path}: {e}")
@@ -659,7 +673,7 @@ class ModelScanner:
                    metadata.modelDescription = version_info['model']['description']

                    # Save the updated metadata
-                    await MetadataManager.save_metadata(file_path, metadata, True)
+                    await MetadataManager.save_metadata(file_path, metadata)
                    logger.debug(f"Updated metadata with civitai info for {file_path}")
                except Exception as e:
                    logger.error(f"Error restoring civitai data from .civitai.info for {file_path}: {e}")
@@ -667,113 +681,30 @@ class ModelScanner:
        if metadata is None:
            metadata = await self._create_default_metadata(file_path)

+        # Hook: allow subclasses to adjust metadata
+        metadata = self.adjust_metadata(metadata, file_path, root_path)
+
        model_data = metadata.to_dict()

        # Skip excluded models
        if model_data.get('exclude', False):
            self._excluded_models.append(model_data['file_path'])
            return None

        await self._fetch_missing_metadata(file_path, model_data)

        # Check for duplicate filename before adding to hash index
        filename = os.path.splitext(os.path.basename(file_path))[0]
        existing_hash = self._hash_index.get_hash_by_filename(filename)
        if existing_hash and existing_hash != model_data.get('sha256', '').lower():
            existing_path = self._hash_index.get_path(existing_hash)
            if existing_path and existing_path != file_path:
                logger.warning(f"Duplicate filename detected: '{filename}' - files: '{existing_path}' and '{file_path}'")

        rel_path = os.path.relpath(file_path, root_path)
        folder = os.path.dirname(rel_path)
        model_data['folder'] = folder.replace(os.path.sep, '/')

        return model_data

-    async def _fetch_missing_metadata(self, file_path: str, model_data: Dict) -> None:
-        """Fetch missing description and tags from Civitai if needed"""
-        try:
-            if model_data.get('civitai_deleted', False):
-                logger.debug(f"Skipping metadata fetch for {file_path}: marked as deleted on Civitai")
-                return
-
-            needs_metadata_update = False
-            model_id = None
-
-            if model_data.get('civitai'):
-                model_id = model_data['civitai'].get('modelId')
-
-            if model_id:
-                model_id = str(model_id)
-                tags_missing = not model_data.get('tags') or len(model_data.get('tags', [])) == 0
-                desc_missing = not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")
-                # TODO: not for now, but later we should check if the creator is missing
-                # creator_missing = not model_data.get('civitai', {}).get('creator')
-                creator_missing = False
-                needs_metadata_update = tags_missing or desc_missing or creator_missing
-
-            if needs_metadata_update and model_id:
-                logger.debug(f"Fetching missing metadata for {file_path} with model ID {model_id}")
-                from ..services.civitai_client import CivitaiClient
-                client = CivitaiClient()
-
-                model_metadata, status_code = await client.get_model_metadata(model_id)
-                await client.close()
-
-                if status_code == 404:
-                    logger.warning(f"Model {model_id} appears to be deleted from Civitai (404 response)")
-                    model_data['civitai_deleted'] = True
-
-                    await MetadataManager.save_metadata(file_path, model_data)
-
-                elif model_metadata:
-                    logger.debug(f"Updating metadata for {file_path} with model ID {model_id}")
-
-                    if model_metadata.get('tags') and (not model_data.get('tags') or len(model_data.get('tags', [])) == 0):
-                        model_data['tags'] = model_metadata['tags']
-
-                    if model_metadata.get('description') and (not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")):
-                        model_data['modelDescription'] = model_metadata['description']
-
-                    model_data['civitai']['creator'] = model_metadata['creator']
-
-                    await MetadataManager.save_metadata(file_path, model_data, True)
-        except Exception as e:
-            logger.error(f"Failed to update metadata from Civitai for {file_path}: {e}")
-
-    async def _scan_directory(self, root_path: str) -> List[Dict]:
-        """Base implementation for directory scanning"""
-        models = []
-        original_root = root_path
-
-        async def scan_recursive(path: str, visited_paths: set):
-            try:
-                real_path = os.path.realpath(path)
-                if real_path in visited_paths:
-                    logger.debug(f"Skipping already visited path: {path}")
-                    return
-                visited_paths.add(real_path)
-
-                with os.scandir(path) as it:
-                    entries = list(it)
-                    for entry in entries:
-                        try:
-                            if entry.is_file(follow_symlinks=True):
-                                ext = os.path.splitext(entry.name)[1].lower()
-                                if ext in self.file_extensions:
-                                    file_path = entry.path.replace(os.sep, "/")
-                                    await self._process_single_file(file_path, original_root, models)
-                                    await asyncio.sleep(0)
-                            elif entry.is_dir(follow_symlinks=True):
-                                await scan_recursive(entry.path, visited_paths)
-                        except Exception as e:
-                            logger.error(f"Error processing entry {entry.path}: {e}")
-            except Exception as e:
-                logger.error(f"Error scanning {path}: {e}")
-
-        await scan_recursive(root_path, set())
-        return models
-
-    async def _process_single_file(self, file_path: str, root_path: str, models_list: list):
-        """Process a single file and add to results list"""
-        try:
-            result = await self._process_model_file(file_path, root_path)
-            if result:
-                models_list.append(result)
-        except Exception as e:
-            logger.error(f"Error processing {file_path}: {e}")

    async def add_model_to_cache(self, metadata_dict: Dict, folder: str = '') -> bool:
        """Add a model to the cache

@@ -809,8 +740,16 @@ class ModelScanner:
            logger.error(f"Error adding model to cache: {e}")
            return False

-    async def move_model(self, source_path: str, target_path: str) -> bool:
-        """Move a model and its associated files to a new location"""
+    async def move_model(self, source_path: str, target_path: str) -> Optional[str]:
+        """Move a model and its associated files to a new location
+
+        Args:
+            source_path: Original file path
+            target_path: Target directory path
+
+        Returns:
+            Optional[str]: New file path if successful, None if failed
+        """
        try:
            source_path = source_path.replace(os.sep, '/')
            target_path = target_path.replace(os.sep, '/')
@@ -819,14 +758,28 @@ class ModelScanner:

            if not file_ext or file_ext.lower() not in self.file_extensions:
                logger.error(f"Invalid file extension for model: {file_ext}")
-                return False
+                return None

            base_name = os.path.splitext(os.path.basename(source_path))[0]
            source_dir = os.path.dirname(source_path)

            os.makedirs(target_path, exist_ok=True)

-            target_file = os.path.join(target_path, f"{base_name}{file_ext}").replace(os.sep, '/')
+            def get_source_hash():
+                return self.get_hash_by_path(source_path)
+
+            # Check for filename conflicts and auto-rename if necessary
+            from ..utils.models import BaseModelMetadata
+            final_filename = BaseModelMetadata.generate_unique_filename(
+                target_path, base_name, file_ext, get_source_hash
+            )
+
+            target_file = os.path.join(target_path, final_filename).replace(os.sep, '/')
+            final_base_name = os.path.splitext(final_filename)[0]
+
+            # Log if filename was changed due to conflict
+            if final_filename != f"{base_name}{file_ext}":
+                logger.info(f"Renamed {base_name}{file_ext} to {final_filename} to avoid filename conflict")

            real_source = os.path.realpath(source_path)
            real_target = os.path.realpath(target_file)
@@ -843,12 +796,17 @@ class ModelScanner:
                for file in os.listdir(source_dir):
                    if file.startswith(base_name + ".") and file != os.path.basename(source_path):
                        source_file_path = os.path.join(source_dir, file)
+                        # Generate new filename with the same base name as the model file
+                        file_suffix = file[len(base_name):]  # Get the part after base_name (e.g., ".metadata.json", ".preview.png")
+                        new_associated_filename = f"{final_base_name}{file_suffix}"
+                        target_associated_path = os.path.join(target_path, new_associated_filename)

                        # Store metadata file path for special handling
                        if file == f"{base_name}.metadata.json":
                            source_metadata = source_file_path
-                            moved_metadata_path = os.path.join(target_path, file)
+                            moved_metadata_path = target_associated_path
                        else:
-                            files_to_move.append((source_file_path, os.path.join(target_path, file)))
+                            files_to_move.append((source_file_path, target_associated_path))
            except Exception as e:
                logger.error(f"Error listing files in {source_dir}: {e}")
@@ -870,11 +828,11 @@ class ModelScanner:

            await self.update_single_model_cache(source_path, target_file, metadata)

-            return True
+            return target_file

        except Exception as e:
            logger.error(f"Error moving model: {e}", exc_info=True)
-            return False
+            return None

    async def _update_metadata_paths(self, metadata_path: str, model_path: str) -> Dict:
        """Update file paths in metadata file"""
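# Callers now branch on the returned path instead of a bool; a sketch
# (`scanner` stands for any ModelScanner instance, the paths are hypothetical):
async def _example_move(scanner):
    new_path = await scanner.move_model('/loras/a.safetensors', '/loras/sorted')
    if new_path is None:
        logger.error("Move failed")
    else:
        logger.info(f"Model now lives at {new_path}")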
@@ -883,12 +841,15 @@ class ModelScanner:
            metadata = json.load(f)

        metadata['file_path'] = model_path.replace(os.sep, '/')
+        # Update file_name to match the new filename
+        metadata['file_name'] = os.path.splitext(os.path.basename(model_path))[0]

        if 'preview_url' in metadata and metadata['preview_url']:
            preview_dir = os.path.dirname(model_path)
-            preview_name = os.path.splitext(os.path.basename(metadata['preview_url']))[0]
-            preview_ext = os.path.splitext(metadata['preview_url'])[1]
-            new_preview_path = os.path.join(preview_dir, f"{preview_name}{preview_ext}")
+            # Update preview filename to match the new base name
+            new_base_name = os.path.splitext(os.path.basename(model_path))[0]
+            preview_ext = get_preview_extension(metadata['preview_url'])
+            new_preview_path = os.path.join(preview_dir, f"{new_base_name}{preview_ext}")
            metadata['preview_url'] = new_preview_path.replace(os.sep, '/')

        await MetadataManager.save_metadata(metadata_path, metadata)
@@ -955,8 +916,16 @@ class ModelScanner:

    def get_hash_by_path(self, file_path: str) -> Optional[str]:
        """Get hash for a model by its file path"""
-        return self._hash_index.get_hash(file_path)
+        if self._cache is None or not self._cache.raw_data:
+            return None
+
+        # Iterate through cache data to find matching file path
+        for model_data in self._cache.raw_data:
+            if model_data.get('file_path') == file_path:
+                return model_data.get('sha256')
+
+        return None

    def get_hash_by_filename(self, filename: str) -> Optional[str]:
        """Get hash for a model by its filename without path"""
        return self._hash_index.get_hash_by_filename(filename)
@@ -1194,13 +1163,12 @@ class ModelScanner:
            if len(self._hash_index._duplicate_filenames[file_name]) <= 1:
                del self._hash_index._duplicate_filenames[file_name]

-    async def check_model_version_exists(self, model_id: int, model_version_id: int) -> bool:
+    async def check_model_version_exists(self, model_version_id: int) -> bool:
        """Check if a specific model version exists in the cache

        Args:
-            model_id: Civitai model ID
            model_version_id: Civitai model version ID

        Returns:
            bool: True if the model version exists, False otherwise
        """
@@ -1208,13 +1176,11 @@ class ModelScanner:
            cache = await self.get_cached_data()
            if not cache or not cache.raw_data:
                return False

            for item in cache.raw_data:
-                if (item.get('civitai') and
-                    item['civitai'].get('modelId') == model_id and
-                    item['civitai'].get('id') == model_version_id):
+                if item.get('civitai') and item['civitai'].get('id') == model_version_id:
                    return True

            return False
        except Exception as e:
            logger.error(f"Error checking model version existence: {e}")
@@ -8,6 +8,7 @@ from ..config import config
from .recipe_cache import RecipeCache
from .service_registry import ServiceRegistry
from .lora_scanner import LoraScanner
+from .metadata_service import get_default_metadata_provider
from ..utils.utils import fuzzy_match
from natsort import natsorted
import sys
@@ -431,13 +432,13 @@ class RecipeScanner:
    async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]:
        """Get hash from Civitai API"""
        try:
-            # Get CivitaiClient from ServiceRegistry
-            civitai_client = await self._get_civitai_client()
-            if not civitai_client:
-                logger.error("Failed to get CivitaiClient from ServiceRegistry")
+            # Get metadata provider instead of civitai client directly
+            metadata_provider = await get_default_metadata_provider()
+            if not metadata_provider:
+                logger.error("Failed to get metadata provider")
                return None

-            version_info, error_msg = await civitai_client.get_model_version_info(model_version_id)
+            version_info, error_msg = await metadata_provider.get_model_version_info(model_version_id)

            if not version_info:
                if error_msg and "model not found" in error_msg.lower():
py/services/server_i18n.py (new file, 114 lines)
@@ -0,0 +1,114 @@
import os
import json
import logging
from typing import Dict, Any, Optional

logger = logging.getLogger(__name__)

class ServerI18nManager:
    """Server-side internationalization manager for template rendering"""

    def __init__(self):
        self.translations = {}
        self.current_locale = 'en'
        self._load_translations()

    def _load_translations(self):
        """Load all translation files from the locales directory"""
        i18n_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
            'locales'
        )

        if not os.path.exists(i18n_path):
            logger.warning(f"I18n directory not found: {i18n_path}")
            return

        # Load all available locale files
        for filename in os.listdir(i18n_path):
            if filename.endswith('.json'):
                locale_code = filename[:-5]  # Remove .json extension
                try:
                    self._load_locale_file(i18n_path, filename, locale_code)
                except Exception as e:
                    logger.error(f"Error loading locale file {filename}: {e}")

    def _load_locale_file(self, path: str, filename: str, locale_code: str):
        """Load a single locale JSON file"""
        file_path = os.path.join(path, filename)

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                translations = json.load(f)

            self.translations[locale_code] = translations
            logger.debug(f"Loaded translations for {locale_code} from {filename}")

        except Exception as e:
            logger.error(f"Error parsing locale file {filename}: {e}")

    def set_locale(self, locale: str):
        """Set the current locale"""
        if locale in self.translations:
            self.current_locale = locale
        else:
            logger.warning(f"Locale {locale} not found, using 'en'")
            self.current_locale = 'en'

    def get_translation(self, key: str, params: Dict[str, Any] = None, **kwargs) -> str:
        """Get translation for a key with optional parameters (supports both dict and keyword args)"""
        # Merge kwargs into params for convenience
        if params is None:
            params = {}
        if kwargs:
            params = {**params, **kwargs}

        if self.current_locale not in self.translations:
            return key

        # Navigate through nested object using dot notation
        keys = key.split('.')
        value = self.translations[self.current_locale]

        for k in keys:
            if isinstance(value, dict) and k in value:
                value = value[k]
            else:
                # Fallback to English if current locale doesn't have the key
                if self.current_locale != 'en' and 'en' in self.translations:
                    en_value = self.translations['en']
                    for k in keys:
                        if isinstance(en_value, dict) and k in en_value:
                            en_value = en_value[k]
                        else:
                            return key
                    value = en_value
                else:
                    return key
                break

        if not isinstance(value, str):
            return key

        # Replace parameters if provided
        if params:
            for param_key, param_value in params.items():
                placeholder = f"{{{param_key}}}"
                double_placeholder = f"{{{{{param_key}}}}}"
                value = value.replace(placeholder, str(param_value))
                value = value.replace(double_placeholder, str(param_value))

        return value

    def get_available_locales(self) -> list:
        """Get list of available locales"""
        return list(self.translations.keys())

    def create_template_filter(self):
        """Create a Jinja2 filter function for templates"""
        def t_filter(key: str, **params) -> str:
            return self.get_translation(key, params)
        return t_filter

# Create global instance
server_i18n = ServerI18nManager()
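# Illustrative usage of the manager above (the translation key and the Jinja2
# environment wiring are assumptions; only ServerI18nManager's own methods are
# taken from the file):
server_i18n.set_locale('en')
message = server_i18n.get_translation('download.progress', current=3, total=10)
# In a Jinja2 setup the filter could be registered as:
#     env.filters['t'] = server_i18n.create_template_filter()
# and used in templates as {{ 'settings.title' | t }}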
@@ -9,6 +9,7 @@ class SettingsManager:
    def __init__(self):
        self.settings_file = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'settings.json')
        self.settings = self._load_settings()
+        self._migrate_download_path_template()
        self._auto_set_default_roots()
        self._check_environment_variables()
@@ -22,6 +23,24 @@ class SettingsManager:
            logger.error(f"Error loading settings: {e}")
            return self._get_default_settings()

+    def _migrate_download_path_template(self):
+        """Migrate old download_path_template to new download_path_templates"""
+        old_template = self.settings.get('download_path_template')
+        templates = self.settings.get('download_path_templates')
+
+        # If old template exists and new templates don't exist, migrate
+        if old_template is not None and not templates:
+            logger.info("Migrating download_path_template to download_path_templates")
+            self.settings['download_path_templates'] = {
+                'lora': old_template,
+                'checkpoint': old_template,
+                'embedding': old_template
+            }
+            # Remove old setting
+            del self.settings['download_path_template']
+            self._save_settings()
+            logger.info("Migration completed")
+
    def _auto_set_default_roots(self):
        """Auto set default root paths if only one folder is present and default is empty."""
        folder_paths = self.settings.get('folder_paths', {})
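# Effect of the migration on settings.json, sketched with the default template:
#   before: {"download_path_template": "{base_model}/{first_tag}", ...}
#   after:  {"download_path_templates": {"lora":       "{base_model}/{first_tag}",
#                                        "checkpoint": "{base_model}/{first_tag}",
#                                        "embedding":  "{base_model}/{first_tag}"}, ...}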
@@ -61,7 +80,14 @@ class SettingsManager:
        """Return default settings"""
        return {
            "civitai_api_key": "",
-            "show_only_sfw": False
+            "show_only_sfw": False,
+            "language": "en",
+            "enable_metadata_archive_db": False,  # Enable metadata archive database
+            "proxy_enabled": False,  # Enable app-level proxy
+            "proxy_host": "",  # Proxy host
+            "proxy_port": "",  # Proxy port
+            "proxy_username": "",  # Proxy username (optional)
+            "proxy_password": "",  # Proxy password (optional)
+            "proxy_type": "http"  # Proxy type: http, https, socks4, socks5
        }

    def get(self, key: str, default: Any = None) -> Any:
@@ -73,6 +99,13 @@ class SettingsManager:
        self.settings[key] = value
        self._save_settings()

+    def delete(self, key: str) -> None:
+        """Delete setting key and save"""
+        if key in self.settings:
+            del self.settings[key]
+            self._save_settings()
+            logger.info(f"Deleted setting: {key}")
+
    def _save_settings(self) -> None:
        """Save settings to file"""
        try:
@@ -81,4 +114,53 @@ class SettingsManager:
        except Exception as e:
            logger.error(f"Error saving settings: {e}")

+    def get_download_path_template(self, model_type: str) -> str:
+        """Get download path template for specific model type
+
+        Args:
+            model_type: The type of model ('lora', 'checkpoint', 'embedding')
+
+        Returns:
+            Template string for the model type, defaults to '{base_model}/{first_tag}'
+        """
+        templates = self.settings.get('download_path_templates', {})
+
+        # Handle edge case where templates might be stored as JSON string
+        if isinstance(templates, str):
+            try:
+                # Try to parse JSON string
+                parsed_templates = json.loads(templates)
+                if isinstance(parsed_templates, dict):
+                    # Update settings with parsed dictionary
+                    self.settings['download_path_templates'] = parsed_templates
+                    self._save_settings()
+                    templates = parsed_templates
+                    logger.info("Successfully parsed download_path_templates from JSON string")
+                else:
+                    raise ValueError("Parsed JSON is not a dictionary")
+            except (json.JSONDecodeError, ValueError) as e:
+                # If parsing fails, set default values
+                logger.warning(f"Failed to parse download_path_templates JSON string: {e}. Setting default values.")
+                default_template = '{base_model}/{first_tag}'
+                templates = {
+                    'lora': default_template,
+                    'checkpoint': default_template,
+                    'embedding': default_template
+                }
+                self.settings['download_path_templates'] = templates
+                self._save_settings()
+
+        # Ensure templates is a dictionary
+        if not isinstance(templates, dict):
+            default_template = '{base_model}/{first_tag}'
+            templates = {
+                'lora': default_template,
+                'checkpoint': default_template,
+                'embedding': default_template
+            }
+            self.settings['download_path_templates'] = templates
+            self._save_settings()
+
+        return templates.get(model_type, '{base_model}/{first_tag}')
+
settings = SettingsManager()
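# Illustrative call of the getter above (placeholder expansion happens in the
# download pipeline, which is not part of this hunk):
lora_template = settings.get_download_path_template('lora')
# -> '{base_model}/{first_tag}' unless the user configured something else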
@@ -16,6 +16,9 @@ class WebSocketManager:
        self._download_websockets: Dict[str, web.WebSocketResponse] = {}  # New dict for download-specific clients
        # Add progress tracking dictionary
        self._download_progress: Dict[str, Dict] = {}
+        # Add auto-organize progress tracking
+        self._auto_organize_progress: Optional[Dict] = None
+        self._auto_organize_lock = asyncio.Lock()

    async def handle_connection(self, request: web.Request) -> web.WebSocketResponse:
        """Handle new WebSocket connection"""

@@ -134,6 +137,33 @@ class WebSocketManager:
        except Exception as e:
            logger.error(f"Error sending download progress: {e}")

+    async def broadcast_auto_organize_progress(self, data: Dict):
+        """Broadcast auto-organize progress to connected clients"""
+        # Store progress data in memory
+        self._auto_organize_progress = data
+
+        # Broadcast via WebSocket
+        await self.broadcast(data)
+
+    def get_auto_organize_progress(self) -> Optional[Dict]:
+        """Get current auto-organize progress"""
+        return self._auto_organize_progress
+
+    def cleanup_auto_organize_progress(self):
+        """Clear auto-organize progress data"""
+        self._auto_organize_progress = None
+
+    def is_auto_organize_running(self) -> bool:
+        """Check if auto-organize is currently running"""
+        if not self._auto_organize_progress:
+            return False
+        status = self._auto_organize_progress.get('status')
+        return status in ['started', 'processing', 'cleaning']
+
+    async def get_auto_organize_lock(self):
+        """Get the auto-organize lock"""
+        return self._auto_organize_lock
+
    def get_download_progress(self, download_id: str) -> Optional[Dict]:
        """Get progress information for a specific download"""
        return self._download_progress.get(download_id)
py/services/websocket_progress_callback.py (new file, 11 lines)
@@ -0,0 +1,11 @@
from typing import Dict, Any
from .model_file_service import ProgressCallback
from .websocket_manager import ws_manager


class WebSocketProgressCallback(ProgressCallback):
    """WebSocket implementation of progress callback"""

    async def on_progress(self, progress_data: Dict[str, Any]) -> None:
        """Send progress data via WebSocket"""
        await ws_manager.broadcast_auto_organize_progress(progress_data)
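# Illustrative sketch: any code holding a ProgressCallback can now stream
# auto-organize progress to WebSocket clients (the payload keys below mirror
# the status values checked in WebSocketManager.is_auto_organize_running):
async def _example_report():
    callback = WebSocketProgressCallback()
    await callback.on_progress({'status': 'processing', 'completed': 5, 'total': 50})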
@@ -48,9 +48,13 @@ SUPPORTED_MEDIA_EXTENSIONS = {
# Valid Lora types
VALID_LORA_TYPES = ['lora', 'locon', 'dora']

+# Auto-organize settings
+AUTO_ORGANIZE_BATCH_SIZE = 50  # Process models in batches to avoid overwhelming the system
+
# Civitai model tags in priority order for subfolder organization
CIVITAI_MODEL_TAGS = [
-    'character', 'style', 'concept', 'clothing', 'base model',
-    'poses', 'background', 'tool', 'vehicle', 'buildings',
+    'character', 'concept', 'clothing',
+    'realistic', 'anime', 'toon', 'furry', 'style',
+    'poses', 'background', 'tool', 'vehicle', 'buildings',
    'objects', 'assets', 'animal', 'action'
]
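# Sketch of how a priority-ordered list like this is typically consumed when
# picking a subfolder (the helper itself is hypothetical, not from the diff):
def pick_first_tag(model_tags):
    for tag in CIVITAI_MODEL_TAGS:
        if tag in model_tags:
            return tag
    return 'other'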
@@ -3,11 +3,13 @@ import os
import asyncio
import json
import time
import aiohttp
from aiohttp import web
from ..services.service_registry import ServiceRegistry
from ..utils.metadata_manager import MetadataManager
from .example_images_processor import ExampleImagesProcessor
from .example_images_metadata import MetadataUpdater
+from ..services.websocket_manager import ws_manager  # Add this import at the top
+from ..services.downloader import get_downloader

logger = logging.getLogger(__name__)
@@ -24,7 +26,8 @@ download_progress = {
    'start_time': None,
    'end_time': None,
    'processed_models': set(),  # Track models that have been processed
-    'refreshed_models': set()  # Track models that had metadata refreshed
+    'refreshed_models': set(),  # Track models that had metadata refreshed
+    'failed_models': set()  # Track models that failed to download after metadata refresh
}

class DownloadManager:

@@ -50,6 +53,7 @@ class DownloadManager:
        response_progress = download_progress.copy()
        response_progress['processed_models'] = list(download_progress['processed_models'])
        response_progress['refreshed_models'] = list(download_progress['refreshed_models'])
+        response_progress['failed_models'] = list(download_progress['failed_models'])

        return web.json_response({
            'success': False,
@@ -91,12 +95,15 @@ class DownloadManager:
                with open(progress_file, 'r', encoding='utf-8') as f:
                    saved_progress = json.load(f)
                download_progress['processed_models'] = set(saved_progress.get('processed_models', []))
-                logger.info(f"Loaded previous progress, {len(download_progress['processed_models'])} models already processed")
+                download_progress['failed_models'] = set(saved_progress.get('failed_models', []))
+                logger.debug(f"Loaded previous progress, {len(download_progress['processed_models'])} models already processed, {len(download_progress['failed_models'])} models marked as failed")
            except Exception as e:
                logger.error(f"Failed to load progress file: {e}")
                download_progress['processed_models'] = set()
+                download_progress['failed_models'] = set()
        else:
            download_progress['processed_models'] = set()
+            download_progress['failed_models'] = set()

        # Start the download task
        is_downloading = True
@@ -113,6 +120,7 @@ class DownloadManager:
        response_progress = download_progress.copy()
        response_progress['processed_models'] = list(download_progress['processed_models'])
        response_progress['refreshed_models'] = list(download_progress['refreshed_models'])
+        response_progress['failed_models'] = list(download_progress['failed_models'])

        return web.json_response({
            'success': True,

@@ -136,6 +144,7 @@ class DownloadManager:
        response_progress = download_progress.copy()
        response_progress['processed_models'] = list(download_progress['processed_models'])
        response_progress['refreshed_models'] = list(download_progress['refreshed_models'])
+        response_progress['failed_models'] = list(download_progress['failed_models'])

        return web.json_response({
            'success': True,
@@ -190,19 +199,8 @@ class DownloadManager:
        """Download example images for all models"""
        global is_downloading, download_progress

-        # Create independent download session
-        connector = aiohttp.TCPConnector(
-            ssl=True,
-            limit=3,
-            force_close=False,
-            enable_cleanup_closed=True
-        )
-        timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=60)
-        independent_session = aiohttp.ClientSession(
-            connector=connector,
-            trust_env=True,
-            timeout=timeout
-        )
+        # Get unified downloader
+        downloader = await get_downloader()

        try:
            # Get scanners
@@ -230,14 +228,14 @@ class DownloadManager:

            # Update total count
            download_progress['total'] = len(all_models)
-            logger.info(f"Found {download_progress['total']} models to process")
+            logger.debug(f"Found {download_progress['total']} models to process")

            # Process each model
            for i, (scanner_type, model, scanner) in enumerate(all_models):
                # Main logic for processing model is here, but actual operations are delegated to other classes
                was_remote_download = await DownloadManager._process_model(
                    scanner_type, model, scanner,
-                    output_dir, optimize, independent_session
+                    output_dir, optimize, downloader
                )

                # Update progress
@@ -250,7 +248,7 @@ class DownloadManager:
            # Mark as completed
            download_progress['status'] = 'completed'
            download_progress['end_time'] = time.time()
-            logger.info(f"Example images download completed: {download_progress['completed']}/{download_progress['total']} models processed")
+            logger.debug(f"Example images download completed: {download_progress['completed']}/{download_progress['total']} models processed")

        except Exception as e:
            error_msg = f"Error during example images download: {str(e)}"
@@ -261,12 +259,6 @@ class DownloadManager:
            download_progress['end_time'] = time.time()

        finally:
-            # Close the independent session
-            try:
-                await independent_session.close()
-            except Exception as e:
-                logger.error(f"Error closing download session: {e}")
-
            # Save final progress to file
            try:
                DownloadManager._save_progress(output_dir)
@@ -277,7 +269,7 @@ class DownloadManager:
        is_downloading = False

    @staticmethod
-    async def _process_model(scanner_type, model, scanner, output_dir, optimize, independent_session):
+    async def _process_model(scanner_type, model, scanner, output_dir, optimize, downloader):
        """Process a single model download"""
        global download_progress
@@ -299,6 +291,11 @@ class DownloadManager:
            # Update current model info
            download_progress['current_model'] = f"{model_name} ({model_hash[:8]})"

+            # Skip if already in failed models
+            if model_hash in download_progress['failed_models']:
+                logger.debug(f"Skipping known failed model: {model_name}")
+                return False
+
            # Skip if already processed AND directory exists with files
            if model_hash in download_progress['processed_models']:
                model_dir = os.path.join(output_dir, model_hash)
@@ -308,6 +305,8 @@ class DownloadManager:
                    return False
                else:
                    logger.info(f"Model {model_name} marked as processed but folder empty or missing, reprocessing")
+                    # Remove from processed models since we need to reprocess
+                    download_progress['processed_models'].discard(model_hash)

            # Create model directory
            model_dir = os.path.join(output_dir, model_hash)
@@ -331,7 +330,7 @@ class DownloadManager:
                images = model.get('civitai', {}).get('images', [])

                success, is_stale = await ExampleImagesProcessor.download_model_images(
-                    model_hash, model_name, images, model_dir, optimize, independent_session
+                    model_hash, model_name, images, model_dir, optimize, downloader
                )

                # If metadata is stale, try to refresh it
@@ -349,14 +348,25 @@ class DownloadManager:
                    # Retry download with updated metadata
                    updated_images = updated_model.get('civitai', {}).get('images', [])
                    success, _ = await ExampleImagesProcessor.download_model_images(
-                        model_hash, model_name, updated_images, model_dir, optimize, independent_session
+                        model_hash, model_name, updated_images, model_dir, optimize, downloader
                    )

                    download_progress['refreshed_models'].add(model_hash)

-                # Only mark as processed if all images were downloaded successfully
+                # Mark as processed if successful, or as failed if unsuccessful after refresh
                if success:
                    download_progress['processed_models'].add(model_hash)
+                else:
+                    # If we refreshed metadata and still failed, mark as permanently failed
+                    if model_hash in download_progress['refreshed_models']:
+                        download_progress['failed_models'].add(model_hash)
+                        logger.info(f"Marking model {model_name} as failed after metadata refresh")

                return True  # Return True to indicate a remote download happened
+            else:
+                # No civitai data or images available, mark as failed to avoid future attempts
+                download_progress['failed_models'].add(model_hash)
+                logger.debug(f"No civitai images available for model {model_name}, marking as failed")

            # Save progress periodically
            if download_progress['completed'] % 10 == 0 or download_progress['completed'] == download_progress['total'] - 1:
@@ -391,6 +401,7 @@ class DownloadManager:
            progress_data = {
                'processed_models': list(download_progress['processed_models']),
                'refreshed_models': list(download_progress['refreshed_models']),
+                'failed_models': list(download_progress['failed_models']),
                'completed': download_progress['completed'],
                'total': download_progress['total'],
                'last_update': time.time()
@@ -405,4 +416,350 @@ class DownloadManager:
            with open(progress_file, 'w', encoding='utf-8') as f:
                json.dump(progress_data, f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save progress file: {e}")

+    @staticmethod
+    async def start_force_download(request):
+        """
+        Force download example images for specific models
+
+        Expects a JSON body with:
+        {
+            "model_hashes": ["hash1", "hash2", ...],  # List of model hashes to download
+            "output_dir": "path/to/output",           # Base directory to save example images
+            "optimize": true,                         # Whether to optimize images (default: true)
+            "model_types": ["lora", "checkpoint"],    # Model types to process (default: both)
+            "delay": 1.0                              # Delay between downloads (default: 1.0)
+        }
+        """
+        global download_task, is_downloading, download_progress
+
+        if is_downloading:
+            return web.json_response({
+                'success': False,
+                'error': 'Download already in progress'
+            }, status=400)
+
+        try:
+            # Parse the request body
+            data = await request.json()
+            model_hashes = data.get('model_hashes', [])
+            output_dir = data.get('output_dir')
+            optimize = data.get('optimize', True)
+            model_types = data.get('model_types', ['lora', 'checkpoint'])
+            delay = float(data.get('delay', 0.2))  # Default to 0.2 seconds
+
+            if not model_hashes:
+                return web.json_response({
+                    'success': False,
+                    'error': 'Missing model_hashes parameter'
+                }, status=400)
+
+            if not output_dir:
+                return web.json_response({
+                    'success': False,
+                    'error': 'Missing output_dir parameter'
+                }, status=400)
+
+            # Create the output directory
+            os.makedirs(output_dir, exist_ok=True)
+
+            # Initialize progress tracking
+            download_progress['total'] = len(model_hashes)
+            download_progress['completed'] = 0
+            download_progress['current_model'] = ''
+            download_progress['status'] = 'running'
+            download_progress['errors'] = []
+            download_progress['last_error'] = None
+            download_progress['start_time'] = time.time()
+            download_progress['end_time'] = None
+            download_progress['processed_models'] = set()
+            download_progress['refreshed_models'] = set()
+            download_progress['failed_models'] = set()
+
+            # Set download status to downloading
+            is_downloading = True
+
+            # Execute the download function directly instead of creating a background task
+            result = await DownloadManager._download_specific_models_example_images_sync(
+                model_hashes,
+                output_dir,
+                optimize,
+                model_types,
+                delay
+            )
+
+            # Set download status to not downloading
+            is_downloading = False
+
+            return web.json_response({
+                'success': True,
+                'message': 'Force download completed',
+                'result': result
+            })
+
+        except Exception as e:
+            # Set download status to not downloading
+            is_downloading = False
+            logger.error(f"Failed during forced example images download: {e}", exc_info=True)
+            return web.json_response({
+                'success': False,
+                'error': str(e)
+            }, status=500)
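# Illustrative request for the handler above; the route path is not shown in
# this diff and is therefore an assumption:
#
#   POST /api/example-images/force-download
#   {
#       "model_hashes": ["abc123..."],
#       "output_dir": "/path/to/example_images",
#       "optimize": true,
#       "model_types": ["lora"],
#       "delay": 0.2
#   }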

@staticmethod
async def _download_specific_models_example_images_sync(model_hashes, output_dir, optimize, model_types, delay):
    """Download example images for specific models only, awaited inline rather than scheduled as a background task"""
    global download_progress

    # Get unified downloader
    downloader = await get_downloader()

    try:
        # Get scanners
        scanners = []
        if 'lora' in model_types:
            lora_scanner = await ServiceRegistry.get_lora_scanner()
            scanners.append(('lora', lora_scanner))

        if 'checkpoint' in model_types:
            checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
            scanners.append(('checkpoint', checkpoint_scanner))

        if 'embedding' in model_types:
            embedding_scanner = await ServiceRegistry.get_embedding_scanner()
            scanners.append(('embedding', embedding_scanner))

        # Find the specified models
        models_to_process = []
        for scanner_type, scanner in scanners:
            cache = await scanner.get_cached_data()
            if cache and cache.raw_data:
                for model in cache.raw_data:
                    if model.get('sha256') in model_hashes:
                        models_to_process.append((scanner_type, model, scanner))

        # Update total count based on found models
        download_progress['total'] = len(models_to_process)
        logger.debug(f"Found {download_progress['total']} models to process")

        # Send initial progress via WebSocket
        await ws_manager.broadcast({
            'type': 'example_images_progress',
            'processed': 0,
            'total': download_progress['total'],
            'status': 'running',
            'current_model': ''
        })

        # Process each model
        success_count = 0
        for i, (scanner_type, model, scanner) in enumerate(models_to_process):
            # Force process this model regardless of previous status
            was_successful = await DownloadManager._process_specific_model(
                scanner_type, model, scanner,
                output_dir, optimize, downloader
            )

            if was_successful:
                success_count += 1

            # Update progress
            download_progress['completed'] += 1

            # Send progress update via WebSocket
            await ws_manager.broadcast({
                'type': 'example_images_progress',
                'processed': download_progress['completed'],
                'total': download_progress['total'],
                'status': 'running',
                'current_model': download_progress['current_model']
            })

            # Only add a delay after a remote download, and not after processing the last model
            if was_successful and i < len(models_to_process) - 1 and download_progress['status'] == 'running':
                await asyncio.sleep(delay)

        # Mark as completed
        download_progress['status'] = 'completed'
        download_progress['end_time'] = time.time()
        logger.debug(f"Forced example images download completed: {download_progress['completed']}/{download_progress['total']} models processed")

        # Send final progress via WebSocket
        await ws_manager.broadcast({
            'type': 'example_images_progress',
            'processed': download_progress['completed'],
            'total': download_progress['total'],
            'status': 'completed',
            'current_model': ''
        })

        return {
            'total': download_progress['total'],
            'processed': download_progress['completed'],
            'successful': success_count,
            'errors': download_progress['errors']
        }

    except Exception as e:
        error_msg = f"Error during forced example images download: {str(e)}"
        logger.error(error_msg, exc_info=True)
        download_progress['errors'].append(error_msg)
        download_progress['last_error'] = error_msg
        download_progress['status'] = 'error'
        download_progress['end_time'] = time.time()

        # Send error status via WebSocket
        await ws_manager.broadcast({
            'type': 'example_images_progress',
            'processed': download_progress['completed'],
            'total': download_progress['total'],
            'status': 'error',
            'error': error_msg,
            'current_model': ''
        })

        raise

    finally:
        # No need to close any sessions since we use the global downloader
        pass
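
# A hedged sketch of a client consuming the 'example_images_progress' broadcasts
# emitted above. The WebSocket endpoint URL is an assumption for illustration; the
# message fields ('type', 'processed', 'total', 'status') match the broadcasts in
# the method above.
import asyncio
import json
import aiohttp

async def watch_example_images_progress():
    async with aiohttp.ClientSession() as session:
        # Assumed endpoint; substitute whatever ws_manager actually serves
        async with session.ws_connect('ws://127.0.0.1:8188/ws') as ws:
            async for msg in ws:
                if msg.type != aiohttp.WSMsgType.TEXT:
                    continue
                event = json.loads(msg.data)
                if event.get('type') == 'example_images_progress':
                    print(f"{event['processed']}/{event['total']} ({event['status']})")
                    if event['status'] in ('completed', 'error'):
                        break

asyncio.run(watch_example_images_progress())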

@staticmethod
async def _process_specific_model(scanner_type, model, scanner, output_dir, optimize, downloader):
    """Process a specific model for forced download, ignoring previous download status"""
    global download_progress

    # Wait while the download is paused
    while download_progress['status'] == 'paused':
        await asyncio.sleep(1)

    # Check if download should continue
    if download_progress['status'] != 'running':
        logger.info(f"Download stopped: {download_progress['status']}")
        return False

    model_hash = model.get('sha256', '').lower()
    model_name = model.get('model_name', 'Unknown')
    model_file_path = model.get('file_path', '')
    model_file_name = model.get('file_name', '')

    try:
        # Update current model info
        download_progress['current_model'] = f"{model_name} ({model_hash[:8]})"

        # Create model directory
        model_dir = os.path.join(output_dir, model_hash)
        os.makedirs(model_dir, exist_ok=True)

        # First check for local example images - local processing doesn't need a delay
        local_images_processed = await ExampleImagesProcessor.process_local_examples(
            model_file_path, model_file_name, model_name, model_dir, optimize
        )

        # If we processed local images, update metadata
        if local_images_processed:
            await MetadataUpdater.update_metadata_from_local_examples(
                model_hash, model, scanner_type, scanner, model_dir
            )
            download_progress['processed_models'].add(model_hash)
            return False  # Return False to indicate no remote download happened

        # If no local images, try to download from remote
        elif model.get('civitai') and model.get('civitai', {}).get('images'):
            images = model.get('civitai', {}).get('images', [])

            success, is_stale, failed_images = await ExampleImagesProcessor.download_model_images_with_tracking(
                model_hash, model_name, images, model_dir, optimize, downloader
            )

            # If metadata is stale, try to refresh it
            if is_stale and model_hash not in download_progress['refreshed_models']:
                await MetadataUpdater.refresh_model_metadata(
                    model_hash, model_name, scanner_type, scanner
                )

                # Get the updated model data
                updated_model = await MetadataUpdater.get_updated_model(
                    model_hash, scanner
                )

                if updated_model and updated_model.get('civitai', {}).get('images'):
                    # Retry download with updated metadata
                    updated_images = updated_model.get('civitai', {}).get('images', [])
                    success, _, additional_failed_images = await ExampleImagesProcessor.download_model_images_with_tracking(
                        model_hash, model_name, updated_images, model_dir, optimize, downloader
                    )

                    # Combine failed images from both attempts
                    failed_images.extend(additional_failed_images)

                download_progress['refreshed_models'].add(model_hash)

            # For forced downloads, remove failed images from metadata
            if failed_images:
                # Create a copy of images excluding failed ones
                await DownloadManager._remove_failed_images_from_metadata(
                    model_hash, model_name, failed_images, scanner
                )

            # Mark as processed if we successfully downloaded some images or removed failed ones
            if success or failed_images:
                download_progress['processed_models'].add(model_hash)

            return True  # Return True to indicate a remote download happened
        else:
            logger.debug(f"No civitai images available for model {model_name}")
            return False

    except Exception as e:
        error_msg = f"Error processing model {model.get('model_name')}: {str(e)}"
        logger.error(error_msg, exc_info=True)
        download_progress['errors'].append(error_msg)
        download_progress['last_error'] = error_msg
        return False  # Return False on exception

@staticmethod
async def _remove_failed_images_from_metadata(model_hash, model_name, failed_images, scanner):
    """Remove failed images from model metadata"""
    try:
        # Get current model data
        model_data = await MetadataUpdater.get_updated_model(model_hash, scanner)
        if not model_data:
            logger.warning(f"Could not find model data for {model_name} to remove failed images")
            return

        if not model_data.get('civitai', {}).get('images'):
            logger.warning(f"No images in metadata for {model_name}")
            return

        # Get current images
        current_images = model_data['civitai']['images']

        # Filter out failed images
        updated_images = [img for img in current_images if img.get('url') not in failed_images]

        # If images were removed, update metadata
        if len(updated_images) < len(current_images):
            removed_count = len(current_images) - len(updated_images)
            logger.info(f"Removing {removed_count} failed images from metadata for {model_name}")

            # Update the images list
            model_data['civitai']['images'] = updated_images

            # Save metadata to file
            file_path = model_data.get('file_path')
            if file_path:
                # Create a copy of model data without 'folder' field
                model_copy = model_data.copy()
                model_copy.pop('folder', None)

                # Write metadata to file
                await MetadataManager.save_metadata(file_path, model_copy)
                logger.info(f"Saved updated metadata for {model_name} after removing failed images")

                # Update the scanner cache
                await scanner.update_single_model_cache(file_path, file_path, model_data)

    except Exception as e:
        logger.error(f"Error removing failed images from metadata for {model_name}: {e}", exc_info=True)
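
# Standalone illustration of the filtering step above: entries whose 'url' is in
# the failed list are dropped before the metadata is written back. Data is made up.
current_images = [
    {'url': 'https://image.civitai.com/x/1.jpeg'},
    {'url': 'https://image.civitai.com/x/2.jpeg'},
]
failed_images = ['https://image.civitai.com/x/2.jpeg']
updated_images = [img for img in current_images if img.get('url') not in failed_images]
assert [img['url'] for img in updated_images] == ['https://image.civitai.com/x/1.jpeg']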

@@ -1,6 +1,5 @@
import logging
import os
import re
import sys
import subprocess
from aiohttp import web

@@ -43,7 +42,15 @@ class ExampleImagesFileManager:

        # Construct folder path for this model
        model_folder = os.path.join(example_images_path, model_hash)

        model_folder = os.path.abspath(model_folder)  # Get absolute path

        # Path validation: ensure model_folder is under example_images_path
        if not model_folder.startswith(os.path.abspath(example_images_path)):
            return web.json_response({
                'success': False,
                'error': 'Invalid model folder path'
            }, status=400)

        # Check if folder exists
        if not os.path.exists(model_folder):
            return web.json_response({
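
# Illustrative sketch of why the abspath + startswith check above blocks path
# traversal: a crafted model_hash like '../../etc' would otherwise escape the
# example images root. The paths here are made up for demonstration.
import os

example_images_path = '/data/example_images'  # assumed root for the sketch
for model_hash in ('abc123', '../../etc'):
    model_folder = os.path.abspath(os.path.join(example_images_path, model_hash))
    inside = model_folder.startswith(os.path.abspath(example_images_path))
    print(model_hash, '->', model_folder, 'allowed' if inside else 'rejected')
# 'abc123' resolves under the root and is allowed; '../../etc' resolves to /etc
# and is rejected before any filesystem access happens.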

@@ -23,19 +23,62 @@ class ExampleImagesProcessor:
        return ''.join(random.choice(chars) for _ in range(length))

    @staticmethod
    def get_civitai_optimized_url(image_url):
        """Convert Civitai image URL to its optimized WebP version"""
    def get_civitai_optimized_url(media_url):
        """Convert Civitai media URL (image or video) to its optimized version"""
        base_pattern = r'(https://image\.civitai\.com/[^/]+/[^/]+)'
        match = re.match(base_pattern, image_url)
        match = re.match(base_pattern, media_url)

        if match:
            base_url = match.group(1)
            return f"{base_url}/optimized=true/image.webp"
            return f"{base_url}/optimized=true"

        return image_url
        return media_url
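
# Illustrative before/after of the URL rewrite performed by the new
# get_civitai_optimized_url: everything after the first two path components is
# replaced with an 'optimized=true' transform. The example URL is made up.
import re

def get_civitai_optimized_url(media_url):
    base_pattern = r'(https://image\.civitai\.com/[^/]+/[^/]+)'
    match = re.match(base_pattern, media_url)
    if match:
        return f"{match.group(1)}/optimized=true"
    return media_url

url = 'https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/0001-abcd/width=450/sample.jpeg'
print(get_civitai_optimized_url(url))
# -> https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/0001-abcd/optimized=true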

    @staticmethod
    async def download_model_images(model_hash, model_name, model_images, model_dir, optimize, independent_session):
    def _get_file_extension_from_content_or_headers(content, headers, fallback_url=None):
        """Determine file extension from content magic bytes or headers"""
        # Check magic bytes for common formats
        if content:
            if content.startswith(b'\xFF\xD8\xFF'):
                return '.jpg'
            elif content.startswith(b'\x89PNG\r\n\x1A\n'):
                return '.png'
            elif content.startswith(b'GIF87a') or content.startswith(b'GIF89a'):
                return '.gif'
            elif content.startswith(b'RIFF') and b'WEBP' in content[:12]:
                return '.webp'
            elif content.startswith(b'\x00\x00\x00\x18ftypmp4') or content.startswith(b'\x00\x00\x00\x20ftypmp4'):
                return '.mp4'
            elif content.startswith(b'\x1A\x45\xDF\xA3'):
                return '.webm'

        # Check Content-Type header
        if headers:
            content_type = headers.get('content-type', '').lower()
            type_map = {
                'image/jpeg': '.jpg',
                'image/png': '.png',
                'image/gif': '.gif',
                'image/webp': '.webp',
                'video/mp4': '.mp4',
                'video/webm': '.webm',
                'video/quicktime': '.mov'
            }
            if content_type in type_map:
                return type_map[content_type]

        # Fallback to URL extension if available
        if fallback_url:
            filename = os.path.basename(fallback_url.split('?')[0])
            ext = os.path.splitext(filename)[1].lower()
            if ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or ext in SUPPORTED_MEDIA_EXTENSIONS['videos']:
                return ext

        # Default fallback
        return '.jpg'
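
# Quick standalone check of the magic-byte sniffing above, using hand-built byte
# prefixes rather than real files. It assumes ExampleImagesProcessor is in scope
# (same module); the expected results mirror the branches of
# _get_file_extension_from_content_or_headers.
samples = {
    b'\xFF\xD8\xFF\xE0' + b'\x00' * 8: '.jpg',   # JPEG SOI marker
    b'\x89PNG\r\n\x1A\n' + b'\x00' * 4: '.png',  # PNG signature
    b'RIFF\x00\x00\x00\x00WEBP': '.webp',        # RIFF container with WEBP tag
    b'\x1A\x45\xDF\xA3' + b'\x00' * 8: '.webm',  # EBML header (Matroska/WebM)
}
for prefix, expected in samples.items():
    detected = ExampleImagesProcessor._get_file_extension_from_content_or_headers(prefix, None)
    assert detected == expected, (detected, expected)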

    @staticmethod
    async def download_model_images(model_hash, model_name, model_images, model_dir, optimize, downloader):
        """Download images for a single model

        Returns:

@@ -48,53 +91,59 @@ class ExampleImagesProcessor:
            if not image_url:
                continue

            # Get image filename from URL
            image_filename = os.path.basename(image_url.split('?')[0])
            image_ext = os.path.splitext(image_filename)[1].lower()

            # Handle images and videos
            is_image = image_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
            is_video = image_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']

            if not (is_image or is_video):
                logger.debug(f"Skipping unsupported file type: {image_filename}")
                continue

            # Use 0-based indexing instead of 1-based indexing
            save_filename = f"image_{i}{image_ext}"

            # If optimizing images and this is a Civitai image, use their pre-optimized WebP version
            if is_image and optimize and 'civitai.com' in image_url:
            # Apply optimization for Civitai URLs if enabled
            original_url = image_url
            if optimize and 'civitai.com' in image_url:
                image_url = ExampleImagesProcessor.get_civitai_optimized_url(image_url)
                save_filename = f"image_{i}.webp"

            # Check if already downloaded
            save_path = os.path.join(model_dir, save_filename)
            if os.path.exists(save_path):
                logger.debug(f"File already exists: {save_path}")
                continue

            # Download the file
            # Download the file first to determine the actual file type
            try:
                logger.debug(f"Downloading {save_filename} for {model_name}")
                logger.debug(f"Downloading media file {i} for {model_name}")

                # Download directly using the independent session
                async with independent_session.get(image_url, timeout=60) as response:
                    if response.status == 200:
                        with open(save_path, 'wb') as f:
                            async for chunk in response.content.iter_chunked(8192):
                                if chunk:
                                    f.write(chunk)
                    elif response.status == 404:
                        error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
                        logger.warning(error_msg)
                        model_success = False  # Mark the model as failed due to 404 error
                        # Return early to trigger metadata refresh attempt
                        return False, True  # (success, is_metadata_stale)
                    else:
                        error_msg = f"Failed to download file: {image_url}, status code: {response.status}"
                        logger.warning(error_msg)
                        model_success = False  # Mark the model as failed
                # Download using the unified downloader with headers
                success, content, headers = await downloader.download_to_memory(
                    image_url,
                    use_auth=False,  # Example images don't need auth
                    return_headers=True
                )

                if success:
                    # Determine file extension from content or headers
                    media_ext = ExampleImagesProcessor._get_file_extension_from_content_or_headers(
                        content, headers, original_url
                    )

                    # Check if the detected file type is supported
                    is_image = media_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
                    is_video = media_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']

                    if not (is_image or is_video):
                        logger.debug(f"Skipping unsupported file type: {media_ext}")
                        continue

                    # Use 0-based indexing with the detected extension
                    save_filename = f"image_{i}{media_ext}"
                    save_path = os.path.join(model_dir, save_filename)

                    # Check if already downloaded
                    if os.path.exists(save_path):
                        logger.debug(f"File already exists: {save_path}")
                        continue

                    # Save the file
                    with open(save_path, 'wb') as f:
                        f.write(content)

                elif "404" in str(content):
                    error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
                    logger.warning(error_msg)
                    model_success = False  # Mark the model as failed due to 404 error
                    # Return early to trigger metadata refresh attempt
                    return False, True  # (success, is_metadata_stale)
                else:
                    error_msg = f"Failed to download file: {image_url}, error: {content}"
                    logger.warning(error_msg)
                    model_success = False  # Mark the model as failed
            except Exception as e:
                error_msg = f"Error downloading file {image_url}: {str(e)}"
                logger.error(error_msg)

@@ -102,6 +151,84 @@ class ExampleImagesProcessor:

        return model_success, False  # (success, is_metadata_stale)

    @staticmethod
    async def download_model_images_with_tracking(model_hash, model_name, model_images, model_dir, optimize, downloader):
        """Download images for a single model with tracking of failed image URLs

        Returns:
            tuple: (success, is_stale_metadata, failed_images) - whether download was successful, whether metadata is stale, list of failed image URLs
        """
        model_success = True
        failed_images = []

        for i, image in enumerate(model_images):
            image_url = image.get('url')
            if not image_url:
                continue

            # Apply optimization for Civitai URLs if enabled
            original_url = image_url
            if optimize and 'civitai.com' in image_url:
                image_url = ExampleImagesProcessor.get_civitai_optimized_url(image_url)

            # Download the file first to determine the actual file type
            try:
                logger.debug(f"Downloading media file {i} for {model_name}")

                # Download using the unified downloader with headers
                success, content, headers = await downloader.download_to_memory(
                    image_url,
                    use_auth=False,  # Example images don't need auth
                    return_headers=True
                )

                if success:
                    # Determine file extension from content or headers
                    media_ext = ExampleImagesProcessor._get_file_extension_from_content_or_headers(
                        content, headers, original_url
                    )

                    # Check if the detected file type is supported
                    is_image = media_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
                    is_video = media_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']

                    if not (is_image or is_video):
                        logger.debug(f"Skipping unsupported file type: {media_ext}")
                        continue

                    # Use 0-based indexing with the detected extension
                    save_filename = f"image_{i}{media_ext}"
                    save_path = os.path.join(model_dir, save_filename)

                    # Check if already downloaded
                    if os.path.exists(save_path):
                        logger.debug(f"File already exists: {save_path}")
                        continue

                    # Save the file
                    with open(save_path, 'wb') as f:
                        f.write(content)

                elif "404" in str(content):
                    error_msg = f"Failed to download file: {image_url}, status code: 404 - Model metadata might be stale"
                    logger.warning(error_msg)
                    model_success = False  # Mark the model as failed due to 404 error
                    failed_images.append(image_url)  # Track failed URL
                    # Return early to trigger metadata refresh attempt
                    return False, True, failed_images  # (success, is_metadata_stale, failed_images)
                else:
                    error_msg = f"Failed to download file: {image_url}, error: {content}"
                    logger.warning(error_msg)
                    model_success = False  # Mark the model as failed
                    failed_images.append(image_url)  # Track failed URL
            except Exception as e:
                error_msg = f"Error downloading file {image_url}: {str(e)}"
                logger.error(error_msg)
                model_success = False  # Mark the model as failed
                failed_images.append(image_url)  # Track failed URL

        return model_success, False, failed_images  # (success, is_metadata_stale, failed_images)

    @staticmethod
    async def process_local_examples(model_file_path, model_file_name, model_name, model_dir, optimize):
        """Process local example images

@@ -493,4 +620,7 @@ class ExampleImagesProcessor:
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)
            }, status=500)

@@ -27,39 +27,58 @@ def find_preview_file(base_name: str, dir_path: str) -> str:
        full_pattern = os.path.join(dir_path, f"{base_name}{ext}")
        if os.path.exists(full_pattern):
            # Check if this is an image and not already webp
            if ext.lower().endswith(('.jpg', '.jpeg', '.png')) and not ext.lower().endswith('.webp'):
                try:
                    # Optimize the image to webp format
                    webp_path = os.path.join(dir_path, f"{base_name}.webp")
            # TODO: disable the optimization for now, maybe add a config option later
            # if ext.lower().endswith(('.jpg', '.jpeg', '.png')) and not ext.lower().endswith('.webp'):
            #     try:
            #         # Optimize the image to webp format
            #         webp_path = os.path.join(dir_path, f"{base_name}.webp")

                    # Use ExifUtils to optimize the image
                    with open(full_pattern, 'rb') as f:
                        image_data = f.read()
            #         # Use ExifUtils to optimize the image
            #         with open(full_pattern, 'rb') as f:
            #             image_data = f.read()

                    optimized_data, _ = ExifUtils.optimize_image(
                        image_data=image_data,
                        target_width=CARD_PREVIEW_WIDTH,
                        format='webp',
                        quality=85,
                        preserve_metadata=False
                    )
            #         optimized_data, _ = ExifUtils.optimize_image(
            #             image_data=image_data,
            #             target_width=CARD_PREVIEW_WIDTH,
            #             format='webp',
            #             quality=85,
            #             preserve_metadata=False
            #         )

                    # Save the optimized webp file
                    with open(webp_path, 'wb') as f:
                        f.write(optimized_data)
            #         # Save the optimized webp file
            #         with open(webp_path, 'wb') as f:
            #             f.write(optimized_data)

                    logger.debug(f"Optimized preview image from {full_pattern} to {webp_path}")
                    return webp_path.replace(os.sep, "/")
                except Exception as e:
                    logger.error(f"Error optimizing preview image {full_pattern}: {e}")
                    # Fall back to original file if optimization fails
                    return full_pattern.replace(os.sep, "/")
            #         logger.debug(f"Optimized preview image from {full_pattern} to {webp_path}")
            #         return webp_path.replace(os.sep, "/")
            #     except Exception as e:
            #         logger.error(f"Error optimizing preview image {full_pattern}: {e}")
            #         # Fall back to original file if optimization fails
            #         return full_pattern.replace(os.sep, "/")

            # Return the original path for webp images or non-image files
            return full_pattern.replace(os.sep, "/")

    return ""

def get_preview_extension(preview_path: str) -> str:
    """Get the complete preview extension from a preview file path

    Args:
        preview_path: Path to the preview file

    Returns:
        str: The complete extension (e.g., '.preview.png', '.png', '.webp')
    """
    preview_path_lower = preview_path.lower()

    # Check for compound extensions first (longer matches first)
    for ext in sorted(PREVIEW_EXTENSIONS, key=len, reverse=True):
        if preview_path_lower.endswith(ext.lower()):
            return ext

    return os.path.splitext(preview_path)[1]
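
# Small standalone illustration of the compound-extension matching above: longer
# extensions are tried first so '.preview.png' wins over plain '.png'. The
# PREVIEW_EXTENSIONS list here is a reduced stand-in for the real constant.
import os

PREVIEW_EXTENSIONS = ['.preview.png', '.preview.webp', '.png', '.webp']

def get_preview_extension(preview_path: str) -> str:
    preview_path_lower = preview_path.lower()
    for ext in sorted(PREVIEW_EXTENSIONS, key=len, reverse=True):
        if preview_path_lower.endswith(ext.lower()):
            return ext
    return os.path.splitext(preview_path)[1]

assert get_preview_extension('model.preview.png') == '.preview.png'
assert get_preview_extension('model.png') == '.png'
assert get_preview_extension('model.jpeg') == '.jpeg'  # fallback via splitext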

def normalize_path(path: str) -> str:
    """Normalize file path to use forward slashes"""
    return path.replace(os.sep, "/") if path else path

@@ -1,7 +1,6 @@
from datetime import datetime
import os
import json
import shutil
import logging
from typing import Dict, Optional, Type, Union

@@ -17,7 +16,7 @@ class MetadataManager:

    This class is responsible for:
    1. Loading metadata safely with fallback mechanisms
    2. Saving metadata with atomic operations and backups
    2. Saving metadata with atomic operations
    3. Creating default metadata for models
    4. Handling unknown fields gracefully
    """

@@ -25,81 +24,44 @@ class MetadataManager:
    @staticmethod
    async def load_metadata(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
        """
        Load metadata with robust error handling and data preservation.
        Load metadata safely.

        Args:
            file_path: Path to the model file
            model_class: Class to instantiate (LoraMetadata, CheckpointMetadata, etc.)

        Returns:
            BaseModelMetadata instance or None if file doesn't exist
            tuple: (metadata, should_skip)
                - metadata: BaseModelMetadata instance or None
                - should_skip: True if corrupted metadata file exists and model should be skipped
        """
        metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
        backup_path = f"{metadata_path}.bak"

        # Try loading the main metadata file
        if os.path.exists(metadata_path):
            try:
                with open(metadata_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)

                # Create model instance
                metadata = model_class.from_dict(data)

                # Normalize paths
                await MetadataManager._normalize_metadata_paths(metadata, file_path)

                return metadata

            except json.JSONDecodeError:
                # JSON parsing error - try to restore from backup
                logger.warning(f"Invalid JSON in metadata file: {metadata_path}")
                return await MetadataManager._restore_from_backup(backup_path, file_path, model_class)

            except Exception as e:
                # Other errors might be due to unknown fields or schema changes
                logger.error(f"Error loading metadata from {metadata_path}: {str(e)}")
                return await MetadataManager._restore_from_backup(backup_path, file_path, model_class)
        # Check if metadata file exists
        if not os.path.exists(metadata_path):
            return None, False

        return None

    @staticmethod
    async def _restore_from_backup(backup_path: str, file_path: str, model_class: Type[BaseModelMetadata]) -> Optional[BaseModelMetadata]:
        """
        Try to restore metadata from backup file

        Args:
            backup_path: Path to backup file
            file_path: Path to the original model file
            model_class: Class to instantiate
        try:
            with open(metadata_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

        Returns:
            BaseModelMetadata instance or None if restoration fails
        """
        if os.path.exists(backup_path):
            try:
                logger.info(f"Attempting to restore metadata from backup: {backup_path}")
                with open(backup_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
            # Create model instance
            metadata = model_class.from_dict(data)

                # Process data similarly to normal loading
                metadata = model_class.from_dict(data)
                await MetadataManager._normalize_metadata_paths(metadata, file_path)
                return metadata
            except Exception as e:
                logger.error(f"Failed to restore from backup: {str(e)}")

        return None
            # Normalize paths
            await MetadataManager._normalize_metadata_paths(metadata, file_path)

            return metadata, False

        except (json.JSONDecodeError, Exception) as e:
            error_type = "Invalid JSON" if isinstance(e, json.JSONDecodeError) else "Parse error"
            logger.error(f"{error_type} in metadata file: {metadata_path}. Error: {str(e)}. Skipping model to preserve existing data.")
            return None, True  # should_skip = True
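
# Hedged sketch of how a caller consumes the new (metadata, should_skip) contract
# of load_metadata: on a corrupted metadata file the model is skipped instead of
# having its metadata silently recreated. The scan function itself is illustrative.
async def scan_model(file_path):
    metadata, should_skip = await MetadataManager.load_metadata(file_path)
    if should_skip:
        # Corrupted .metadata.json on disk; leave it for the user to inspect
        logger.warning(f"Skipping {file_path}: unreadable metadata preserved on disk")
        return None
    if metadata is None:
        # No metadata file exists yet; default metadata could be created here
        pass
    return metadata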

    @staticmethod
    async def save_metadata(path: str, metadata: Union[BaseModelMetadata, Dict], create_backup: bool = False) -> bool:
    async def save_metadata(path: str, metadata: Union[BaseModelMetadata, Dict]) -> bool:
        """
        Save metadata with atomic write operations and backup creation.
        Save metadata with atomic write operations.

        Args:
            path: Path to the model file or directly to the metadata file
            metadata: Metadata to save (either BaseModelMetadata object or dict)
            create_backup: Whether to create a new backup of existing file if a backup doesn't already exist

        Returns:
            bool: Success or failure

@@ -112,19 +74,8 @@ class MetadataManager:
            file_path = path
        metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
        temp_path = f"{metadata_path}.tmp"
        backup_path = f"{metadata_path}.bak"

        try:
            # Create backup if file exists and either:
            # 1. create_backup is True, OR
            # 2. backup file doesn't already exist
            if os.path.exists(metadata_path) and (create_backup or not os.path.exists(backup_path)):
                try:
                    shutil.copy2(metadata_path, backup_path)
                    logger.debug(f"Created metadata backup at: {backup_path}")
                except Exception as e:
                    logger.warning(f"Failed to create metadata backup: {str(e)}")

            # Convert to dict if needed
            if isinstance(metadata, BaseModelMetadata):
                metadata_dict = metadata.to_dict()

@@ -240,7 +191,7 @@ class MetadataManager:
        # await MetadataManager._enrich_metadata(metadata, real_path)

        # Save the created metadata
        await MetadataManager.save_metadata(file_path, metadata, create_backup=False)
        await MetadataManager.save_metadata(file_path, metadata)

        return metadata

@@ -310,4 +261,4 @@ class MetadataManager:

        # If path attributes were changed, save the metadata back to disk
        if need_update:
            await MetadataManager.save_metadata(file_path, metadata, create_backup=False)
            await MetadataManager.save_metadata(file_path, metadata)

@@ -83,6 +83,50 @@ class BaseModelMetadata:
        self.size = os.path.getsize(file_path)
        self.modified = os.path.getmtime(file_path)
        self.file_path = file_path.replace(os.sep, '/')
        # Update file_name when file_path changes
        self.file_name = os.path.splitext(os.path.basename(file_path))[0]

    @staticmethod
    def generate_unique_filename(target_dir: str, base_name: str, extension: str, hash_provider: callable = None) -> str:
        """Generate a unique filename to avoid conflicts

        Args:
            target_dir: Target directory path
            base_name: Base filename without extension
            extension: File extension including the dot
            hash_provider: A callable that returns the SHA256 hash when needed

        Returns:
            str: Unique filename that doesn't conflict with existing files
        """
        original_filename = f"{base_name}{extension}"
        target_path = os.path.join(target_dir, original_filename)

        # If no conflict, return original filename
        if not os.path.exists(target_path):
            return original_filename

        # Only compute hash when needed
        if hash_provider:
            sha256_hash = hash_provider()
        else:
            sha256_hash = "0000"

        # Generate short hash (first 4 characters of SHA256)
        short_hash = sha256_hash[:4] if sha256_hash else "0000"

        # Try with short hash suffix
        unique_filename = f"{base_name}-{short_hash}{extension}"
        unique_path = os.path.join(target_dir, unique_filename)

        # If still conflicts, add incremental number
        counter = 1
        while os.path.exists(unique_path):
            unique_filename = f"{base_name}-{short_hash}-{counter}{extension}"
            unique_path = os.path.join(target_dir, unique_filename)
            counter += 1

        return unique_filename
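
# Illustrative run of generate_unique_filename: when 'model.safetensors' already
# exists, a 4-character hash suffix is tried, then incrementing counters. The
# directory and hash input are made up; results depend on the actual filesystem.
import hashlib

def fake_hash_provider():
    # Stand-in for the real SHA256 computation of the model file
    return hashlib.sha256(b'example model bytes').hexdigest()

name = BaseModelMetadata.generate_unique_filename(
    target_dir='/models/loras',  # assumed directory
    base_name='model',
    extension='.safetensors',
    hash_provider=fake_hash_provider,
)
# -> 'model.safetensors' if free, else 'model-<hash4>.safetensors',
#    else 'model-<hash4>-1.safetensors', and so on.
print(name)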

@dataclass
class LoraMetadata(BaseModelMetadata):

@@ -7,12 +7,13 @@ from aiohttp import web
from .model_utils import determine_base_model
from .constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
from ..config import config
from ..services.civitai_client import CivitaiClient
from ..services.service_registry import ServiceRegistry
from ..services.downloader import get_downloader
from ..utils.exif_utils import ExifUtils
from ..utils.metadata_manager import MetadataManager
from ..services.download_manager import DownloadManager
from ..services.websocket_manager import ws_manager
from ..services.metadata_service import get_default_metadata_provider, get_metadata_provider
from ..services.settings_manager import settings

logger = logging.getLogger(__name__)

@@ -37,59 +38,73 @@ class ModelRouteUtils:
        local_metadata['from_civitai'] = False
        await MetadataManager.save_metadata(metadata_path, local_metadata)

    @staticmethod
    def is_civitai_api_metadata(meta: dict) -> bool:
        """
        Determine if the given civitai metadata is from the civitai API.
        Returns True if both 'files' and 'images' exist and are non-empty,
        and the 'source' is not 'archive_db'.
        """
        if not isinstance(meta, dict):
            return False
        files = meta.get('files')
        images = meta.get('images')
        source = meta.get('source')
        return bool(files) and bool(images) and source != 'archive_db'
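
# Quick sanity check of is_civitai_api_metadata with hand-built dicts: full API
# responses carry non-empty 'files' and 'images', while archive_db recoveries are
# flagged by their 'source' and must not be mistaken for API data. Values are made up.
api_meta = {'files': [{'name': 'a.safetensors'}],
            'images': [{'url': 'https://image.civitai.com/x/1.jpeg'}]}
archive_meta = {'files': [{'name': 'a.safetensors'}],
                'images': [{'url': 'https://image.civitai.com/x/1.jpeg'}],
                'source': 'archive_db'}
assert ModelRouteUtils.is_civitai_api_metadata(api_meta) is True
assert ModelRouteUtils.is_civitai_api_metadata(archive_meta) is False
assert ModelRouteUtils.is_civitai_api_metadata({}) is False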

    @staticmethod
    async def update_model_metadata(metadata_path: str, local_metadata: Dict,
                                    civitai_metadata: Dict, client: CivitaiClient) -> None:
                                    civitai_metadata: Dict, metadata_provider=None) -> None:
        """Update local metadata with CivitAI data"""
        # Save existing trainedWords and customImages if they exist
        existing_civitai = local_metadata.get('civitai') or {}  # Use empty dict if None

        # Create a new civitai metadata by updating existing with new
        merged_civitai = existing_civitai.copy()
        merged_civitai.update(civitai_metadata)
        # Check if we should skip the update to avoid overwriting richer data
        if civitai_metadata.get('source') == 'archive_db' and ModelRouteUtils.is_civitai_api_metadata(existing_civitai):
            logger.info(f"Skip civitai update for {local_metadata.get('model_name', '')} ({existing_civitai.get('name', '')})")
        else:
            # Create a new civitai metadata by updating existing with new
            merged_civitai = existing_civitai.copy()
            merged_civitai.update(civitai_metadata)

        # Special handling for trainedWords - ensure we don't lose any existing trained words
        if 'trainedWords' in existing_civitai:
            existing_trained_words = existing_civitai.get('trainedWords', [])
            new_trained_words = civitai_metadata.get('trainedWords', [])
            # Use a set to combine words without duplicates, then convert back to list
            merged_trained_words = list(set(existing_trained_words + new_trained_words))
            merged_civitai['trainedWords'] = merged_trained_words
            if civitai_metadata.get('source') == 'archive_db':
                model_name = civitai_metadata.get('model', {}).get('name', '')
                version_name = civitai_metadata.get('name', '')
                logger.info(f"Recovered metadata from archive_db for deleted model: {model_name} ({version_name})")

        # Update local metadata with merged civitai data
        local_metadata['civitai'] = merged_civitai
        local_metadata['from_civitai'] = True
            # Special handling for trainedWords - ensure we don't lose any existing trained words
            if 'trainedWords' in existing_civitai:
                existing_trained_words = existing_civitai.get('trainedWords', [])
                new_trained_words = civitai_metadata.get('trainedWords', [])
                # Use a set to combine words without duplicates, then convert back to list
                merged_trained_words = list(set(existing_trained_words + new_trained_words))
                merged_civitai['trainedWords'] = merged_trained_words

            # Update local metadata with merged civitai data
            local_metadata['civitai'] = merged_civitai
            local_metadata['from_civitai'] = True

        # Update model name if available
        if 'model' in civitai_metadata:
            if civitai_metadata.get('model', {}).get('name'):
                local_metadata['model_name'] = civitai_metadata['model']['name']

        # Extract model metadata directly from civitai_metadata if available
        model_metadata = None
        # Update model-related metadata from civitai_metadata.model
        if 'model' in civitai_metadata and civitai_metadata['model']:
            model_data = civitai_metadata['model']

        if 'model' in civitai_metadata and civitai_metadata.get('model'):
            # Data is already available in the response from get_model_version
            model_metadata = {
                'description': civitai_metadata.get('model', {}).get('description', ''),
                'tags': civitai_metadata.get('model', {}).get('tags', []),
                'creator': civitai_metadata.get('creator', {})
            }
            # Update model name if available and not already set
            if model_data.get('name'):
                local_metadata['model_name'] = model_data['name']

        # If we have modelId and don't have enough metadata, fetch additional data
        if not model_metadata or not model_metadata.get('description'):
            model_id = civitai_metadata.get('modelId')
            if model_id:
                fetched_metadata, _ = await client.get_model_metadata(str(model_id))
                if fetched_metadata:
                    model_metadata = fetched_metadata
            # Update modelDescription if missing or empty in local_metadata
            if not local_metadata.get('modelDescription') and model_data.get('description'):
                local_metadata['modelDescription'] = model_data['description']

        # Update local metadata with the model information
        if model_metadata:
            local_metadata['modelDescription'] = model_metadata.get('description', '')
            local_metadata['tags'] = model_metadata.get('tags', [])
            if 'creator' in model_metadata and model_metadata['creator']:
                local_metadata['civitai']['creator'] = model_metadata['creator']
            # Update tags if missing or empty in local_metadata
            if not local_metadata.get('tags') and model_data.get('tags'):
                local_metadata['tags'] = model_data['tags']

            # Update creator in civitai metadata if missing
            if model_data.get('creator') and not local_metadata.get('civitai', {}).get('creator'):
                if 'civitai' not in local_metadata:
                    local_metadata['civitai'] = {}
                local_metadata['civitai']['creator'] = model_data['creator']

        # Update base model
        local_metadata['base_model'] = determine_base_model(civitai_metadata.get('baseModel'))

@@ -113,22 +128,28 @@ class ModelRouteUtils:
        preview_path = os.path.join(os.path.dirname(metadata_path), preview_filename)

        if is_video:
            # Download video as is
            if await client.download_preview_image(first_preview['url'], preview_path):
            # Download video as is using downloader
            downloader = await get_downloader()
            success, result = await downloader.download_file(
                first_preview['url'],
                preview_path,
                use_auth=False
            )
            if success:
                local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
                local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
        else:
            # For images, download and then optimize to WebP
            temp_path = preview_path + ".temp"
            if await client.download_preview_image(first_preview['url'], temp_path):
            # For images, download and then optimize to WebP using downloader
            downloader = await get_downloader()
            success, content, headers = await downloader.download_to_memory(
                first_preview['url'],
                use_auth=False
            )
            if success:
                try:
                    # Read the downloaded image
                    with open(temp_path, 'rb') as f:
                        image_data = f.read()

                    # Optimize and convert to WebP
                    optimized_data, _ = ExifUtils.optimize_image(
                        image_data=image_data,
                        image_data=content,  # Use downloaded content directly
                        target_width=CARD_PREVIEW_WIDTH,
                        format='webp',
                        quality=85,

@@ -143,20 +164,19 @@ class ModelRouteUtils:
                    local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
                    local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)

                    # Remove the temporary file
                    if os.path.exists(temp_path):
                        os.remove(temp_path)

                except Exception as e:
                    logger.error(f"Error optimizing preview image: {e}")
                    # If optimization fails, try to use the downloaded image directly
                    if os.path.exists(temp_path):
                        os.rename(temp_path, preview_path)
                    # If optimization fails, save the original content
                    try:
                        with open(preview_path, 'wb') as f:
                            f.write(content)
                        local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
                        local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
                    except Exception as save_error:
                        logger.error(f"Error saving preview image: {save_error}")

        # Save updated metadata
        await MetadataManager.save_metadata(metadata_path, local_metadata, True)
        await MetadataManager.save_metadata(metadata_path, local_metadata)

    @staticmethod
    async def fetch_and_update_model(

@@ -176,7 +196,6 @@ class ModelRouteUtils:
        Returns:
            bool: True if successful, False otherwise
        """
        client = CivitaiClient()
        try:
            # Validate input parameters
            if not isinstance(model_data, dict):

@@ -188,8 +207,15 @@ class ModelRouteUtils:
            # Check if model metadata exists
            local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)

            # Fetch metadata from Civitai
            civitai_metadata = await client.get_model_by_hash(sha256)
            if model_data.get('from_civitai') is False:
                if not settings.get('enable_metadata_archive_db', False):
                    return False
                # Likely deleted from CivitAI, use archive_db if available
                metadata_provider = await get_metadata_provider('sqlite')
            else:
                metadata_provider = await get_default_metadata_provider()

            civitai_metadata = await metadata_provider.get_model_by_hash(sha256)
            if not civitai_metadata:
                # Mark as not from CivitAI if not found
                local_metadata['from_civitai'] = False

@@ -202,7 +228,7 @@ class ModelRouteUtils:
                metadata_path,
                local_metadata,
                civitai_metadata,
                client
                metadata_provider
            )

            # Update cache object directly using safe .get() method

@@ -225,17 +251,15 @@ class ModelRouteUtils:
        except Exception as e:
            logger.error(f"Error fetching CivitAI data: {str(e)}", exc_info=True)  # Include stack trace
            return False
        finally:
            await client.close()

    @staticmethod
    def filter_civitai_data(data: Dict) -> Dict:
    def filter_civitai_data(data: Dict, minimal: bool = False) -> Dict:
        """Filter relevant fields from CivitAI data"""
        if not data:
            return {}

        fields = [
            "id", "modelId", "name", "createdAt", "updatedAt",

        fields = ["id", "modelId", "name", "trainedWords"] if minimal else [
            "id", "modelId", "name", "createdAt", "updatedAt",
            "publishedAt", "trainedWords", "baseModel", "description",
            "model", "images", "customImages", "creator"
        ]
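
# Hedged illustration of the new 'minimal' flag: which keys survive follows the
# two field lists above (the rest of the function body is not shown in this
# excerpt). All values are made up.
raw = {
    'id': 1, 'modelId': 2, 'name': 'v1', 'trainedWords': ['word'],
    'createdAt': '2024-01-01',
    'images': [{'url': 'https://image.civitai.com/x/1.jpeg'}],
}
full = ModelRouteUtils.filter_civitai_data(raw)                   # keeps every listed field present in raw
minimal = ModelRouteUtils.filter_civitai_data(raw, minimal=True)  # keeps only id, modelId, name, trainedWords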

@@ -359,24 +383,22 @@ class ModelRouteUtils:
        if not local_metadata or not local_metadata.get('sha256'):
            return web.json_response({"success": False, "error": "No SHA256 hash found"}, status=400)

        # Create a client for fetching from Civitai
        client = CivitaiClient()
        try:
            # Fetch and update metadata
            civitai_metadata = await client.get_model_by_hash(local_metadata["sha256"])
            if not civitai_metadata:
                await ModelRouteUtils.handle_not_found_on_civitai(metadata_path, local_metadata)
                return web.json_response({"success": False, "error": "Not found on CivitAI"}, status=404)
        # Get metadata provider and fetch from unified provider
        metadata_provider = await get_default_metadata_provider()

        # Fetch and update metadata
        civitai_metadata = await metadata_provider.get_model_by_hash(local_metadata["sha256"])
        if not civitai_metadata:
            await ModelRouteUtils.handle_not_found_on_civitai(metadata_path, local_metadata)
            return web.json_response({"success": False, "error": "Not found on CivitAI"}, status=404)

            await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, client)

            # Update the cache
            await scanner.update_single_model_cache(data['file_path'], data['file_path'], local_metadata)

            # Return the updated metadata along with success status
            return web.json_response({"success": True, "metadata": local_metadata})
        finally:
            await client.close()
        await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, metadata_provider)

        # Update the cache
        await scanner.update_single_model_cache(data['file_path'], data['file_path'], local_metadata)

        # Return the updated metadata along with success status
        return web.json_response({"success": True, "metadata": local_metadata})

    except Exception as e:
        logger.error(f"Error fetching from CivitAI: {e}", exc_info=True)

@@ -580,16 +602,19 @@ class ModelRouteUtils:
            })

        # Check which identifier is provided and convert to int
        try:
            model_id = int(data.get('model_id'))
        except (TypeError, ValueError):
            return web.json_response({
                'success': False,
                'error': "Invalid model_id: Must be an integer"
            }, status=400)
        model_id = None
        model_version_id = None

        if data.get('model_id'):
            try:
                model_id = int(data.get('model_id'))
            except (TypeError, ValueError):
                return web.json_response({
                    'success': False,
                    'error': "Invalid model_id: Must be an integer"
                }, status=400)

        # Convert model_version_id to int if provided
        model_version_id = None
        if data.get('model_version_id'):
            try:
                model_version_id = int(data.get('model_version_id'))

@@ -599,14 +624,15 @@ class ModelRouteUtils:
                    'error': "Invalid model_version_id: Must be an integer"
                }, status=400)

        # Only model_id is required, model_version_id is optional
        if not model_id:
        # At least one identifier is required
        if not model_id and not model_version_id:
            return web.json_response({
                'success': False,
                'error': "Missing required parameter: Please provide 'model_id'"
                'error': "Missing required parameter: Please provide either 'model_id' or 'model_version_id'"
            }, status=400)

        use_default_paths = data.get('use_default_paths', False)
        source = data.get('source')  # Optional source parameter

        # Pass the download_id to download_from_civitai
        result = await download_manager.download_from_civitai(

@@ -616,7 +642,8 @@ class ModelRouteUtils:
            relative_path=data.get('relative_path', ''),
            use_default_paths=use_default_paths,
            progress_callback=progress_callback,
            download_id=download_id  # Pass download_id explicitly
            download_id=download_id,  # Pass download_id explicitly
            source=source  # Pass source parameter
        )

        # Include download_id in the response

@@ -625,15 +652,6 @@ class ModelRouteUtils:
        if not result.get('success', False):
            error_message = result.get('error', 'Unknown error')

            # Return 401 for early access errors
            if 'early access' in error_message.lower():
                logger.warning(f"Early access download failed: {error_message}")
                return web.json_response({
                    'success': False,
                    'error': f"Early Access Restriction: {error_message}",
                    'download_id': download_id
                }, status=401)

            return web.json_response({
                'success': False,
                'error': error_message,

@@ -783,43 +801,38 @@ class ModelRouteUtils:
        # Check if model metadata exists
        local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)

        # Create a client for fetching from Civitai
        client = await CivitaiClient.get_instance()
        try:
            # Fetch metadata using get_model_version which includes more comprehensive data
            civitai_metadata = await client.get_model_version(model_id, model_version_id)
            if not civitai_metadata:
                error_msg = f"Model version not found on CivitAI for ID: {model_id}"
                if model_version_id:
                    error_msg += f" with version: {model_version_id}"
                return web.json_response({"success": False, "error": error_msg}, status=404)

            # Try to find the primary model file to get the SHA256 hash
            primary_model_file = None
            for file in civitai_metadata.get('files', []):
                if file.get('primary', False) and file.get('type') == 'Model':
                    primary_model_file = file
                    break

            # Update the SHA256 hash in local metadata if available
            if primary_model_file and primary_model_file.get('hashes', {}).get('SHA256'):
                local_metadata['sha256'] = primary_model_file['hashes']['SHA256'].lower()

            # Update metadata with CivitAI information
            await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, client)

            # Update the cache
            await scanner.update_single_model_cache(file_path, file_path, local_metadata)

            return web.json_response({
                "success": True,
                "message": f"Model successfully re-linked to Civitai model {model_id}" +
                           (f" version {model_version_id}" if model_version_id else ""),
                "hash": local_metadata.get('sha256', '')
            })

        finally:
            await client.close()
        # Get metadata provider and fetch metadata using get_model_version which includes more comprehensive data
        metadata_provider = await get_default_metadata_provider()
        civitai_metadata = await metadata_provider.get_model_version(model_id, model_version_id)
        if not civitai_metadata:
            error_msg = f"Model version not found on CivitAI for ID: {model_id}"
            if model_version_id:
                error_msg += f" with version: {model_version_id}"
            return web.json_response({"success": False, "error": error_msg}, status=404)

        # Try to find the primary model file to get the SHA256 hash
        primary_model_file = None
        for file in civitai_metadata.get('files', []):
            if file.get('primary', False) and file.get('type') == 'Model':
                primary_model_file = file
                break

        # Update the SHA256 hash in local metadata if available
        if primary_model_file and primary_model_file.get('hashes', {}).get('SHA256'):
            local_metadata['sha256'] = primary_model_file['hashes']['SHA256'].lower()

        # Update metadata with CivitAI information
        await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, metadata_provider)

        # Update the cache
        await scanner.update_single_model_cache(file_path, file_path, local_metadata)

        return web.json_response({
            "success": True,
            "message": f"Model successfully re-linked to Civitai model {model_id}" +
                       (f" version {model_version_id}" if model_version_id else ""),
            "hash": local_metadata.get('sha256', '')
        })

    except Exception as e:
        logger.error(f"Error re-linking to CivitAI: {e}", exc_info=True)

@@ -876,11 +889,11 @@ class ModelRouteUtils:
        metadata = await ModelRouteUtils.load_local_metadata(metadata_path)

        # Compare hashes
        stored_hash = metadata.get('sha256', '').lower()
        stored_hash = metadata.get('sha256', '').lower();

        # Set expected hash from first file if not yet set
        if not expected_hash:
            expected_hash = stored_hash
            expected_hash = stored_hash;

        # Check if hash matches expected hash
        if actual_hash != expected_hash:

@@ -984,10 +997,11 @@ class ModelRouteUtils:
        if os.path.exists(metadata_path):
            metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
            hash_value = metadata.get('sha256')

        logger.info(f"hash_value: {hash_value}, metadata_path: {metadata_path}, metadata: {metadata}")
        # Rename all files
        renamed_files = []
        new_metadata_path = None
        new_preview = None

        for old_path, pattern in existing_files:
            # Get the file extension like .safetensors or .metadata.json

@@ -1098,3 +1112,63 @@ class ModelRouteUtils:
        except Exception as e:
            logger.error(f"Error saving metadata: {e}", exc_info=True)
            return web.Response(text=str(e), status=500)

    @staticmethod
    async def handle_add_tags(request: web.Request, scanner) -> web.Response:
        """Handle adding tags to model metadata

        Args:
            request: The aiohttp request
            scanner: The model scanner instance

        Returns:
            web.Response: The HTTP response
        """
        try:
            data = await request.json()
            file_path = data.get('file_path')
            new_tags = data.get('tags', [])

            if not file_path:
                return web.Response(text='File path is required', status=400)

            if not isinstance(new_tags, list):
                return web.Response(text='Tags must be a list', status=400)

            # Get metadata file path
            metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'

            # Load existing metadata
            metadata = await ModelRouteUtils.load_local_metadata(metadata_path)

            # Get existing tags (case insensitive)
            existing_tags = metadata.get('tags', [])
            existing_tags_lower = [tag.lower() for tag in existing_tags]

            # Add new tags that don't already exist (case insensitive check)
            tags_added = []
            for tag in new_tags:
                if isinstance(tag, str) and tag.strip():
                    tag_stripped = tag.strip()
                    if tag_stripped.lower() not in existing_tags_lower:
                        existing_tags.append(tag_stripped)
                        existing_tags_lower.append(tag_stripped.lower())
                        tags_added.append(tag_stripped)

            # Update metadata with combined tags
            metadata['tags'] = existing_tags

            # Save updated metadata
            await MetadataManager.save_metadata(file_path, metadata)

            # Update cache
            await scanner.update_single_model_cache(file_path, file_path, metadata)

            return web.json_response({
                'success': True,
                'tags': existing_tags
            })

        except Exception as e:
            logger.error(f"Error adding tags: {e}", exc_info=True)
            return web.Response(text=str(e), status=500)
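
# Standalone illustration of the case-insensitive tag merge in handle_add_tags:
# 'Style' is rejected as a duplicate of the existing 'style', whitespace is
# stripped, and empty strings are ignored. Data is made up.
existing_tags = ['style', 'anime']
existing_tags_lower = [t.lower() for t in existing_tags]
for tag in ['Style', ' portrait ', '']:
    if isinstance(tag, str) and tag.strip():
        tag_stripped = tag.strip()
        if tag_stripped.lower() not in existing_tags_lower:
            existing_tags.append(tag_stripped)
            existing_tags_lower.append(tag_stripped.lower())
print(existing_tags)  # -> ['style', 'anime', 'portrait']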
@@ -62,13 +62,14 @@ class UsageStats:
|
||||
self._bg_task = asyncio.create_task(self._background_processor())
|
||||
|
||||
self._initialized = True
|
||||
logger.info("Usage statistics tracker initialized")
|
||||
logger.debug("Usage statistics tracker initialized")
|
||||
|
||||
def _get_stats_file_path(self) -> str:
|
||||
"""Get the path to the stats JSON file"""
|
||||
if not config.loras_roots or len(config.loras_roots) == 0:
|
||||
# Fallback to temporary directory if no lora roots
|
||||
return os.path.join(config.temp_directory, self.STATS_FILENAME)
|
||||
# If no lora roots are available, we can't save stats
|
||||
# This will be handled by the caller
|
||||
raise RuntimeError("No LoRA root directories configured. Cannot initialize usage statistics.")
|
||||
|
||||
# Use the first lora root
|
||||
return os.path.join(config.loras_roots[0], self.STATS_FILENAME)
|
||||
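The new RuntimeError shifts responsibility to the caller; a minimal sketch of the expected calling pattern (hypothetical caller code, not part of this diff):

# Hypothetical caller-side handling of the new RuntimeError; the real call
# site is outside this hunk. `stats` stands in for a UsageStats instance.
def init_stats_path(stats):
    try:
        return stats._get_stats_file_path()
    except RuntimeError as e:
        print(f"Usage statistics disabled: {e}")
        return None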
@@ -163,7 +164,7 @@ class UsageStats:
            if "last_save_time" in loaded_stats:
                self.stats["last_save_time"] = loaded_stats["last_save_time"]

            logger.info(f"Loaded usage statistics from {self._stats_file_path}")
            logger.debug(f"Loaded usage statistics from {self._stats_file_path}")
        except Exception as e:
            logger.error(f"Error loading usage statistics: {e}")
@@ -1,10 +1,10 @@
from difflib import SequenceMatcher
import requests
import tempfile
import os
from bs4 import BeautifulSoup
from typing import Dict
from ..services.service_registry import ServiceRegistry
from ..config import config
from ..services.settings_manager import settings
from .constants import CIVITAI_MODEL_TAGS
import asyncio

def get_lora_info(lora_name):
@@ -50,82 +50,7 @@ def get_lora_info(lora_name):
        # No event loop is running, we can use asyncio.run()
        return asyncio.run(_get_lora_info_async())

def download_twitter_image(url):
    """Download image from a URL containing twitter:image meta tag

    Args:
        url (str): The URL to download image from

    Returns:
        str: Path to downloaded temporary image file
    """
    try:
        # Download page content
        response = requests.get(url)
        response.raise_for_status()

        # Parse HTML
        soup = BeautifulSoup(response.text, 'html.parser')

        # Find twitter:image meta tag
        meta_tag = soup.find('meta', attrs={'property': 'twitter:image'})
        if not meta_tag:
            return None

        image_url = meta_tag['content']

        # Download image
        image_response = requests.get(image_url)
        image_response.raise_for_status()

        # Save to temp file
        with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
            temp_file.write(image_response.content)
            return temp_file.name

    except Exception as e:
        print(f"Error downloading twitter image: {e}")
        return None

def download_civitai_image(url):
    """Download image from a URL containing avatar image with specific class and style attributes

    Args:
        url (str): The URL to download image from

    Returns:
        str: Path to downloaded temporary image file
    """
    try:
        # Download page content
        response = requests.get(url)
        response.raise_for_status()

        # Parse HTML
        soup = BeautifulSoup(response.text, 'html.parser')

        # Find image with specific class and style attributes
        image = soup.select_one('img.EdgeImage_image__iH4_q.max-h-full.w-auto.max-w-full')

        if not image or 'src' not in image.attrs:
            return None

        image_url = image['src']

        # Download image
        image_response = requests.get(image_url)
        image_response.raise_for_status()

        # Save to temp file
        with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
            temp_file.write(image_response.content)
            return temp_file.name

    except Exception as e:
        print(f"Error downloading civitai avatar: {e}")
        return None

def fuzzy_match(text: str, pattern: str, threshold: float = 0.7) -> bool:
def fuzzy_match(text: str, pattern: str, threshold: float = 0.85) -> bool:
    """
    Check if text matches pattern using fuzzy matching.
    Returns True if similarity ratio is above threshold.
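The function body lies outside this hunk; a minimal sketch consistent with the docstring and the SequenceMatcher import at the top of the file:

# Sketch only -- the real body is not shown in this hunk.
def fuzzy_match_sketch(text: str, pattern: str, threshold: float = 0.85) -> bool:
    ratio = SequenceMatcher(None, text.lower(), pattern.lower()).ratio()
    return ratio > threshold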
@@ -206,3 +131,95 @@ def calculate_recipe_fingerprint(loras):
    fingerprint = "|".join([f"{hash_value}:{strength}" for hash_value, strength in valid_loras])

    return fingerprint
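The resulting fingerprint is a pipe-separated list of hash:strength pairs, for example (shortened hashes, illustrative only):

# Illustrative values; real entries use full SHA-256 hashes.
valid_loras = [("a1b2c3", 0.8), ("d4e5f6", 1.0)]
print("|".join(f"{h}:{s}" for h, s in valid_loras))  # a1b2c3:0.8|d4e5f6:1.0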
def calculate_relative_path_for_model(model_data: Dict, model_type: str = 'lora') -> str:
    """Calculate relative path for existing model using template from settings

    Args:
        model_data: Model data from scanner cache
        model_type: Type of model ('lora', 'checkpoint', 'embedding')

    Returns:
        Relative path string (empty string for flat structure)
    """
    # Get path template from settings for specific model type
    path_template = settings.get_download_path_template(model_type)

    # If template is empty, return empty path (flat structure)
    if not path_template:
        return ''

    # Get base model name from model metadata
    civitai_data = model_data.get('civitai', {})

    # For CivitAI models, prefer civitai data only if 'id' exists; for non-CivitAI models, use model_data directly
    if civitai_data and civitai_data.get('id') is not None:
        base_model = model_data.get('base_model', '')
        # Get author from civitai creator data
        creator_info = civitai_data.get('creator') or {}
        author = creator_info.get('username') or 'Anonymous'
    else:
        # Fallback to model_data fields for non-CivitAI models
        base_model = model_data.get('base_model', '')
        author = 'Anonymous'  # Default for non-CivitAI models

    model_tags = model_data.get('tags', [])

    # Apply mapping if available
    base_model_mappings = settings.get('base_model_path_mappings', {})
    mapped_base_model = base_model_mappings.get(base_model, base_model)

    # Find the first Civitai model tag that exists in model_tags
    first_tag = ''
    for civitai_tag in CIVITAI_MODEL_TAGS:
        if civitai_tag in model_tags:
            first_tag = civitai_tag
            break

    # If no Civitai model tag found, fallback to first tag
    if not first_tag and model_tags:
        first_tag = model_tags[0]

    if not first_tag:
        first_tag = 'no tags'  # Default if no tags available

    # Format the template with available data
    formatted_path = path_template
    formatted_path = formatted_path.replace('{base_model}', mapped_base_model)
    formatted_path = formatted_path.replace('{first_tag}', first_tag)
    formatted_path = formatted_path.replace('{author}', author)

    return formatted_path
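A quick illustration of the substitution step; the template string and values are illustrative, but the {base_model}, {first_tag}, and {author} placeholders are exactly the ones replaced above:

# Hypothetical template and inputs; settings.get_download_path_template()
# would normally supply the template.
template = "{base_model}/{first_tag}"
path = template.replace("{base_model}", "Illustrious").replace("{first_tag}", "style")
print(path)  # Illustrious/style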
def remove_empty_dirs(path):
    """Recursively remove empty directories starting from the given path.

    Args:
        path (str): Root directory to start cleaning from

    Returns:
        int: Number of empty directories removed
    """
    removed_count = 0

    if not os.path.isdir(path):
        return removed_count

    # List all files in directory
    files = os.listdir(path)

    # Process all subdirectories first
    for file in files:
        full_path = os.path.join(path, file)
        if os.path.isdir(full_path):
            removed_count += remove_empty_dirs(full_path)

    # Check if directory is now empty (after processing subdirectories)
    if not os.listdir(path):
        try:
            os.rmdir(path)
            removed_count += 1
        except OSError:
            pass

    return removed_count
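Note that the recursion also removes the starting directory itself if it ends up empty; a short usage sketch:

# Usage sketch with a hypothetical path.
removed = remove_empty_dirs("/path/to/loras")
print(f"Removed {removed} empty directories")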
@@ -1,20 +1,19 @@
[project]
name = "comfyui-lora-manager"
description = "Revolutionize your workflow with the ultimate LoRA companion for ComfyUI!"
version = "0.8.24"
version = "0.9.3"
license = {file = "LICENSE"}
dependencies = [
    "aiohttp",
    "jinja2",
    "safetensors",
    "beautifulsoup4",
    "piexif",
    "Pillow",
    "olefile", # for getting rid of warning message
    "requests",
    "toml",
    "natsort",
    "GitPython"
    "GitPython",
    "aiosqlite"
]

[project.urls]
refs/civitai.sql (new file, 38 lines)
@@ -0,0 +1,38 @@
CREATE TABLE models (
    id INTEGER PRIMARY KEY,
    name TEXT NOT NULL,
    type TEXT NOT NULL,
    username TEXT,
    data TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
) STRICT;

CREATE TABLE model_versions (
    id INTEGER PRIMARY KEY,
    model_id INTEGER NOT NULL,
    position INTEGER NOT NULL,
    name TEXT NOT NULL,
    base_model TEXT NOT NULL,
    published_at INTEGER,
    data TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
) STRICT;

CREATE INDEX model_versions_model_id_idx ON model_versions (model_id);

CREATE TABLE model_files (
    id INTEGER PRIMARY KEY,
    model_id INTEGER NOT NULL,
    version_id INTEGER NOT NULL,
    type TEXT NOT NULL,
    sha256 TEXT,
    data TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
) STRICT;

CREATE INDEX model_files_model_id_idx ON model_files (model_id);
CREATE INDEX model_files_version_id_idx ON model_files (version_id);

CREATE TABLE archived_model_files (
    file_id INTEGER PRIMARY KEY,
    model_id INTEGER NOT NULL,
    version_id INTEGER NOT NULL
) STRICT;
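This schema pairs with the aiosqlite dependency added to the dependency lists in this change; a sketch of a hash lookup against it (the database file name is a placeholder, as how the project locates the file is outside this diff):

# Sketch: look up a model file by SHA-256 using the new aiosqlite dependency.
import asyncio
import aiosqlite

async def find_file_by_hash(db_path: str, sha256: str):
    async with aiosqlite.connect(db_path) as db:
        async with db.execute(
            "SELECT model_id, version_id, data FROM model_files WHERE sha256 = ?",
            (sha256,),
        ) as cursor:
            return await cursor.fetchone()

# asyncio.run(find_file_by_hash("civitai.db", "<full sha256 hex>"))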
@@ -1,13 +1,12 @@
aiohttp
jinja2
safetensors
beautifulsoup4
piexif
Pillow
olefile
requests
toml
numpy
natsort
pyyaml
GitPython
aiosqlite
beautifulsoup4
scripts/sync_translation_keys.py (new file, 305 lines)
@@ -0,0 +1,305 @@
#!/usr/bin/env python3
"""
Translation Key Synchronization Script

This script synchronizes new translation keys from en.json to all other locale files
while maintaining exact formatting consistency to pass test_i18n.py validation.

Features:
- Preserves exact line-by-line formatting
- Maintains proper indentation and structure
- Adds missing keys with placeholder translations
- Handles nested objects correctly
- Ensures all locale files have identical structure

Usage:
    python scripts/sync_translation_keys.py [--dry-run] [--verbose]
"""

import os
import sys
import json
import re
import argparse
from typing import Dict, List, Set, Tuple, Any, Optional
from collections import OrderedDict

# Add the parent directory to the path so we can import modules if needed
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

class TranslationKeySynchronizer:
    """Synchronizes translation keys across locale files while maintaining formatting."""

    def __init__(self, locales_dir: str, verbose: bool = False):
        self.locales_dir = locales_dir
        self.verbose = verbose
        self.reference_locale = 'en'
        self.target_locales = ['zh-CN', 'zh-TW', 'ja', 'ru', 'de', 'fr', 'es', 'ko']

    def log(self, message: str, level: str = 'INFO'):
        """Log a message if verbose mode is enabled."""
        if self.verbose or level == 'ERROR':
            print(f"[{level}] {message}")

    def load_json_preserve_order(self, file_path: str) -> Tuple[Dict[str, Any], List[str]]:
        """
        Load a JSON file preserving the exact order and formatting.
        Returns both the parsed data and the original lines.
        """
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
            content = ''.join(lines)

        # Parse JSON while preserving order
        data = json.loads(content, object_pairs_hook=OrderedDict)
        return data, lines

    def get_all_leaf_keys(self, data: Any, prefix: str = '') -> Dict[str, Any]:
        """
        Extract all leaf keys (non-object values) with their full paths.
        Returns a dictionary mapping full key paths to their values.
        """
        keys = {}

        if isinstance(data, (dict, OrderedDict)):
            for key, value in data.items():
                full_key = f"{prefix}.{key}" if prefix else key

                if isinstance(value, (dict, OrderedDict)):
                    # Recursively get nested keys
                    keys.update(self.get_all_leaf_keys(value, full_key))
                else:
                    # Leaf node - actual translatable value
                    keys[full_key] = value

        return keys
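For example, with a small nested locale object (sample data, not from the real en.json):

# Sample data only; real locale files are larger.
sync = TranslationKeySynchronizer("locales")
nested = {"modals": {"download": {"title": "Download"}}, "loading": "Loading..."}
print(sync.get_all_leaf_keys(nested))
# {'modals.download.title': 'Download', 'loading': 'Loading...'}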
    def merge_json_structures(self, reference_data: Dict[str, Any], target_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Merge the reference JSON structure with existing target translations.
        This creates a new structure that matches the reference exactly but preserves
        existing translations where available. Keys not in reference are removed.
        """
        def merge_recursive(ref_obj, target_obj):
            if isinstance(ref_obj, (dict, OrderedDict)):
                result = OrderedDict()
                # Only include keys that exist in the reference
                for key, ref_value in ref_obj.items():
                    if key in target_obj and isinstance(target_obj[key], type(ref_value)):
                        # Key exists in target with same type
                        if isinstance(ref_value, (dict, OrderedDict)):
                            # Recursively merge nested objects
                            result[key] = merge_recursive(ref_value, target_obj[key])
                        else:
                            # Use existing translation
                            result[key] = target_obj[key]
                    else:
                        # Key missing in target or type mismatch
                        if isinstance(ref_value, (dict, OrderedDict)):
                            # Recursively handle nested objects
                            result[key] = merge_recursive(ref_value, {})
                        else:
                            # Create placeholder translation
                            result[key] = f"[TODO: Translate] {ref_value}"
                return result
            else:
                # For non-dict values, use reference (this shouldn't happen at root level)
                return ref_obj

        return merge_recursive(reference_data, target_data)
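The merge semantics in one example (sample data): kept translations stay, missing keys get a TODO placeholder, and keys absent from the reference are dropped:

# Sample data only.
sync = TranslationKeySynchronizer("locales")
ref = {"save": "Save", "cancel": "Cancel"}
target = {"save": "Speichern", "obsolete": "..."}
print(sync.merge_json_structures(ref, target))
# OrderedDict([('save', 'Speichern'), ('cancel', '[TODO: Translate] Cancel')])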
    def format_json_like_reference(self, data: Dict[str, Any], reference_lines: List[str]) -> List[str]:
        """
        Format the merged JSON data to match the reference file's formatting exactly.
        """
        # Use json.dumps with proper formatting to match the reference style
        formatted_json = json.dumps(data, indent=4, ensure_ascii=False, separators=(',', ': '))

        # Split into lines and ensure consistent line endings
        formatted_lines = [line + '\n' for line in formatted_json.split('\n')]

        # Make sure the last line doesn't have extra newlines
        if formatted_lines and formatted_lines[-1].strip() == '':
            formatted_lines = formatted_lines[:-1]

        # Ensure the last line ends with just a newline
        if formatted_lines and not formatted_lines[-1].endswith('\n'):
            formatted_lines[-1] += '\n'

        return formatted_lines

    def synchronize_locale_simple(self, locale: str, reference_data: Dict[str, Any],
                                  reference_lines: List[str], dry_run: bool = False) -> bool:
        """
        Synchronize a locale file using JSON structure merging.
        Handles both addition of missing keys and removal of obsolete keys.
        """
        locale_file = os.path.join(self.locales_dir, f'{locale}.json')

        if not os.path.exists(locale_file):
            self.log(f"Locale file {locale_file} does not exist!", 'ERROR')
            return False

        try:
            target_data, _ = self.load_json_preserve_order(locale_file)
        except Exception as e:
            self.log(f"Error loading {locale_file}: {e}", 'ERROR')
            return False

        # Get keys to check for differences
        ref_keys = self.get_all_leaf_keys(reference_data)
        target_keys = self.get_all_leaf_keys(target_data)
        missing_keys = set(ref_keys.keys()) - set(target_keys.keys())
        obsolete_keys = set(target_keys.keys()) - set(ref_keys.keys())

        if not missing_keys and not obsolete_keys:
            self.log(f"Locale {locale} is already up to date")
            return False

        # Report changes
        if missing_keys:
            self.log(f"Found {len(missing_keys)} missing keys in {locale}:")
            for key in sorted(missing_keys):
                self.log(f"  + {key}")

        if obsolete_keys:
            self.log(f"Found {len(obsolete_keys)} obsolete keys in {locale}:")
            for key in sorted(obsolete_keys):
                self.log(f"  - {key}")

        if dry_run:
            total_changes = len(missing_keys) + len(obsolete_keys)
            self.log(f"DRY RUN: Would update {locale} with {len(missing_keys)} additions and {len(obsolete_keys)} deletions ({total_changes} total changes)")
            return True

        # Merge the structures (this will both add missing keys and remove obsolete ones)
        try:
            merged_data = self.merge_json_structures(reference_data, target_data)

            # Format to match reference style
            new_lines = self.format_json_like_reference(merged_data, reference_lines)

            # Validate that the result is valid JSON
            reconstructed_content = ''.join(new_lines)
            json.loads(reconstructed_content)  # This will raise an exception if invalid

            # Write the updated file
            with open(locale_file, 'w', encoding='utf-8') as f:
                f.writelines(new_lines)

            total_changes = len(missing_keys) + len(obsolete_keys)
            self.log(f"Successfully updated {locale} with {len(missing_keys)} additions and {len(obsolete_keys)} deletions ({total_changes} total changes)")
            return True

        except json.JSONDecodeError as e:
            self.log(f"Generated invalid JSON for {locale}: {e}", 'ERROR')
            return False
        except Exception as e:
            self.log(f"Error updating {locale_file}: {e}", 'ERROR')
            return False

    def synchronize_all(self, dry_run: bool = False) -> bool:
        """
        Synchronize all locale files with the reference.
        Returns True if all operations were successful.
        """
        # Load reference file
        reference_file = os.path.join(self.locales_dir, f'{self.reference_locale}.json')

        if not os.path.exists(reference_file):
            self.log(f"Reference file {reference_file} does not exist!", 'ERROR')
            return False

        try:
            reference_data, reference_lines = self.load_json_preserve_order(reference_file)
            reference_keys = self.get_all_leaf_keys(reference_data)
        except Exception as e:
            self.log(f"Error loading reference file: {e}", 'ERROR')
            return False

        self.log(f"Loaded reference file with {len(reference_keys)} keys")

        success = True
        changes_made = False

        # Synchronize each target locale
        for locale in self.target_locales:
            try:
                if self.synchronize_locale_simple(locale, reference_data, reference_lines, dry_run):
                    changes_made = True
            except Exception as e:
                self.log(f"Error synchronizing {locale}: {e}", 'ERROR')
                success = False

        if changes_made:
            self.log("Synchronization completed with changes")
        else:
            self.log("All locale files are already up to date")

        return success

def main():
    """Main entry point for the script."""
    parser = argparse.ArgumentParser(
        description='Synchronize translation keys from en.json to all other locale files'
    )
    parser.add_argument(
        '--dry-run',
        action='store_true',
        help='Show what would be changed without making actual changes'
    )
    parser.add_argument(
        '--verbose', '-v',
        action='store_true',
        help='Enable verbose output'
    )
    parser.add_argument(
        '--locales-dir',
        default=None,
        help='Path to locales directory (default: auto-detect from script location)'
    )

    args = parser.parse_args()

    # Determine locales directory
    if args.locales_dir:
        locales_dir = args.locales_dir
    else:
        # Auto-detect based on script location
        script_dir = os.path.dirname(os.path.abspath(__file__))
        locales_dir = os.path.join(os.path.dirname(script_dir), 'locales')

    if not os.path.exists(locales_dir):
        print(f"ERROR: Locales directory not found: {locales_dir}")
        sys.exit(1)

    print("Translation Key Synchronization")
    print(f"Locales directory: {locales_dir}")
    print(f"Mode: {'DRY RUN' if args.dry_run else 'LIVE UPDATE'}")
    print("-" * 50)

    # Create synchronizer and run
    synchronizer = TranslationKeySynchronizer(locales_dir, args.verbose)

    try:
        success = synchronizer.synchronize_all(args.dry_run)

        if success:
            print("\n✅ Synchronization completed successfully!")
            if not args.dry_run:
                print("💡 Run 'python test_i18n.py' to verify formatting consistency")
        else:
            print("\n❌ Synchronization completed with errors!")
            sys.exit(1)

    except KeyboardInterrupt:
        print("\n⚠️ Operation cancelled by user")
        sys.exit(1)
    except Exception as e:
        print(f"\n❌ Unexpected error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

if __name__ == '__main__':
    main()
@@ -1,6 +1,5 @@
{
    "civitai_api_key": "your_civitai_api_key_here",
    "show_only_sfw": false,
    "folder_paths": {
        "loras": [
            "C:/path/to/your/loras_folder",
@@ -9,6 +8,10 @@
        "checkpoints": [
            "C:/path/to/your/checkpoints_folder",
            "C:/path/to/another/checkpoints_folder"
        ],
        "embeddings": [
            "C:/path/to/your/embeddings_folder",
            "C:/path/to/another/embeddings_folder"
        ]
    }
}
@@ -2,6 +2,7 @@ from pathlib import Path
import os
import sys
import json
from py.middleware.cache_middleware import cache_control

# Create mock modules for py/nodes directory - add this before any other imports
def mock_nodes_directory():
@@ -129,7 +130,7 @@ class StandaloneServer:
    """Server implementation for standalone mode"""

    def __init__(self):
        self.app = web.Application(logger=logger)
        self.app = web.Application(logger=logger, middlewares=[cache_control])
        self.instance = self  # Make it compatible with PromptServer.instance pattern

        # Ensure the app's access logger is configured to reduce verbosity
@@ -213,6 +214,54 @@ class StandaloneServer:
# After all mocks are in place, import LoraManager
from py.lora_manager import LoraManager

def validate_settings():
    """Validate that settings.json exists and has required configuration"""
    settings_path = os.path.join(os.path.dirname(__file__), 'settings.json')
    if not os.path.exists(settings_path):
        logger.error("=" * 80)
        logger.error("CONFIGURATION ERROR: settings.json file not found!")
        logger.error("")
        logger.error("To run in standalone mode, you need to create a settings.json file.")
        logger.error("Please follow these steps:")
        logger.error("")
        logger.error("1. Copy the provided settings.json.example file to create a new file")
        logger.error("   named settings.json in the comfyui-lora-manager folder")
        logger.error("")
        logger.error("2. Edit settings.json to include your correct model folder paths")
        logger.error("   and CivitAI API key")
        logger.error("=" * 80)
        return False

    # Check if settings.json has valid folder paths
    try:
        with open(settings_path, 'r', encoding='utf-8') as f:
            settings = json.load(f)

        folder_paths = settings.get('folder_paths', {})
        has_valid_paths = False

        for path_type in ['loras', 'checkpoints', 'embeddings']:
            paths = folder_paths.get(path_type, [])
            if paths and any(os.path.exists(p) for p in paths):
                has_valid_paths = True
                break

        if not has_valid_paths:
            logger.warning("=" * 80)
            logger.warning("CONFIGURATION WARNING: No valid model folder paths found!")
            logger.warning("")
            logger.warning("Your settings.json exists but doesn't contain valid folder paths.")
            logger.warning("Please check and update the folder_paths section in settings.json")
            logger.warning("to include existing directories for your models.")
            logger.warning("=" * 80)
            return False

    except Exception as e:
        logger.error(f"Error reading settings.json: {e}")
        return False

    return True

class StandaloneLoraManager(LoraManager):
    """Extended LoraManager for standalone mode"""

@@ -339,6 +388,11 @@ class StandaloneLoraManager(LoraManager):
                logger.warning(f"Failed to add static route on initialization for {target_path}: {e}")
                continue

        # Add static route for locales JSON files
        if os.path.exists(config.i18n_path):
            app.router.add_static('/locales', config.i18n_path)
            logger.info(f"Added static route for locales: /locales -> {config.i18n_path}")

        # Add static route for plugin assets
        app.router.add_static('/loras_static', config.static_path)

@@ -400,6 +454,12 @@ async def main():
    # Set log level
    logging.getLogger().setLevel(getattr(logging, args.log_level))

    # Validate settings before proceeding
    if not validate_settings():
        logger.error("Cannot start server due to configuration issues.")
        logger.error("Please fix the settings.json file and try again.")
        return

    # Create the server instance
    server = StandaloneServer()
@@ -46,7 +46,7 @@ html, body {

    /* Composed Colors */
    --lora-accent: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
    --lora-surface: oklch(100% 0 0 / 0.98);
    --lora-surface: oklch(97% 0 0 / 0.95);
    --lora-border: oklch(90% 0.02 256 / 0.15);
    --lora-text: oklch(95% 0.02 256);
    --lora-error: oklch(75% 0.32 29);
@@ -1,77 +1,3 @@
/* Bulk Operations Styles */
.bulk-operations-panel {
    position: fixed;
    bottom: 20px;
    left: 50%;
    transform: translateY(100px) translateX(-50%);
    background: var(--card-bg);
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-base);
    padding: 12px 16px;
    box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
    z-index: var(--z-overlay);
    display: flex;
    flex-direction: column;
    min-width: 300px;
    transition: all 0.4s cubic-bezier(0.175, 0.885, 0.32, 1.275);
    opacity: 0;
}

.bulk-operations-header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-bottom: 12px;
    gap: 20px; /* Increase space between count and buttons */
}

#selectedCount {
    font-weight: 500;
    background: var(--bg-color);
    padding: 6px 12px;
    border-radius: var(--border-radius-xs);
    border: 1px solid var(--border-color);
    min-width: 80px;
    text-align: center;
}

.bulk-operations-actions {
    display: flex;
    gap: 8px;
}

.bulk-operations-actions button {
    padding: 6px 12px;
    border-radius: var(--border-radius-xs);
    background: var(--bg-color);
    border: 1px solid var(--border-color);
    color: var(--text-color);
    cursor: pointer;
    font-size: 14px;
    display: flex;
    align-items: center;
    gap: 6px;
    transition: all 0.2s ease;
}

.bulk-operations-actions button:hover {
    background: var(--lora-accent);
    color: white;
    border-color: var(--lora-accent);
}

/* Danger button style - updated to use proper theme variables */
.bulk-operations-actions button.danger-btn {
    background: oklch(70% 0.2 29); /* Light red background that works in both themes */
    color: oklch(98% 0.01 0); /* Almost white text for good contrast */
    border-color: var(--lora-error);
}

.bulk-operations-actions button.danger-btn:hover {
    background: var(--lora-error);
    color: oklch(100% 0 0); /* Pure white text on hover for maximum contrast */
}

/* Style for selected cards */
.model-card.selected {
    box-shadow: 0 0 0 2px var(--lora-accent);
@@ -95,201 +21,61 @@
    z-index: 1;
}

/* Update bulk operations button to match others when active */
#bulkOperationsBtn.active {
    background: var(--lora-accent);
    color: white;
    border-color: var(--lora-accent);
}

@media (max-width: 768px) {
    .bulk-operations-panel {
        width: calc(100% - 40px);
        left: 20px;
        transform: none;
        border-radius: var(--border-radius-sm);
    }

    .bulk-operations-actions {
        flex-wrap: wrap;
    }
}

.bulk-operations-panel.visible {
    transform: translateY(0) translateX(-50%);
    opacity: 1;
}

/* Thumbnail Strip Styles */
.selected-thumbnails-strip {
/* Marquee selection styles */
.marquee-selection {
    position: fixed;
    bottom: 80px; /* Position above the bulk operations panel */
    left: 50%;
    transform: translateX(-50%) translateY(20px);
    background: var(--card-bg);
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-base);
    box-shadow: 0 4px 16px rgba(0, 0, 0, 0.15);
    z-index: calc(var(--z-overlay) - 1); /* Just below the bulk panel z-index */
    padding: 16px;
    max-width: 80%;
    width: auto;
    transition: all 0.3s ease;
    opacity: 0;
    overflow: hidden;
    border: 2px dashed var(--lora-accent, #007bff);
    background: rgba(0, 123, 255, 0.1);
    pointer-events: none;
    z-index: 9999;
    border-radius: 2px;
}

.selected-thumbnails-strip.visible {
    opacity: 1;
    transform: translateX(-50%) translateY(0);
/* Visual feedback when marquee selecting */
.marquee-selecting {
    cursor: crosshair;
    user-select: none;
    -webkit-user-select: none;
    -moz-user-select: none;
    -ms-user-select: none;
}

.thumbnails-container {
    display: flex;
    gap: 12px;
    overflow-x: auto;
    padding-bottom: 8px; /* Space for scrollbar */
/* Prevent text selection during marquee */
.marquee-selecting * {
    user-select: none;
    -webkit-user-select: none;
    -moz-user-select: none;
    -ms-user-select: none;
}

/* Remove bulk base model modal specific styles - now using shared components */
/* Use shared metadata editing styles instead */

/* Override for bulk base model select to ensure proper width */
.bulk-base-model-select {
    width: 100%;
    max-width: 100%;
    align-items: flex-start;
}

.selected-thumbnail {
    position: relative;
    width: 80px;
    min-width: 80px; /* Prevent shrinking */
    padding: 6px 10px;
    border-radius: var(--border-radius-xs);
    border: 1px solid var(--border-color);
    overflow: hidden;
    cursor: pointer;
    background: var(--bg-color);
    transition: transform 0.2s ease, box-shadow 0.2s ease;
}

.selected-thumbnail:hover {
    transform: translateY(-2px);
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}

.selected-thumbnail img,
.selected-thumbnail video {
    width: 100%;
    aspect-ratio: 1 / 1;
    object-fit: cover;
    display: block;
}

.thumbnail-name {
    position: absolute;
    bottom: 0;
    left: 0;
    right: 0;
    background: rgba(0, 0, 0, 0.6);
    color: white;
    font-size: 10px;
    padding: 3px 5px;
    text-overflow: ellipsis;
    white-space: nowrap;
    overflow: hidden;
}

.thumbnail-remove {
    position: absolute;
    top: 3px;
    right: 3px;
    width: 18px;
    height: 18px;
    border-radius: 50%;
    background: rgba(0, 0, 0, 0.5);
    color: white;
    border: none;
    display: flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
    font-size: 10px;
    opacity: 0.7;
    transition: opacity 0.2s ease, background-color 0.2s ease;
}

.thumbnail-remove:hover {
    opacity: 1;
    background: var(--lora-error);
}

.strip-close-btn {
    position: absolute;
    top: 5px;
    right: 5px;
    width: 20px;
    height: 20px;
    background: none;
    border: none;
    background-color: var(--lora-surface);
    color: var(--text-color);
    cursor: pointer;
    display: flex;
    align-items: center;
    justify-content: center;
    opacity: 0.7;
    transition: opacity 0.2s ease;
    font-size: 0.95em;
    height: 32px;
}

.strip-close-btn:hover {
    opacity: 1;
.bulk-base-model-select:focus {
    border-color: var(--lora-accent);
    outline: none;
}

/* Style the selectedCount to indicate it's clickable */
.selectable-count {
    display: flex;
    align-items: center;
    gap: 5px;
    cursor: pointer;
    transition: background-color 0.2s ease;
/* Dark theme support for bulk base model select */
[data-theme="dark"] .bulk-base-model-select {
    background-color: rgba(30, 30, 30, 0.9);
    color: var(--text-color);
}

.selectable-count:hover {
    background: var(--lora-border);
}

.dropdown-caret {
    font-size: 12px;
    visibility: hidden; /* Will be shown via JS when items are selected */
}

/* Scrollbar styling for the thumbnails container */
.thumbnails-container::-webkit-scrollbar {
    height: 6px;
}

.thumbnails-container::-webkit-scrollbar-track {
    background: var(--bg-color);
    border-radius: 3px;
}

.thumbnails-container::-webkit-scrollbar-thumb {
    background: var(--border-color);
    border-radius: 3px;
}

.thumbnails-container::-webkit-scrollbar-thumb:hover {
    background: var(--lora-accent);
}

/* Mobile optimizations */
@media (max-width: 768px) {
    .selected-thumbnails-strip {
        width: calc(100% - 40px);
        max-width: none;
        left: 20px;
        transform: translateY(20px);
        border-radius: var(--border-radius-sm);
    }

    .selected-thumbnails-strip.visible {
        transform: translateY(0);
    }

    .selected-thumbnail {
        width: 70px;
        min-width: 70px;
    }
[data-theme="dark"] .bulk-base-model-select option {
    background-color: #2d2d2d;
    color: var(--text-color);
}
@@ -41,7 +41,7 @@
}

/* Responsive adjustments for 1440p screens (2K) */
@media (min-width: 2000px) {
@media (min-width: 2150px) {
    .card-grid {
        max-width: 1800px; /* Increased for 2K screens */
        grid-template-columns: repeat(auto-fill, minmax(270px, 1fr));
@@ -525,7 +525,7 @@
}

/* For larger screens, allow more space for the cards */
@media (min-width: 2000px) {
@media (min-width: 2150px) {
    .card-grid.virtual-scroll {
        max-width: 1800px;
    }
@@ -1,197 +0,0 @@
/* Download Modal Styles */
.download-step {
    margin: var(--space-2) 0;
}

.input-group {
    margin-bottom: var(--space-2);
}

.input-group label {
    display: block;
    margin-bottom: 8px;
    color: var(--text-color);
}

.input-group input,
.input-group select {
    width: 100%;
    padding: 8px;
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    background: var(--bg-color);
    color: var(--text-color);
}

/* Version List Styles */
.version-list {
    max-height: 400px;
    overflow-y: auto;
    margin: var(--space-2) 0;
    display: flex;
    flex-direction: column;
    gap: 12px;
    padding: 1px;
}

.version-item {
    display: flex;
    gap: var(--space-2);
    padding: var(--space-2);
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-sm);
    cursor: pointer;
    transition: all 0.2s ease;
    background: var(--bg-color);
    margin: 1px;
    position: relative;
}

.version-item:hover {
    border-color: var(--lora-accent);
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
    z-index: 1;
}

.version-item.selected {
    border: 2px solid var(--lora-accent);
    background: oklch(var(--lora-accent) / 0.05);
}

.version-thumbnail {
    width: 80px;
    height: 80px;
    flex-shrink: 0;
    border-radius: var(--border-radius-xs);
    overflow: hidden;
    background: var(--bg-color);
}

.version-thumbnail img {
    width: 100%;
    height: 100%;
    object-fit: cover;
}

.version-content {
    display: flex;
    flex-direction: column;
    gap: 8px;
    flex: 1;
    min-width: 0;
}

.version-header {
    display: flex;
    align-items: flex-start;
    justify-content: space-between;
    gap: var(--space-2);
}

.version-content h3 {
    margin: 0;
    font-size: 1.1em;
    color: var(--text-color);
    flex: 1;
}

.version-content .version-info {
    display: flex;
    flex-wrap: wrap;
    flex-direction: row !important;
    gap: 8px;
    align-items: center;
    font-size: 0.9em;
}

.version-content .version-info .base-model {
    background: oklch(var(--lora-accent) / 0.1);
    color: var(--lora-accent);
    padding: 2px 8px;
    border-radius: var(--border-radius-xs);
}

.version-meta {
    display: flex;
    gap: 12px;
    font-size: 0.85em;
    color: var(--text-color);
    opacity: 0.7;
}

.version-meta span {
    display: flex;
    align-items: center;
    gap: 4px;
}

/* Folder Browser Styles */
.folder-browser {
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    padding: var(--space-1);
    max-height: 200px;
    overflow-y: auto;
}

.folder-item {
    padding: 8px;
    cursor: pointer;
    border-radius: var(--border-radius-xs);
    transition: background-color 0.2s;
}

.folder-item:hover {
    background: var(--lora-surface);
}

.folder-item.selected {
    background: oklch(var(--lora-accent) / 0.1);
    border: 1px solid var(--lora-accent);
}

/* Path Preview Styles */
.path-preview {
    margin-bottom: var(--space-3);
    padding: var(--space-2);
    background: var(--bg-color);
    border-radius: var(--border-radius-sm);
    border: 1px dashed var(--border-color);
}

.path-preview label {
    display: block;
    margin-bottom: 8px;
    color: var(--text-color);
    font-size: 0.9em;
    opacity: 0.8;
}

.path-display {
    padding: var(--space-1);
    color: var(--text-color);
    font-family: monospace;
    font-size: 0.9em;
    line-height: 1.4;
    white-space: pre-wrap;
    word-break: break-all;
    opacity: 0.85;
    background: var(--lora-surface);
    border-radius: var(--border-radius-xs);
}

/* Dark theme adjustments */
[data-theme="dark"] .version-item {
    background: var(--lora-surface);
}

[data-theme="dark"] .local-path {
    background: var(--lora-surface);
    border-color: var(--lora-border);
}

/* Enhance the local badge to make it more noticeable */
.version-item.exists-locally {
    background: oklch(var(--lora-accent) / 0.05);
    border-left: 4px solid var(--lora-accent);
}
@@ -27,7 +27,7 @@
}

/* Responsive container for larger screens - match container in layout.css */
@media (min-width: 2000px) {
@media (min-width: 2150px) {
    .duplicates-banner .banner-content {
        max-width: 1800px;
    }
@@ -130,7 +130,7 @@
}

/* Add responsive container adjustments for duplicate groups - match container in banner */
@media (min-width: 2000px) {
@media (min-width: 2150px) {
    .duplicate-group {
        max-width: 1800px;
    }
@@ -6,7 +6,7 @@
    z-index: var(--z-header);
    height: 48px; /* Reduced height */
    width: 100%;
    box-shadow: 0 1px 3px rgba(0,0,0,0.05);
    box-shadow: 0 2px 4px rgba(0,0,0,0.1); /* Slightly stronger shadow */
}

.header-container {
@@ -19,6 +19,18 @@
    height: 100%;
}

/* Responsive header container for larger screens */
@media (min-width: 2150px) {
    .header-container {
        max-width: 1800px;
    }
}

@media (min-width: 3000px) {
    .header-container {
        max-width: 2400px;
    }
}

/* Logo and title styling */
.header-branding {
    display: flex;
@@ -337,72 +337,7 @@
    margin-left: 8px;
}

/* Location Selection Styles */
.location-selection {
    margin: var(--space-2) 0;
    padding: var(--space-2);
    background: var(--lora-surface);
    border-radius: var(--border-radius-sm);
}

/* Reuse folder browser and path preview styles from download-modal.css */
.folder-browser {
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    padding: var(--space-1);
    max-height: 200px;
    overflow-y: auto;
}

.folder-item {
    padding: 8px;
    cursor: pointer;
    border-radius: var(--border-radius-xs);
    transition: background-color 0.2s;
}

.folder-item:hover {
    background: var(--lora-surface);
}

.folder-item.selected {
    background: oklch(var(--lora-accent) / 0.1);
    border: 1px solid var(--lora-accent);
}

.path-preview {
    margin-bottom: var(--space-3);
    padding: var(--space-2);
    background: var(--bg-color);
    border-radius: var(--border-radius-sm);
    border: 1px dashed var(--border-color);
}

.path-preview label {
    display: block;
    margin-bottom: 8px;
    color: var(--text-color);
    font-size: 0.9em;
    opacity: 0.8;
}

.path-display {
    padding: var(--space-1);
    color: var(--text-color);
    font-family: monospace;
    font-size: 0.9em;
    line-height: 1.4;
    white-space: pre-wrap;
    word-break: break-all;
    opacity: 0.85;
    background: var(--lora-surface);
    border-radius: var(--border-radius-xs);
}

/* Input Group Styles */
.input-group {
    margin-bottom: var(--space-2);
}

.input-with-button {
    display: flex;
@@ -430,22 +365,6 @@
    background: oklch(from var(--lora-accent) l c h / 0.9);
}

.input-group label {
    display: block;
    margin-bottom: 8px;
    color: var(--text-color);
}

.input-group input,
.input-group select {
    width: 100%;
    padding: 8px;
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    background: var(--bg-color);
    color: var(--text-color);
}

/* Dark theme adjustments */
[data-theme="dark"] .lora-item {
    background: var(--lora-surface);
@@ -40,10 +40,10 @@
    border-radius: var(--border-radius-xs);
    padding: 8px;
    position: absolute;
    z-index: 9999; /* 确保在卡片上方显示 */
    left: 120%; /* 将tooltip显示在图标右侧 */
    top: 50%; /* 垂直居中 */
    transform: translateY(-50%); /* 垂直居中 */
    z-index: 9999; /* Ensure tooltip appears above cards */
    left: 120%; /* Position tooltip to the right of the icon */
    top: 50%; /* Vertically center */
    transform: translateY(-15%); /* Vertically center */
    opacity: 0;
    transition: opacity 0.3s;
    box-shadow: 0 3px 8px rgba(0, 0, 0, 0.15);
@@ -55,12 +55,12 @@
.tooltip .tooltiptext::after {
    content: "";
    position: absolute;
    top: 50%; /* 箭头垂直居中 */
    right: 100%; /* 箭头在左侧 */
    top: 50%; /* Vertically center arrow */
    right: 100%; /* Arrow on the left side */
    margin-top: -5px;
    border-width: 5px;
    border-style: solid;
    border-color: transparent var(--lora-border) transparent transparent; /* 箭头指向左侧 */
    border-color: transparent var(--lora-border) transparent transparent; /* Arrow points left */
}

.tooltip:hover .tooltiptext {
@@ -67,6 +67,14 @@
    font-size: 0.9em;
}

.file-path[data-action="open-file-location"] {
    cursor: pointer;
    text-decoration: underline;
}

.file-path[data-action="open-file-location"]:hover {
    opacity: 0.8;
}

.description-text {
    line-height: 1.5;
    max-height: 100px;
@@ -176,11 +176,6 @@
    background: linear-gradient(45deg, #4a90e2, #357abd);
}

/* Remove old node-color-indicator styles */
.node-color-indicator {
    display: none;
}

.send-all-item {
    border-top: 1px solid var(--border-color);
    font-weight: 500;
@@ -217,4 +212,24 @@
    font-size: 12px;
    color: var(--text-muted);
    font-style: italic;
}

/* Bulk Context Menu Header */
.bulk-context-header {
    padding: 10px 12px;
    background: var(--card-bg); /* Use card background for subtlety */
    color: var(--text-color); /* Use standard text color */
    display: flex;
    align-items: center;
    gap: 8px;
    font-weight: 500;
    font-size: 14px;
    border-radius: var(--border-radius-xs) var(--border-radius-xs) 0 0;
    border-bottom: 1px solid var(--border-color); /* Add subtle separator */
}

.bulk-context-header i {
    width: 16px;
    text-align: center;
    color: var(--lora-accent); /* Accent only the icon for a hint of color */
}
@@ -23,7 +23,7 @@ body.modal-open {
    position: relative;
    max-width: 800px;
    height: auto;
    max-height: calc(90vh - 48px); /* Adjust to account for header height */
    max-height: calc(90vh);
    margin: 1rem auto; /* Keep reduced top margin */
    background: var(--lora-surface);
    border-radius: var(--border-radius-base);
@@ -37,6 +37,10 @@ body.modal-open {
    overflow-x: hidden; /* Prevent a horizontal scrollbar */
}

.modal-content-large {
    min-height: 480px;
}

/* Lock the body while a modal is open */
body.modal-open {
    overflow: hidden !important; /* Override the scroll set in base.css */
@@ -204,6 +208,14 @@ body.modal-open {
    pointer-events: none;
}

button:disabled,
.primary-btn:disabled,
.danger-btn:disabled {
    opacity: 0.5;
    cursor: not-allowed;
    pointer-events: none;
}

.restart-required-icon {
    color: var(--lora-warning);
    margin-left: 5px;
@@ -224,14 +236,76 @@ body.modal-open {
    background-color: oklch(35% 0.02 256 / 0.98);
}

.primary-btn.disabled {
    opacity: 0.5;
    cursor: not-allowed;
/* Danger button styles */
.danger-btn {
    display: flex;
    align-items: center;
    gap: 8px;
    padding: 8px 16px;
    background-color: var(--lora-error);
    color: white;
    border: none;
    border-radius: var(--border-radius-sm);
    cursor: pointer;
    transition: background-color 0.2s;
    font-size: 0.95em;
}

.primary-btn.disabled {
    opacity: 0.5;
    cursor: not-allowed;
.danger-btn:hover {
    background-color: oklch(from var(--lora-error) l c h / 85%);
    color: white;
}

/* Metadata archive status styles */
.metadata-archive-status {
    background: rgba(0, 0, 0, 0.03);
    border: 1px solid rgba(0, 0, 0, 0.1);
    border-radius: var(--border-radius-sm);
    padding: var(--space-2);
    margin-bottom: var(--space-2);
}

[data-theme="dark"] .metadata-archive-status {
    background: rgba(255, 255, 255, 0.03);
    border: 1px solid var(--lora-border);
}

.archive-status-item {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-bottom: 8px;
    font-size: 0.95em;
}

.archive-status-item:last-child {
    margin-bottom: 0;
}

.archive-status-label {
    font-weight: 500;
    color: var(--text-color);
    opacity: 0.8;
}

.archive-status-value {
    color: var(--text-color);
}

.archive-status-value.status-available {
    color: var(--lora-success, #10b981);
}

.archive-status-value.status-unavailable {
    color: var(--lora-warning, #f59e0b);
}

.archive-status-value.status-enabled {
    color: var(--lora-success, #10b981);
}

.archive-status-value.status-disabled {
    color: var(--lora-error, #ef4444);
}

/* Add styles for delete preview image */
static/css/components/modal/download-modal.css (new file, 505 lines)
@@ -0,0 +1,505 @@
/* Download Modal Styles */
.input-group {
    margin-bottom: var(--space-2);
}

.input-group label {
    display: block;
    margin-bottom: 8px;
    color: var(--text-color);
}

.input-group input,
.input-group select {
    width: 100%;
    padding: 8px;
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    background: var(--bg-color);
    color: var(--text-color);
}

/* Version List Styles */
.version-list {
    max-height: 400px;
    overflow-y: auto;
    margin: var(--space-2) 0;
    display: flex;
    flex-direction: column;
    gap: 12px;
    padding: 1px;
}

.version-item {
    display: flex;
    gap: var(--space-2);
    padding: var(--space-2);
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-sm);
    cursor: pointer;
    transition: all 0.2s ease;
    background: var(--bg-color);
    margin: 1px;
    position: relative;
}

.version-item:hover {
    border-color: var(--lora-accent);
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
    z-index: 1;
}

.version-item.selected {
    border: 2px solid var(--lora-accent);
    background: oklch(var(--lora-accent) / 0.05);
}

.version-thumbnail {
    width: 80px;
    height: 80px;
    flex-shrink: 0;
    border-radius: var(--border-radius-xs);
    overflow: hidden;
    background: var(--bg-color);
}

.version-thumbnail img {
    width: 100%;
    height: 100%;
    object-fit: cover;
}

.version-content {
    display: flex;
    flex-direction: column;
    gap: 8px;
    flex: 1;
    min-width: 0;
}

.version-header {
    display: flex;
    align-items: flex-start;
    justify-content: space-between;
    gap: var(--space-2);
}

.version-content h3 {
    margin: 0;
    font-size: 1.1em;
    color: var(--text-color);
    flex: 1;
}

.version-content .version-info {
    display: flex;
    flex-wrap: wrap;
    flex-direction: row !important;
    gap: 8px;
    align-items: center;
    font-size: 0.9em;
}

.version-content .version-info .base-model {
    background: oklch(var(--lora-accent) / 0.1);
    color: var(--lora-accent);
    padding: 2px 8px;
    border-radius: var(--border-radius-xs);
}

.version-meta {
    display: flex;
    gap: 12px;
    font-size: 0.85em;
    color: var(--text-color);
    opacity: 0.7;
}

.version-meta span {
    display: flex;
    align-items: center;
    gap: 4px;
}

.folder-item {
    padding: 8px;
    cursor: pointer;
    border-radius: var(--border-radius-xs);
    transition: background-color 0.2s;
}

.folder-item:hover {
    background: var(--lora-surface);
}

.folder-item.selected {
    background: oklch(var(--lora-accent) / 0.1);
    border: 1px solid var(--lora-accent);
}

/* Path Input Styles */
.path-input-container {
    position: relative;
    display: flex;
    gap: 8px;
    align-items: center;
}

.path-input-container input {
    flex: 1;
}

.create-folder-btn {
    padding: 8px;
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    background: var(--bg-color);
    color: var(--text-color);
    cursor: pointer;
    transition: all 0.2s ease;
    display: flex;
    align-items: center;
    justify-content: center;
    width: 36px;
    height: 36px;
}

.create-folder-btn:hover {
    border-color: var(--lora-accent);
    background: oklch(var(--lora-accent) / 0.05);
}

.path-suggestions {
    position: absolute;
    top: 46%;
    left: 0;
    right: 0;
    z-index: 1000;
    margin: 0 24px;
    background: var(--bg-color);
    border: 1px solid var(--border-color);
    border-top: none;
    border-radius: 0 0 var(--border-radius-xs) var(--border-radius-xs);
    max-height: 200px;
    overflow-y: auto;
}

.path-suggestion {
    padding: 8px 12px;
    cursor: pointer;
    transition: background-color 0.2s;
    border-bottom: 1px solid var(--border-color);
}

.path-suggestion:last-child {
    border-bottom: none;
}

.path-suggestion:hover {
    background: var(--lora-surface);
}

.path-suggestion.active {
    background: oklch(var(--lora-accent) / 0.1);
    color: var(--lora-accent);
}

/* Breadcrumb Navigation Styles */
.breadcrumb-nav {
    display: flex;
    align-items: center;
    gap: 4px;
    margin-bottom: var(--space-2);
    padding: var(--space-1);
    background: var(--lora-surface);
    border-radius: var(--border-radius-xs);
    border: 1px solid var(--border-color);
    overflow-x: auto;
    white-space: nowrap;
}

.breadcrumb-item {
    display: flex;
    align-items: center;
    gap: 4px;
    padding: 4px 8px;
    border-radius: var(--border-radius-xs);
    cursor: pointer;
    transition: all 0.2s ease;
    color: var(--text-color);
    opacity: 0.7;
    text-decoration: none;
}

.breadcrumb-item:hover {
    background: var(--bg-color);
    opacity: 1;
}

.breadcrumb-item.active {
    background: oklch(var(--lora-accent) / 0.1);
    color: var(--lora-accent);
    opacity: 1;
}

.breadcrumb-separator {
    color: var(--text-color);
    opacity: 0.5;
    margin: 0 4px;
}

/* Folder Tree Styles */
.folder-tree-container {
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    background: var(--bg-color);
    max-height: 300px;
    overflow-y: auto;
}

.folder-tree {
    padding: var(--space-1);
}

.tree-node {
    user-select: none;
}

.tree-node-content {
    display: flex;
    align-items: center;
    gap: 4px;
    padding: 4px 8px;
    cursor: pointer;
    border-radius: var(--border-radius-xs);
    transition: all 0.2s ease;
    position: relative;
}

.tree-node-content:hover {
    background: var(--lora-surface);
}

.tree-node-content.selected {
    background: oklch(var(--lora-accent) / 0.1);
    border: 1px solid var(--lora-accent);
}

.tree-expand-icon {
    width: 16px;
    height: 16px;
    display: flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
    border-radius: 2px;
    transition: all 0.2s ease;
}

.tree-expand-icon:hover {
    background: var(--lora-surface);
}

.tree-expand-icon.expanded {
    transform: rotate(90deg);
}

.tree-folder-icon {
    width: 16px;
    height: 16px;
    display: flex;
    align-items: center;
    justify-content: center;
    color: var(--lora-accent);
}

.tree-folder-name {
    flex: 1;
    font-size: 0.9em;
    color: var(--text-color);
}

.tree-children {
    margin-left: 20px;
    display: none;
}

.tree-children.expanded {
    display: block;
}

.tree-node.has-children > .tree-node-content .tree-expand-icon {
    opacity: 1;
}

.tree-node:not(.has-children) > .tree-node-content .tree-expand-icon {
    opacity: 0;
    pointer-events: none;
}

/* Create folder inline form */
.create-folder-form {
    display: flex;
    gap: 8px;
    margin-left: 20px;
    align-items: center;
    height: 21px;
}

.create-folder-form input {
    flex: 1;
    padding: 4px 8px;
    border: 1px solid var(--lora-accent);
    border-radius: var(--border-radius-xs);
    background: var(--bg-color);
    color: var(--text-color);
    font-size: 0.9em;
}

.create-folder-form button {
    padding: 4px 8px;
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    background: var(--bg-color);
    color: var(--text-color);
    cursor: pointer;
    font-size: 0.8em;
    transition: all 0.2s ease;
}

.create-folder-form button.confirm {
|
||||
background: var(--lora-accent);
|
||||
color: white;
|
||||
border-color: var(--lora-accent);
|
||||
}
|
||||
|
||||
.create-folder-form button:hover {
|
||||
background: var(--lora-surface);
|
||||
}
|
||||
|
||||
/* Path Preview Styles */
|
||||
.path-preview {
|
||||
margin-bottom: var(--space-3);
|
||||
padding: var(--space-2);
|
||||
background: var(--bg-color);
|
||||
border-radius: var(--border-radius-sm);
|
||||
border: 1px dashed var(--border-color);
|
||||
}
|
||||
|
||||
.path-preview-header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
margin-bottom: 12px;
|
||||
gap: var(--space-2);
|
||||
}
|
||||
|
||||
.path-preview-header label {
|
||||
margin: 0;
|
||||
color: var(--text-color);
|
||||
font-size: 0.9em;
|
||||
opacity: 0.8;
|
||||
}
|
||||
|
||||
.path-display {
|
||||
padding: var(--space-1);
|
||||
color: var(--text-color);
|
||||
font-family: monospace;
|
||||
font-size: 0.9em;
|
||||
line-height: 1.4;
|
||||
white-space: pre-wrap;
|
||||
word-break: break-all;
|
||||
opacity: 0.85;
|
||||
background: var(--lora-surface);
|
||||
border-radius: var(--border-radius-xs);
|
||||
}
|
||||
|
||||
/* Inline Toggle Styles */
|
||||
.inline-toggle-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
cursor: pointer;
|
||||
user-select: none;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.inline-toggle-label {
|
||||
font-size: 0.85em;
|
||||
color: var(--text-color);
|
||||
opacity: 0.9;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.inline-toggle-container .toggle-switch {
|
||||
position: relative;
|
||||
width: 36px;
|
||||
height: 18px;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.inline-toggle-container .toggle-switch input {
|
||||
opacity: 0;
|
||||
width: 0;
|
||||
height: 0;
|
||||
position: absolute;
|
||||
}
|
||||
|
||||
.inline-toggle-container .toggle-slider {
|
||||
position: absolute;
|
||||
cursor: pointer;
|
||||
top: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
background-color: var(--border-color);
|
||||
transition: all 0.3s ease;
|
||||
border-radius: 18px;
|
||||
}
|
||||
|
||||
.inline-toggle-container .toggle-slider:before {
|
||||
position: absolute;
|
||||
content: "";
|
||||
height: 12px;
|
||||
width: 12px;
|
||||
left: 3px;
|
||||
bottom: 3px;
|
||||
background-color: white;
|
||||
transition: all 0.3s ease;
|
||||
border-radius: 50%;
|
||||
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
|
||||
.inline-toggle-container .toggle-switch input:checked + .toggle-slider {
|
||||
background-color: var(--lora-accent);
|
||||
}
|
||||
|
||||
.inline-toggle-container .toggle-switch input:checked + .toggle-slider:before {
|
||||
transform: translateX(18px);
|
||||
}
|
||||
|
||||
/* Dark theme adjustments */
|
||||
[data-theme="dark"] .version-item {
|
||||
background: var(--lora-surface);
|
||||
}
|
||||
|
||||
[data-theme="dark"] .local-path {
|
||||
background: var(--lora-surface);
|
||||
border-color: var(--lora-border);
|
||||
}
|
||||
|
||||
[data-theme="dark"] .toggle-slider:before {
|
||||
background-color: #f0f0f0;
|
||||
}
|
||||
|
||||
/* Enhance the local badge to make it more noticeable */
|
||||
.version-item.exists-locally {
|
||||
background: oklch(var(--lora-accent) / 0.05);
|
||||
border-left: 4px solid var(--lora-accent);
|
||||
}
|
||||
|
||||
.manual-path-selection.disabled {
|
||||
opacity: 0.5;
|
||||
pointer-events: none;
|
||||
user-select: none;
|
||||
}
|
||||
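The inline-toggle rules above style a standard checkbox-hack switch: a hidden checkbox whose :checked state drives the sibling .toggle-slider. A minimal sketch of markup that would satisfy those selectors, built via the DOM; the helper name and change wiring are illustrative assumptions, not part of this diff.

// Sketch: build the structure .inline-toggle-container expects.
// Class names come from the CSS above; everything else is assumed.
function createInlineToggle(labelText, onChange) {
  const container = document.createElement('label');
  container.className = 'inline-toggle-container';

  const label = document.createElement('span');
  label.className = 'inline-toggle-label';
  label.textContent = labelText;

  const toggle = document.createElement('span');
  toggle.className = 'toggle-switch';

  const input = document.createElement('input');
  input.type = 'checkbox';
  input.addEventListener('change', () => onChange(input.checked));

  const slider = document.createElement('span');
  slider.className = 'toggle-slider';

  // input:checked + .toggle-slider requires the input to come first.
  toggle.append(input, slider);
  container.append(label, toggle);
  return container;
}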
@@ -101,7 +101,7 @@
.api-key-input input {
    width: 100%;
    padding: 6px 40px 6px 10px; /* Add left padding */
-   height: 32px;
+   height: 20px;
    border-radius: var(--border-radius-xs);
    border: 1px solid var(--border-color);
    background-color: var(--lora-surface);
@@ -123,6 +123,36 @@
    opacity: 1;
}

/* Text input wrapper styles for consistent input styling */
.text-input-wrapper {
    width: 100%;
    position: relative;
    display: flex;
    align-items: center;
}

.text-input-wrapper input {
    width: 100%;
    padding: 6px 10px;
    height: 20px;
    border-radius: var(--border-radius-xs);
    border: 1px solid var(--border-color);
    background-color: var(--lora-surface);
    color: var(--text-color);
    font-size: 0.95em;
}

.text-input-wrapper input:focus {
    border-color: var(--lora-accent);
    outline: none;
    box-shadow: 0 0 0 2px rgba(var(--lora-accent-rgb, 79, 70, 229), 0.1);
}

/* Dark theme specific adjustments */
[data-theme="dark"] .text-input-wrapper input {
    background-color: rgba(30, 30, 30, 0.9);
}

.input-help {
    font-size: 0.85em;
    color: var(--text-color);
@@ -312,7 +342,7 @@ input:checked + .toggle-slider:before {
    border-radius: var(--border-radius-xs);
    border: 1px solid var(--border-color);
    background-color: var(--lora-surface);
-   color: var (--text-color);
+   color: var(--text-color);
    font-size: 0.95em;
    height: 32px;
}
@@ -346,7 +376,7 @@ input:checked + .toggle-slider:before {
    padding: var(--space-1);
    margin-top: 8px;
    font-family: monospace;
-   font-size: 1.1em;
+   font-size: 0.9em;
    color: var(--lora-accent);
    display: none;
}
@@ -482,4 +512,120 @@ input:checked + .toggle-slider:before {
[data-theme="dark"] .base-model-select option {
    background-color: #2d2d2d;
    color: var(--text-color);
}

/* Template Configuration Styles */
.placeholder-info {
    margin-top: var(--space-1);
    display: flex;
    flex-wrap: wrap;
    align-items: center;
    gap: var(--space-1);
}

.placeholder-tag {
    display: inline-block;
    background: var(--lora-accent);
    color: white;
    padding: 2px 6px;
    border-radius: 3px;
    font-family: monospace;
    font-size: 1em;
    font-weight: 500;
}

.template-custom-row {
    margin-top: 8px;
    animation: slideDown 0.2s ease-out;
}

@keyframes slideDown {
    from {
        opacity: 0;
        transform: translateY(-10px);
    }
    to {
        opacity: 1;
        transform: translateY(0);
    }
}

.template-custom-input {
    width: 96%;
    padding: 6px 10px;
    border-radius: var(--border-radius-xs);
    border: 1px solid var(--border-color);
    background-color: var(--lora-surface);
    color: var(--text-color);
    font-size: 0.95em;
    font-family: monospace;
    height: 24px;
    transition: border-color 0.2s;
}

.template-custom-input:focus {
    border-color: var(--lora-accent);
    outline: none;
    box-shadow: 0 0 0 2px rgba(var(--lora-accent-rgb, 79, 70, 229), 0.1);
}

.template-custom-input::placeholder {
    color: var(--text-color);
    opacity: 0.5;
    font-family: inherit;
}

.template-validation {
    margin-top: 6px;
    font-size: 0.85em;
    display: flex;
    align-items: center;
    gap: 6px;
    min-height: 20px;
}

.template-validation.valid {
    color: var(--lora-success, #22c55e);
}

.template-validation.invalid {
    color: var(--lora-error, #ef4444);
}

.template-validation i {
    width: 12px;
}

/* Dark theme specific adjustments */
[data-theme="dark"] .template-custom-input {
    background-color: rgba(30, 30, 30, 0.9);
}

/* Proxy Settings Styles */
.proxy-settings-group {
    margin-left: var(--space-1);
    padding-left: var(--space-1);
    border-left: 2px solid var(--lora-border);
    animation: slideDown 0.3s ease-out;
}

.proxy-settings-group .setting-item {
    margin-bottom: var(--space-2);
}

/* Responsive adjustments */
@media (max-width: 768px) {
    .placeholder-info {
        flex-direction: column;
        align-items: flex-start;
    }

    .proxy-settings-group {
        margin-left: 0;
        padding-left: var(--space-1);
        border-left: none;
        border-top: 1px solid var(--lora-border);
        padding-top: var(--space-2);
        margin-top: var(--space-2);
    }
}
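The .template-validation states above are presentation-only; something has to toggle the .valid/.invalid classes as the user types. A hedged sketch of that wiring; the placeholder set and the validity rule are illustrative assumptions, not the project's actual logic.

// Sketch: mark the validation row valid when the custom template contains
// at least one recognized placeholder. The placeholder list is assumed.
function updateTemplateValidation(input, validationEl) {
  const placeholders = ['{base_model}', '{first_tag}', '{author}']; // assumed set
  const ok = placeholders.some((p) => input.value.includes(p));
  validationEl.classList.toggle('valid', ok);
  validationEl.classList.toggle('invalid', !ok);
  validationEl.textContent = ok
    ? 'Template looks valid'
    : 'Include at least one placeholder';
}

document.querySelector('.template-custom-input')?.addEventListener('input', (e) =>
  updateTemplateValidation(e.target, document.querySelector('.template-validation')));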
@@ -445,69 +445,6 @@
    border-color: var(--lora-accent);
}

/* Switch styles */
.search-option-switch {
    display: flex;
    justify-content: space-between;
    align-items: center;
    padding: 4px 0;
}

.switch {
    position: relative;
    display: inline-block;
    width: 46px;
    height: 24px;
}

.switch input {
    opacity: 0;
    width: 0;
    height: 0;
}

.slider {
    position: absolute;
    cursor: pointer;
    top: 0;
    left: 0;
    right: 0;
    bottom: 0;
    background-color: #ccc;
    transition: .4s;
}

.slider:before {
    position: absolute;
    content: "";
    height: 18px;
    width: 18px;
    left: 3px;
    bottom: 3px;
    background-color: white;
    transition: .4s;
}

input:checked + .slider {
    background-color: var(--lora-accent);
}

input:focus + .slider {
    box-shadow: 0 0 1px var(--lora-accent);
}

input:checked + .slider:before {
    transform: translateX(22px);
}

.slider.round {
    border-radius: 34px;
}

.slider.round:before {
    border-radius: 50%;
}

/* Mobile adjustments */
@media (max-width: 768px) {
    .search-options-panel,
@@ -80,6 +80,7 @@
    align-items: flex-start;
    margin-bottom: var(--space-2);
    width: 100%;
    min-height: 30px; /* Ensure some height even if empty to prevent layout shifts */
}

/* Individual Item */
@@ -153,17 +154,42 @@
}

.metadata-save-btn,
-.save-tags-btn {
+.save-tags-btn,
+.append-tags-btn,
+.replace-tags-btn {
    background: var(--lora-accent) !important;
    color: white !important;
    border-color: var(--lora-accent) !important;
}

.metadata-save-btn:hover,
-.save-tags-btn:hover {
+.save-tags-btn:hover,
+.append-tags-btn:hover,
+.replace-tags-btn:hover {
    opacity: 0.9;
}

/* Specific styling for bulk tag action buttons */
.bulk-append-tags-btn {
    background: var(--lora-accent) !important;
    color: white !important;
    border-color: var(--lora-accent) !important;
}

.bulk-replace-tags-btn {
    background: var(--lora-warning, #f59e0b) !important;
    color: white !important;
    border-color: var(--lora-warning, #f59e0b) !important;
}

.bulk-append-tags-btn:hover {
    opacity: 0.9;
}

.bulk-replace-tags-btn:hover {
    background: var(--lora-warning-dark, #d97706) !important;
}

/* Add Form */
.metadata-add-form {
    display: flex;
static/css/components/sidebar.css (new file, 554 lines)
@@ -0,0 +1,554 @@
.folder-sidebar {
    position: fixed;
    top: 68px; /* Below header */
    left: 0px;
    width: 230px;
    height: calc(100vh - 88px);
    background: var(--bg-color);
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    overflow: hidden;
    transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
    flex-shrink: 0;
    z-index: var(--z-overlay);
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.08);
    display: flex;
    flex-direction: column;
    backdrop-filter: blur(8px);
    /* Default state: hidden off-screen */
    transform: translateX(-100%);
    opacity: 0;
    pointer-events: none;
}

/* Visible state */
.folder-sidebar.visible {
    transform: translateX(0);
    opacity: 1;
    pointer-events: all;
}

/* Auto-hide states */
.folder-sidebar.auto-hide {
    transform: translateX(-100%);
    opacity: 0;
    pointer-events: none;
}

.folder-sidebar.auto-hide.hover-active {
    transform: translateX(0);
    opacity: 1;
    pointer-events: all;
}

.folder-sidebar.collapsed {
    transform: translateX(-100%);
    opacity: 0;
    pointer-events: none;
}

/* Hover detection area for auto-hide */
.sidebar-hover-area {
    position: fixed;
    top: 68px;
    left: 0;
    width: 20px;
    height: calc(100vh - 88px);
    z-index: calc(var(--z-overlay) - 1);
    background: transparent;
    pointer-events: all;
}

.sidebar-hover-area.disabled {
    pointer-events: none;
}

.sidebar-header {
    display: flex;
    align-items: center;
    justify-content: space-between;
    padding: 12px 16px;
    background: var(--bg-color);
    color: var(--text-color);
    font-weight: 500;
    font-size: 0.9em;
    flex-shrink: 0;
    border-bottom: 1px solid var(--border-color);
    cursor: pointer;
    transition: all 0.2s ease;
}

.sidebar-header:hover {
    background: var(--lora-surface);
}

.sidebar-header.root-selected {
    background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
    color: var(--lora-accent);
}

.sidebar-header h3 {
    margin: 0;
    font-size: 0.9em;
    display: flex;
    align-items: center;
    gap: 8px;
    font-weight: 500;
    flex: 1;
    pointer-events: none;
}

.sidebar-header-actions {
    display: flex;
    align-items: center;
    gap: 4px;
}

.sidebar-action-btn {
    background: none;
    border: none;
    color: var(--text-muted);
    cursor: pointer;
    padding: 4px;
    border-radius: 4px;
    opacity: 0.6;
    transition: all 0.2s ease;
    width: 24px;
    height: 24px;
    display: flex;
    align-items: center;
    justify-content: center;
}

.sidebar-action-btn:hover {
    opacity: 1;
    background: var(--lora-surface);
    color: var(--text-color);
}

.sidebar-action-btn.active {
    opacity: 1;
    background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.15);
    color: var(--lora-accent);
}

.sidebar-action-btn.disabled {
    opacity: 0.3;
    cursor: not-allowed;
}

/* Remove old close button styles */
.sidebar-toggle-close {
    display: none;
}

.sidebar-content {
    flex: 1;
    overflow: hidden;
    display: flex;
    flex-direction: column;
}

.sidebar-tree-container {
    flex: 1;
    overflow-y: auto;
    padding: 8px 0;
}

/* Sidebar Tree Node Styles */
.sidebar-tree-node {
    position: relative;
    user-select: none;
}

.sidebar-tree-node-content {
    display: flex;
    align-items: center;
    padding: 8px 16px;
    cursor: pointer;
    transition: all 0.2s ease;
    font-size: 0.85em;
    border-left: 3px solid transparent;
    color: var(--text-color);
}

.sidebar-tree-node-content:hover {
    background: var(--lora-surface);
    color: var(--text-color);
}

.sidebar-tree-node-content.selected {
    background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
    color: var(--lora-accent);
    border-left-color: var(--lora-accent);
    font-weight: 500;
}

.sidebar-tree-expand-icon {
    width: 16px;
    height: 16px;
    display: flex;
    align-items: center;
    justify-content: center;
    margin-right: 4px;
    transition: transform 0.2s ease;
    opacity: 0.6;
}

.sidebar-tree-expand-icon.expanded {
    transform: rotate(90deg);
}

.sidebar-tree-expand-icon i {
    font-size: 10px;
}

.sidebar-tree-folder-icon {
    margin-right: 8px;
    color: var(--text-muted);
    opacity: 0.7;
}

.sidebar-tree-node-content.selected .sidebar-tree-folder-icon {
    color: var(--lora-accent);
    opacity: 1;
}

.sidebar-tree-node-content:hover .sidebar-tree-folder-icon {
    color: var(--text-color);
    opacity: 0.9;
}

.sidebar-tree-folder-name {
    flex: 1;
    overflow: hidden;
    text-overflow: ellipsis;
    white-space: nowrap;
}

.sidebar-tree-children {
    overflow: hidden;
    max-height: 0;
    transition: max-height 0.3s ease;
}

.sidebar-tree-children.expanded {
    max-height: 50000px;
}

.sidebar-tree-children .sidebar-tree-node-content {
    padding-left: 32px;
}

.sidebar-tree-children .sidebar-tree-children .sidebar-tree-node-content {
    padding-left: 48px;
}

.sidebar-tree-children .sidebar-tree-children .sidebar-tree-children .sidebar-tree-node-content {
    padding-left: 64px;
}

/* Enhanced Sidebar Breadcrumb Styles */
.sidebar-breadcrumb-container {
    margin-top: 8px;
    padding: 8px 0;
    border-bottom: 1px solid var(--border-color);
    background: var(--bg-color);
    border-radius: var(--border-radius-xs);
}

.sidebar-breadcrumb-nav {
    display: flex;
    align-items: center;
    flex-wrap: wrap;
    gap: 4px;
    font-size: 0.85em;
    padding: 0 8px;
}

.sidebar-breadcrumb-item {
    display: flex;
    align-items: center;
    gap: 4px;
    padding: 4px 8px;
    border-radius: var(--border-radius-xs);
    cursor: pointer;
    transition: all 0.2s ease;
    color: var(--text-muted);
    position: relative;
}

.sidebar-breadcrumb-item:hover {
    background: var(--lora-surface);
    color: var(--text-color);
}

.sidebar-breadcrumb-item.active {
    background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
    color: var(--lora-accent);
    font-weight: 500;
}

.sidebar-breadcrumb-separator {
    color: var(--text-muted);
    opacity: 0.6;
    margin: 0 2px;
}

/* New Breadcrumb Dropdown Styles */
.breadcrumb-dropdown {
    position: relative;
    display: inline-flex;
    align-items: center;
}

.breadcrumb-dropdown-indicator {
    margin-left: 6px;
    color: inherit;
    opacity: 0.6;
    transition: all 0.2s ease;
    pointer-events: none;
    font-size: 0.9em;
}

.sidebar-breadcrumb-item:hover .breadcrumb-dropdown-indicator {
    opacity: 0.9;
}

.sidebar-breadcrumb-item.placeholder {
    color: var(--text-muted);
    font-style: italic;
}

.sidebar-breadcrumb-item.placeholder:hover {
    background: var(--lora-surface);
    color: var(--text-color);
}

.breadcrumb-dropdown.open .breadcrumb-dropdown-indicator {
    transform: rotate(180deg);
    opacity: 1;
}

.breadcrumb-dropdown-menu {
    position: absolute;
    top: 100%;
    left: 0;
    min-width: 160px;
    max-width: 240px;
    background: var(--bg-color);
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    box-shadow: 0 3px 8px rgba(0,0,0,0.15);
    z-index: calc(var(--z-overlay) + 20);
    overflow-y: auto;
    max-height: 450px;
    display: none;
    margin-top: 4px;
}

.breadcrumb-dropdown.open .breadcrumb-dropdown-menu {
    display: block;
}

.breadcrumb-dropdown-item {
    padding: 6px 12px;
    cursor: pointer;
    white-space: nowrap;
    overflow: hidden;
    text-overflow: ellipsis;
    transition: all 0.2s ease;
}

.breadcrumb-dropdown-item:hover {
    background: var(--lora-surface);
}

.breadcrumb-dropdown-item.active {
    background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
    color: var(--lora-accent);
    font-weight: 500;
}

/* Folder List Mode Styles */
.sidebar-folder-item {
    position: relative;
    user-select: none;
}

.sidebar-node-content {
    display: flex;
    align-items: center;
    padding: 8px 16px;
    cursor: pointer;
    transition: all 0.2s ease;
    font-size: 0.85em;
    border-left: 3px solid transparent;
    color: var(--text-color);
}

.sidebar-node-content:hover {
    background: var(--lora-surface);
    color: var(--text-color);
}

.sidebar-folder-item.selected .sidebar-node-content {
    background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
    color: var(--lora-accent);
    border-left-color: var(--lora-accent);
    font-weight: 500;
}

.sidebar-folder-icon {
    margin-right: 8px;
    color: var(--text-muted);
    opacity: 0.7;
    width: 16px;
    text-align: center;
}

.sidebar-folder-item.selected .sidebar-folder-icon {
    color: var(--lora-accent);
    opacity: 1;
}

.sidebar-node-content:hover .sidebar-folder-icon {
    color: var(--text-color);
    opacity: 0.9;
}

.sidebar-folder-name {
    flex: 1;
    overflow: hidden;
    text-overflow: ellipsis;
    white-space: nowrap;
}

/* Responsive Design */
@media (min-width: 2150px) {
    .folder-sidebar {
        width: 280px;
        left: 0px;
    }
}

@media (min-width: 3000px) {
    .folder-sidebar {
        width: 320px;
        left: 0px;
    }
}

@media (max-width: 1400px) {
    .folder-sidebar {
        width: 260px;
        left: 0px;
    }
}

/* Empty State */
.sidebar-tree-placeholder {
    padding: 24px 16px;
    text-align: center;
    color: var(--text-muted);
    opacity: 0.7;
}

.sidebar-tree-placeholder i {
    font-size: 2em;
    opacity: 0.5;
    margin-bottom: 8px;
    display: block;
}

/* Smooth transitions for tree nodes */
.sidebar-tree-node {
    overflow: hidden;
}

.sidebar-tree-children {
    transition: max-height 0.25s cubic-bezier(0.4, 0, 0.2, 1);
}

.sidebar-tree-expand-icon {
    transition: transform 0.2s cubic-bezier(0.4, 0, 0.2, 1);
}

/* Visual separator for nested levels */
.sidebar-tree-children .sidebar-tree-node-content {
    position: relative;
}

.sidebar-tree-children .sidebar-tree-node-content::before {
    content: '';
    position: absolute;
    left: 8px;
    top: 0;
    bottom: 0;
    width: 1px;
    background: var(--border-color);
    opacity: 0.3;
}

/* Responsive Design */
@media (max-width: 1024px) {
    .folder-sidebar {
        top: 68px;
        left: 0px;
        width: calc(100vw - 32px);
        max-width: 320px;
        height: calc(100vh - 88px);
        z-index: calc(var(--z-overlay) + 10);
    }

    .folder-sidebar.collapsed {
        transform: translateX(-100%);
    }

    /* Mobile overlay */
    .folder-sidebar:not(.collapsed)::before {
        content: '';
        position: fixed;
        top: 0;
        left: 0;
        right: 0;
        bottom: 0;
        background: rgba(0, 0, 0, 0.3);
        z-index: -1;
        backdrop-filter: blur(2px);
    }
}

@media (max-width: 768px) {
    .folder-sidebar {
        width: calc(100vw - 32px);
        max-width: 280px;
        left: 0px;
    }

    .sidebar-breadcrumb-nav {
        font-size: 0.8em;
    }

    .sidebar-breadcrumb-item {
        padding: 3px 6px;
    }
}

/* Hide scrollbar but keep functionality */
.sidebar-tree-container::-webkit-scrollbar {
    width: 6px;
}

.sidebar-tree-container::-webkit-scrollbar-track {
    background: transparent;
}

.sidebar-tree-container::-webkit-scrollbar-thumb {
    background: var(--border-color);
    border-radius: 3px;
}

.sidebar-tree-container::-webkit-scrollbar-thumb:hover {
    background: var(--text-muted);
}
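All of the sidebar's auto-hide behavior above is class-driven, so the JavaScript side only needs to toggle classes when the pointer crosses the edge hover area. A minimal sketch under that assumption; the function name and listener wiring are illustrative, not taken from the repository's JS.

// Sketch: reveal an auto-hidden sidebar while the pointer is over the
// 20px edge strip (.sidebar-hover-area) or the sidebar itself.
function wireSidebarAutoHide() {
  const sidebar = document.querySelector('.folder-sidebar');
  const hoverArea = document.querySelector('.sidebar-hover-area');
  if (!sidebar || !hoverArea) return;

  const show = () => sidebar.classList.add('hover-active');
  const hide = () => sidebar.classList.remove('hover-active');

  for (const el of [hoverArea, sidebar]) {
    el.addEventListener('mouseenter', show);
    el.addEventListener('mouseleave', hide);
  }
}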
@@ -7,6 +7,7 @@
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
    gap: var(--space-2);
    margin-top: var(--space-2);
    margin-bottom: var(--space-3);
}
@@ -9,14 +9,24 @@

.container {
    max-width: 1400px;
-   margin: 20px auto;
+   margin: 0 auto;
    padding: 0 15px;
    position: relative;
    z-index: var(--z-base);
}

/* Sticky controls container */
.controls {
    position: sticky;
    top: -54px;
    z-index: calc(var(--z-header) - 1);
    background: var(--bg-color);
    padding: var(--space-2) 0;
    box-shadow: 0 1px 3px rgba(0,0,0,0.05);
}

/* Responsive container for larger screens */
-@media (min-width: 2000px) {
+@media (min-width: 2150px) {
    .container {
        max-width: 1800px;
    }
@@ -28,13 +38,6 @@
    }
}

.controls {
    display: flex;
    flex-direction: column;
    gap: 8px;
    margin-bottom: var(--space-2);
}

.controls-right {
    display: flex;
    align-items: center;
@@ -225,63 +228,6 @@
    display: none !important;
}

.folder-tags-container {
    position: relative;
    width: 100%;
    margin-bottom: 8px; /* Add margin to ensure space for the button */
}

.folder-tags {
    display: flex;
    gap: 4px;
    padding: 2px 0;
    flex-wrap: wrap;
    transition: max-height 0.3s ease, opacity 0.2s ease;
    max-height: 150px; /* Limit height to prevent overflow */
    opacity: 1;
    overflow-y: auto; /* Enable vertical scrolling */
    margin-bottom: 5px; /* Add margin below the tags */
}

.folder-tags.collapsed {
    max-height: 0;
    opacity: 0;
    margin: 0;
    padding-bottom: 0;
    overflow: hidden;
}

.toggle-folders-container {
    margin-left: auto;
}

/* Toggle Folders Button */
.toggle-folders-btn {
    width: 36px;
    height: 36px;
    border-radius: 50%;
    background: var(--card-bg);
    border: 1px solid var(--border-color);
    color: var(--text-color);
    display: flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
    transition: all 0.3s ease;
    box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}

.toggle-folders-btn:hover {
    background: var(--lora-accent);
    color: white;
    transform: translateY(-2px);
    box-shadow: 0 3px 6px rgba(0, 0, 0, 0.1);
}

.toggle-folders-btn i {
    transition: transform 0.3s ease;
}

/* Icon-only button style */
.icon-only {
    min-width: unset !important;
@@ -290,55 +236,6 @@
    height: 32px !important;
}

/* Rotate icon when folders are collapsed */
.folder-tags.collapsed ~ .actions .toggle-folders-btn i {
    transform: rotate(180deg);
}

/* Add custom scrollbar for better visibility */
.folder-tags::-webkit-scrollbar {
    width: 6px;
}

.folder-tags::-webkit-scrollbar-track {
    background: var(--card-bg);
    border-radius: 3px;
}

.folder-tags::-webkit-scrollbar-thumb {
    background: var(--border-color);
    border-radius: 3px;
}

.folder-tags::-webkit-scrollbar-thumb:hover {
    background: var(--lora-accent);
}

.tag {
    cursor: pointer;
    padding: 2px 8px;
    margin: 2px;
    border: 1px solid var(--border-color);
    border-radius: var(--border-radius-xs);
    display: inline-block;
    line-height: 1.2;
    font-size: 14px;
    background-color: var(--card-bg);
    transition: all 0.2s ease;
}

.tag:hover {
    border-color: var(--lora-accent);
    background-color: oklch(var(--lora-accent) / 0.1);
    transform: translateY(-1px);
}

.tag.active {
    background-color: var(--lora-accent);
    color: white;
    border-color: var(--lora-accent);
}

/* Back to Top Button */
.back-to-top {
    position: fixed;
@@ -376,10 +273,8 @@
}

/* Prevent text selection in control and header areas */
.tag,
.control-group button,
.control-group select,
.toggle-folders-btn,
.bulk-operations-panel,
.app-header,
.header-branding,
@@ -387,8 +282,7 @@
.main-nav,
.nav-item,
.header-actions button,
-.header-controls,
-.toggle-folders-container button {
+.header-controls {
    -webkit-user-select: none;
    -moz-user-select: none;
    -ms-user-select: none;
@@ -472,18 +366,6 @@
    justify-content: flex-end;
    margin-top: 8px;
}

.toggle-folders-container {
    margin-left: 0;
}

.folder-tags-container {
    order: -1;
}

.toggle-folders-btn:hover {
    transform: none; /* Disable hover effects on mobile */
}

.control-group button:hover {
    transform: none; /* Disable hover effects on mobile */
@@ -493,10 +375,6 @@
    transform: none; /* Disable hover effects on mobile */
}

.tag:hover {
    transform: none; /* Disable hover effects on mobile */
}

.back-to-top {
    bottom: 60px; /* Give some extra space from bottom on mobile */
}
@@ -505,4 +383,9 @@
    left: auto;
    right: 0; /* Align to right on mobile */
}

/* Adjust controls padding on mobile */
.controls {
    padding: 10px 0;
}
}
static/css/onboarding.css (new file, 252 lines)
@@ -0,0 +1,252 @@
/* Onboarding Tutorial Styles */
.onboarding-overlay {
    position: fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    background: rgba(0, 0, 0, 0.8);
    z-index: var(--z-overlay);
    display: none;
    /* Use mask to create cutout for highlighted element */
    mask-composite: subtract;
    -webkit-mask-composite: subtract;
}

.onboarding-overlay.active {
    display: block;
}

.onboarding-spotlight {
    position: absolute;
    background: transparent;
    border: 3px solid var(--lora-accent);
    border-radius: var(--border-radius-base);
    z-index: calc(var(--z-overlay) + 1);
    pointer-events: none;
    transition: all 0.3s ease;
    /* Add glow effect */
    box-shadow:
        0 0 0 2px rgba(24, 144, 255, 0.3),
        0 0 20px rgba(24, 144, 255, 0.2),
        inset 0 0 0 1px rgba(255, 255, 255, 0.1);
}

/* Target element highlighting */
.onboarding-target-highlight {
    position: relative;
    z-index: calc(var(--z-overlay) + 2) !important;
    pointer-events: auto !important;
}

/* Ensure highlighted elements are interactive */
.onboarding-target-highlight * {
    pointer-events: auto !important;
}

.onboarding-popup {
    position: absolute;
    background: var(--lora-surface);
    border: 1px solid var(--lora-border);
    border-radius: var(--border-radius-base);
    padding: var(--space-3);
    min-width: 320px;
    max-width: 400px;
    z-index: calc(var(--z-overlay) + 3);
    box-shadow: 0 8px 32px rgba(0, 0, 0, 0.3);
    backdrop-filter: blur(10px);
}

.onboarding-popup h3 {
    margin: 0 0 var(--space-2) 0;
    color: var(--lora-accent);
    font-size: 1.2em;
    font-weight: 600;
}

.onboarding-popup p {
    margin: 0 0 var(--space-3) 0;
    color: var(--text-color);
    line-height: 1.5;
}

.onboarding-controls {
    display: flex;
    justify-content: space-between;
    align-items: center;
    gap: var(--space-2);
}

.onboarding-progress {
    display: flex;
    align-items: center;
    gap: var(--space-1);
    font-size: 0.85em;
    color: var(--text-muted);
}

.onboarding-actions {
    display: flex;
    gap: var(--space-2);
}

.onboarding-btn {
    padding: var(--space-1) var(--space-2);
    border: 1px solid var(--lora-border);
    border-radius: var(--border-radius-sm);
    background: var(--card-bg);
    color: var(--text-color);
    cursor: pointer;
    font-size: 0.9em;
    transition: all 0.2s ease;
}

.onboarding-btn:hover {
    background: var(--lora-accent);
    color: var(--lora-text);
    border-color: var(--lora-accent);
}

.onboarding-btn.primary {
    background: var(--lora-accent);
    color: var(--lora-text);
    border-color: var(--lora-accent);
}

.onboarding-btn.primary:hover {
    opacity: 0.9;
}

/* Language Selection Modal */
.language-selection-modal {
    position: fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    background: rgba(0, 0, 0, 0.9);
    display: flex;
    align-items: center;
    justify-content: center;
    z-index: calc(var(--z-overlay) + 10);
}

.language-selection-content {
    background: var(--lora-surface);
    border: 1px solid var(--lora-border);
    border-radius: var(--border-radius-base);
    padding: var(--space-3);
    min-width: 510px;
    text-align: center;
    box-shadow: 0 8px 32px rgba(0, 0, 0, 0.4);
    backdrop-filter: blur(10px);
}

.language-selection-content h2 {
    margin: 0 0 var(--space-2) 0;
    color: var(--lora-accent);
    font-size: 1.5em;
}

.language-selection-content p {
    margin: 0 0 var(--space-3) 0;
    color: var(--text-color);
    line-height: 1.5;
}

.language-grid {
    display: grid;
    grid-template-columns: repeat(3, 1fr);
    gap: var(--space-2);
    margin-bottom: var(--space-3);
}

.language-option {
    padding: var(--space-2);
    border: 2px solid var(--lora-border);
    border-radius: var(--border-radius-sm);
    background: var(--card-bg);
    cursor: pointer;
    transition: all 0.2s ease;
    display: flex;
    flex-direction: column;
    align-items: center;
    gap: var(--space-1);
}

.language-option:hover {
    border-color: var(--lora-accent);
    background: var(--lora-surface);
}

.language-option.selected {
    border-color: var(--lora-accent);
    background: var(--lora-accent);
    color: var(--lora-text);
}

.language-flag {
    font-size: 1.5em;
}

.language-name {
    font-size: 0.9em;
    font-weight: 500;
}

.language-actions {
    display: flex;
    gap: var(--space-2);
    justify-content: center;
}

/* Shortcut Key Highlighting */
.onboarding-shortcut {
    display: inline-block;
    background: var(--shortcut-bg);
    border: 1px solid var(--shortcut-border);
    border-radius: var(--border-radius-xs);
    padding: 2px 6px;
    font-size: 0.8em;
    font-weight: 600;
    color: var(--shortcut-text);
    margin: 0 2px;
}

/* Animation for highlighting elements */
.onboarding-highlight {
    animation: onboarding-pulse 2s infinite;
}

@keyframes onboarding-pulse {
    0%, 100% {
        box-shadow:
            0 0 0 2px rgba(24, 144, 255, 0.4),
            0 0 20px rgba(24, 144, 255, 0.3),
            inset 0 0 0 1px rgba(255, 255, 255, 0.1);
    }
    50% {
        box-shadow:
            0 0 0 4px rgba(24, 144, 255, 0.6),
            0 0 30px rgba(24, 144, 255, 0.4),
            inset 0 0 0 1px rgba(255, 255, 255, 0.2);
    }
}

/* Responsive adjustments */
@media (max-width: 768px) {
    .onboarding-popup {
        min-width: 280px;
        max-width: calc(100vw - 40px);
        padding: var(--space-2);
    }

    .language-grid {
        grid-template-columns: repeat(2, 1fr);
    }

    .language-selection-content {
        min-width: calc(100vw - 40px);
        max-width: 400px;
    }
}
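Since .onboarding-spotlight is absolutely positioned, a script has to copy the highlighted element's box onto it at each tutorial step. A hedged sketch of that step; the function name and padding value are assumptions.

// Sketch: position the spotlight over the current tutorial target.
function moveSpotlight(spotlight, target, padding = 6) {
  const rect = target.getBoundingClientRect();
  Object.assign(spotlight.style, {
    top: `${rect.top + window.scrollY - padding}px`,
    left: `${rect.left + window.scrollX - padding}px`,
    width: `${rect.width + padding * 2}px`,
    height: `${rect.height + padding * 2}px`,
  });
  // Raise the target above the dimmed overlay and keep it clickable.
  target.classList.add('onboarding-target-highlight');
}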
@@ -16,7 +16,7 @@
@import 'components/modal/relink-civitai-modal.css';
@import 'components/modal/example-access-modal.css';
@import 'components/modal/support-modal.css';
-@import 'components/download-modal.css';
+@import 'components/modal/download-modal.css';
@import 'components/toast.css';
@import 'components/loading.css';
@import 'components/menu.css';
@@ -34,10 +34,10 @@
@import 'components/filter-indicator.css';
@import 'components/initialization.css';
@import 'components/progress-panel.css';
@import 'components/alphabet-bar.css'; /* Add alphabet bar component */
@import 'components/duplicates.css'; /* Add duplicates component */
@import 'components/keyboard-nav.css'; /* Add keyboard navigation component */
@import 'components/statistics.css'; /* Add statistics component */
@import 'components/sidebar.css'; /* Add sidebar component */

.initialization-notice {
    display: flex;
Binary file not shown. (Before: 1.9 MiB, After: 2.0 MiB)
@@ -29,7 +29,7 @@ export const MODEL_CONFIG = {
        defaultPageSize: 100,
        supportsLetterFilter: false,
        supportsBulkOperations: true,
-       supportsMove: false,
+       supportsMove: true,
        templateName: 'checkpoints.html'
    },
    [MODEL_TYPES.EMBEDDING]: {
@@ -55,7 +55,7 @@ export function getApiEndpoints(modelType) {

    return {
        // Base CRUD operations
-       list: `/api/${modelType}`,
+       list: `/api/${modelType}/list`,
        delete: `/api/${modelType}/delete`,
        exclude: `/api/${modelType}/exclude`,
        rename: `/api/${modelType}/rename`,
@@ -64,6 +64,13 @@ export function getApiEndpoints(modelType) {
        // Bulk operations
        bulkDelete: `/api/${modelType}/bulk-delete`,

        // Tag operations
        addTags: `/api/${modelType}/add-tags`,

        // Move operations (now common for all model types that support move)
        moveModel: `/api/${modelType}/move_model`,
        moveBulk: `/api/${modelType}/move_models_bulk`,

        // CivitAI integration
        fetchCivitai: `/api/${modelType}/fetch-civitai`,
        fetchAllCivitai: `/api/${modelType}/fetch-all-civitai`,
@@ -79,9 +86,17 @@ export function getApiEndpoints(modelType) {
        baseModels: `/api/${modelType}/base-models`,
        roots: `/api/${modelType}/roots`,
        folders: `/api/${modelType}/folders`,
        folderTree: `/api/${modelType}/folder-tree`,
        unifiedFolderTree: `/api/${modelType}/unified-folder-tree`,
        duplicates: `/api/${modelType}/find-duplicates`,
        conflicts: `/api/${modelType}/find-filename-conflicts`,
        verify: `/api/${modelType}/verify-duplicates`,
        metadata: `/api/${modelType}/metadata`,
        modelDescription: `/api/${modelType}/model-description`,

        // Auto-organize operations
        autoOrganize: `/api/${modelType}/auto-organize`,
        autoOrganizeProgress: `/api/${modelType}/auto-organize-progress`,

        // Model-specific endpoints (will be merged with specific configs)
        specific: {}
@@ -98,17 +113,19 @@ export const MODEL_SPECIFIC_ENDPOINTS = {
        triggerWords: `/api/${MODEL_TYPES.LORA}/get-trigger-words`,
        previewUrl: `/api/${MODEL_TYPES.LORA}/preview-url`,
        civitaiUrl: `/api/${MODEL_TYPES.LORA}/civitai-url`,
        modelDescription: `/api/${MODEL_TYPES.LORA}/model-description`,
        moveModel: `/api/${MODEL_TYPES.LORA}/move_model`,
        moveBulk: `/api/${MODEL_TYPES.LORA}/move_models_bulk`,
        metadata: `/api/${MODEL_TYPES.LORA}/metadata`,
        getTriggerWordsPost: `/api/${MODEL_TYPES.LORA}/get_trigger_words`,
        civitaiModelByVersion: `/api/${MODEL_TYPES.LORA}/civitai/model/version`,
        civitaiModelByHash: `/api/${MODEL_TYPES.LORA}/civitai/model/hash`,
    },
    [MODEL_TYPES.CHECKPOINT]: {
        info: `/api/${MODEL_TYPES.CHECKPOINT}/info`,
        checkpoints_roots: `/api/${MODEL_TYPES.CHECKPOINT}/checkpoints_roots`,
        unet_roots: `/api/${MODEL_TYPES.CHECKPOINT}/unet_roots`,
        metadata: `/api/${MODEL_TYPES.CHECKPOINT}/metadata`,
    },
    [MODEL_TYPES.EMBEDDING]: {
        metadata: `/api/${MODEL_TYPES.EMBEDDING}/metadata`,
    }
};
@@ -159,7 +176,8 @@ export const DOWNLOAD_ENDPOINTS = {
    download: '/api/download-model',
    downloadGet: '/api/download-model-get',
    cancelGet: '/api/cancel-download-get',
-   progress: '/api/download-progress'
+   progress: '/api/download-progress',
+   exampleImages: '/api/force-download-example-images' // New endpoint for downloading example images
};

// WebSocket endpoints
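For orientation, a hedged sketch of how the endpoint map composes at a call site after the list-route change above; the 'loras' model type string, the import path, and the response shape are assumptions for illustration.

// Sketch only: assumes MODEL_TYPES.LORA === 'loras' and that this module
// lives at ./apiConfig.js; the JSON shape of the response is not shown here.
import { getApiEndpoints } from './apiConfig.js';

async function listLoras(page = 1) {
  const endpoints = getApiEndpoints('loras'); // list -> '/api/loras/list'
  const response = await fetch(`${endpoints.list}?page=${page}`);
  if (!response.ok) throw new Error('Failed to list loras');
  return response.json();
}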
File diff suppressed because it is too large.

static/js/api/checkpointApi.js (new file, 92 lines)
@@ -0,0 +1,92 @@
import { BaseModelApiClient } from './baseModelApi.js';

/**
 * Checkpoint-specific API client
 */
export class CheckpointApiClient extends BaseModelApiClient {
    /**
     * Get checkpoint information
     */
    async getCheckpointInfo(filePath) {
        try {
            const response = await fetch(this.apiConfig.endpoints.specific.info, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json'
                },
                body: JSON.stringify({ file_path: filePath })
            });

            if (!response.ok) {
                throw new Error('Failed to fetch checkpoint info');
            }

            return await response.json();
        } catch (error) {
            console.error('Error fetching checkpoint info:', error);
            throw error;
        }
    }

    /**
     * Get checkpoint roots
     */
    async getCheckpointsRoots() {
        try {
            const response = await fetch(this.apiConfig.endpoints.specific.checkpoints_roots, {
                method: 'GET'
            });
            if (!response.ok) {
                throw new Error('Failed to fetch checkpoints roots');
            }
            return await response.json();
        } catch (error) {
            console.error('Error fetching checkpoints roots:', error);
            throw error;
        }
    }

    /**
     * Get unet roots
     */
    async getUnetRoots() {
        try {
            const response = await fetch(this.apiConfig.endpoints.specific.unet_roots, {
                method: 'GET'
            });
            if (!response.ok) {
                throw new Error('Failed to fetch unet roots');
            }
            return await response.json();
        } catch (error) {
            console.error('Error fetching unet roots:', error);
            throw error;
        }
    }

    /**
     * Get appropriate roots based on model type
     */
    async fetchModelRoots(modelType = 'checkpoint') {
        try {
            let response;
            if (modelType === 'diffusion_model') {
                response = await fetch(this.apiConfig.endpoints.specific.unet_roots, {
                    method: 'GET'
                });
            } else {
                response = await fetch(this.apiConfig.endpoints.specific.checkpoints_roots, {
                    method: 'GET'
                });
            }

            if (!response.ok) {
                throw new Error(`Failed to fetch ${modelType} roots`);
            }
            return await response.json();
        } catch (error) {
            console.error(`Error fetching ${modelType} roots:`, error);
            throw error;
        }
    }
}
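A usage sketch for the client above. How apiConfig is injected is not shown in this diff, so the constructor call and the merged `specific` endpoints are assumptions.

// Illustrative only: BaseModelApiClient's constructor signature is assumed,
// and `specific` is assumed to be merged from MODEL_SPECIFIC_ENDPOINTS.
const client = new CheckpointApiClient({ endpoints: getApiEndpoints('checkpoints') });

client.fetchModelRoots('diffusion_model')
  .then((roots) => console.log('unet roots:', roots))
  .catch((err) => console.error(err));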
static/js/api/embeddingApi.js (new file, 7 lines)
@@ -0,0 +1,7 @@
import { BaseModelApiClient } from './baseModelApi.js';

/**
 * Embedding-specific API client
 */
export class EmbeddingApiClient extends BaseModelApiClient {
}
Some files were not shown because too many files have changed in this diff.