Compare commits

..

180 Commits

Author SHA1 Message Date
Will Miao
18cdaabf5e Update release notes and version to v0.8.17, adding new features including duplicate model detection, enhanced URL recipe imports, and improved trigger word control. 2025-06-09 19:07:53 +08:00
Will Miao
787e37b7c6 Add CivitAI re-linking functionality and related UI components. Fixes #216
- Implemented new API endpoints for re-linking models to CivitAI.
- Added context menu options for re-linking in both Lora and Checkpoint context menus.
- Created a modal for user confirmation and input for CivitAI model URL.
- Updated styles for the new modal and context menu items.
- Enhanced error handling and user feedback during the re-linking process.
2025-06-09 17:23:03 +08:00
Will Miao
4e5c8b2dd0 Add help modal functionality and update related UI components 2025-06-09 14:55:18 +08:00
Will Miao
d8ddacde38 Remove 'folder' field from model metadata before saving to file. See #211 2025-06-09 11:26:24 +08:00
Will Miao
bb1e42f0d3 Add restart required icon to example images download location label. See #212 2025-06-08 20:43:10 +08:00
pixelpaws
923669c495 Merge pull request #213 from willmiao/migrate-images
Migrate images
2025-06-08 20:11:37 +08:00
Will Miao
7a4139544c Add method to update model metadata from local example images. Fixes #211 2025-06-08 20:10:36 +08:00
Will Miao
4d6ea0236b Add centralized example images setting and update related UI components 2025-06-08 17:38:46 +08:00
Will Miao
e872a06f22 Refactor MiscRoutes and move example images related api to ExampleImagesRoutes 2025-06-08 14:40:30 +08:00
Will Miao
647bda2160 Add API endpoint and frontend integration for fetching example image files 2025-06-07 22:31:57 +08:00
Will Miao
c1e93d23f3 Merge branch 'migrate-images' of https://github.com/willmiao/ComfyUI-Lora-Manager into migrate-images 2025-06-07 11:32:55 +08:00
Will Miao
c96550cc68 Enhance migration and download processes: add backend path update and prevent duplicate completion toasts 2025-06-07 11:29:53 +08:00
Will Miao
b1015ecdc5 Add migration functionality for example images: implement API endpoint and UI controls 2025-06-07 11:27:25 +08:00
Will Miao
f1b928a037 Add migration functionality for example images: implement API endpoint and UI controls 2025-06-07 09:34:07 +08:00
Will Miao
16c312c90b Fix version description not showing. Fixes #210 2025-06-07 01:29:38 +08:00
Will Miao
110ffd0118 Refactor modal close behavior: ensure consistent handling of closeOnOutsideClick option across multiple modals. 2025-06-06 10:32:18 +08:00
Will Miao
35ad872419 Enhance duplicates management: add help tooltip for duplicate groups and improve responsive styling for banners and groups. 2025-06-05 15:06:53 +08:00
Will Miao
9b943cf2b8 Update custom node icon 2025-06-05 06:48:48 +08:00
Will Miao
9d1b357e64 Enhance cache validation logic: add logging for version and model type mismatches, and relax directory structure checks to improve cache validity. 2025-06-04 20:47:14 +08:00
Will Miao
9fc2fb4d17 Enhance model caching and exclusion functionality: update cache version, add excluded models to cache data, and ensure cache is saved to disk after model exclusion and deletion. 2025-06-04 18:38:45 +08:00
Will Miao
641fa8a3d9 Enhance duplicates mode functionality: add toggle for entering/exiting mode, improve exit button styling, and manage control button states during duplicates mode. 2025-06-04 16:46:57 +08:00
Will Miao
add9269706 Enhance duplicate mode exit logic: hide duplicates banner, clear model grid, and re-enable virtual scrolling. Improve spacer element handling in VirtualScroller by recreating it if not found in the DOM. 2025-06-04 16:05:57 +08:00
Will Miao
1a01c4a344 Refactor trigger words UI handling: improve event listener management, restore original words on cancel, and enhance dropdown update logic. See #147 2025-06-04 15:02:13 +08:00
Will Miao
b4e7feed06 Enhance trained words extraction and display: include class tokens in response and update UI accordingly. See #147 2025-06-04 12:04:38 +08:00
Will Miao
4b96c650eb Enhance example image handling: improve filename extraction and fallback for local images 2025-06-04 11:30:56 +08:00
Will Miao
107aef3785 Enhance SaveImage and TriggerWordToggle: add tooltips for parameters to improve user guidance 2025-06-03 19:40:01 +08:00
Will Miao
b49807824f Fix optimizeExampleImages setting in SettingsManager 2025-06-03 18:10:43 +08:00
Will Miao
e5ef2ef8b5 Add default_active parameter to TriggerWordToggle for controlling default state 2025-06-03 17:45:52 +08:00
Will Miao
88779ed56c Enhance Lora Manager widget: add configurable window size for Shift+Click behavior 2025-06-03 16:25:31 +08:00
Will Miao
8b59fb6adc Refactor ShowcaseView and uiHelpers for improved image/video handling
- Moved getLocalExampleImageUrl function to uiHelpers.js for better modularity.
- Updated ShowcaseView.js to utilize the new structure for local and fallback URLs.
- Enhanced lazy loading functions to support both primary and fallback URLs for images and videos.
- Simplified metadata panel generation in ShowcaseView.js.
- Improved showcase toggle functionality and added initialization for lazy loading and metadata handlers.
2025-06-03 16:06:54 +08:00
Will Miao
7945647b0b Refactor core application and recipe manager: remove lazy loading functionality and clean up imports in uiHelpers. 2025-06-03 15:40:51 +08:00
Will Miao
2d39b84806 Add CivitaiApiMetadataParser and improve recipe parsing logic for Civitai images. Also fixes #197
Additional info: Now prioritizes using the Civitai Images API to fetch image and generation metadata. Even NSFW images can now be imported via URL.
2025-06-03 14:58:43 +08:00
Will Miao
e151a19fcf Implement bulk operations for LoRAs: add send to workflow and bulk delete functionality with modal confirmation. 2025-06-03 07:44:52 +08:00
Will Miao
99d2ba26b9 Add API endpoint for fetching trained words and implement dropdown suggestions in the trigger words editor. See #147 2025-06-02 17:04:33 +08:00
Will Miao
396924f4cc Add badge for duplicate count and update logic in ModelDuplicatesManager and PageControls 2025-06-02 09:42:28 +08:00
Will Miao
7545312229 Add bulk delete endpoint for checkpoints and enhance ModelDuplicatesManager for better handling of model types 2025-06-02 08:54:31 +08:00
Will Miao
26f9779fbf Add bulk delete functionality for loras and implement model duplicates management. See #198
- Introduced a new API endpoint for bulk deleting loras.
- Added ModelDuplicatesManager to handle duplicate models for loras and checkpoints.
- Implemented UI components for displaying duplicates and managing selections.
- Enhanced controls with a button for finding duplicates.
- Updated templates to include a duplicates banner and associated actions.
2025-06-02 08:08:45 +08:00
Will Miao
0bd62eef3a Add endpoints for finding duplicate loras and filename conflicts; implement tracking for duplicates in ModelHashIndex and update ModelScanner to handle new data structures. 2025-05-31 20:50:51 +08:00
Will Miao
e06d15f508 Remove LoraHashIndex class and related functionality to streamline codebase. 2025-05-31 20:25:12 +08:00
Will Miao
aa1ee96bc9 Add versioning and history tracking to usage statistics. Implement backup and conversion for old stats format, enhancing data structure for checkpoints and loras. 2025-05-31 16:38:18 +08:00
Will Miao
355c73512d Enhance modal close behavior by tracking mouse events on the background. Implement logic to close modals only if mouseup occurs on the background after mousedown, improving user experience. 2025-05-31 08:53:20 +08:00
Will Miao
0daf9d92ff Update version to 0.8.16 and enhance release notes with new features, improvements, and bug fixes. 2025-05-30 21:04:24 +08:00
Will Miao
37de26ce25 Enhance Lora code update handling for browser and desktop modes. Implement broadcast support for Lora Loader nodes and improve node ID management in the workflow. 2025-05-30 20:12:38 +08:00
Will Miao
0eaef7e7a0 Refactor extension name for consistency in usage statistics tracking 2025-05-30 17:30:29 +08:00
Will Miao
8063cee3cd Add rename functionality for checkpoint and LoRA files with loading indicators 2025-05-30 16:38:18 +08:00
Will Miao
cbb25b4ac0 Enhance model metadata saving functionality with loading indicators and improved validation. Refactor editing logic for better user experience in both checkpoint and LoRA modals. Fixes #200 2025-05-30 16:30:01 +08:00
Will Miao
c62206a157 Add preprocessing for MessagePack serialization to handle large integers. See #201 2025-05-30 10:55:48 +08:00
Will Miao
09832141d0 Add functionality to open example images folder for models 2025-05-30 09:42:36 +08:00
Will Miao
bf8e121a10 Add functionality to copy LoRA syntax and update event handling for copy action 2025-05-30 09:02:17 +08:00
Will Miao
68568073ec Refactor model caching logic to streamline adding models and ensure disk persistence 2025-05-30 07:34:39 +08:00
Will Miao
ec36524c35 Add Civitai image URL optimization and simplify image processing logic 2025-05-29 22:20:16 +08:00
Will Miao
67acd9fd2c Relax cache validation by removing strict modification time checks, allowing users to refresh the cache as needed. 2025-05-29 20:58:06 +08:00
Will Miao
f7be5c8d25 Change log level to info for cache save operation and ensure cache is saved to disk after updating preview URL 2025-05-29 20:09:58 +08:00
Will Miao
ceacac75e0 Increase minimum width of dropdown menu for improved usability 2025-05-29 15:55:14 +08:00
Will Miao
bae66f94e8 Add full rebuild option to model refresh functionality and enhance dropdown controls 2025-05-29 15:51:45 +08:00
Will Miao
ddf132bd78 Add cache management feature: implement clear cache API and modal confirmation 2025-05-29 14:36:13 +08:00
Will Miao
afb012029f Enhance get_cached_data method: improve cache rebuilding logic and ensure cache is saved after initialization 2025-05-29 08:50:17 +08:00
Will Miao
651e14c8c3 Enhance get_cached_data method: add rebuild_cache option for improved cache management 2025-05-29 08:36:18 +08:00
Will Miao
e7c626eb5f Add MessagePack support for efficient cache serialization and update dependencies 2025-05-28 22:30:06 +08:00
pixelpaws
a0b0d40a19 Update README.md 2025-05-27 22:28:26 +08:00
Will Miao
42e3ab9e27 Update tutorial links in README: replace outdated video links with the latest tutorial 2025-05-27 19:24:22 +08:00
Will Miao
6e5f333364 Enhance model file moving logic: support moving associated files and handle metadata paths 2025-05-27 05:41:39 +08:00
Will Miao
f33a9abe60 Limit Lora hash display to first 10 characters and improve WebP metadata handling 2025-05-22 16:29:12 +08:00
Will Miao
7f1bbdd615 Remove debug print statement for primary sampler ID in MetadataProcessor 2025-05-22 16:01:55 +08:00
Will Miao
d3bf8eaceb Add container padding properties to VirtualScroller and adjust card padding 2025-05-22 15:23:32 +08:00
Will Miao
b9c9d602de Enhance download modals: auto-focus on URL input and auto-select version if only one available 2025-05-22 11:07:52 +08:00
Will Miao
b25fbd6e24 Refactor modal styles: remove model name field and adjust margin for modal content header 2025-05-22 10:02:13 +08:00
Will Miao
6052608a4e Update version to 0.8.15-bugfix in pyproject.toml 2025-05-22 04:42:12 +08:00
Will Miao
a073b82751 Enhance WebP image saving: add EXIF data and workflow metadata support. Fixes #193 2025-05-21 19:17:12 +08:00
Will Miao
8250acdfb5 Add creator information display to Lora and Checkpoint modals. #186 2025-05-21 15:31:23 +08:00
Will Miao
8e1f73a34e Refactor display density settings: replace compact mode with display density option and update related UI components 2025-05-20 19:35:41 +08:00
Will Miao
50704bc882 Enhance error handling and input validation in fetch_and_update_model method 2025-05-20 13:57:22 +08:00
Will Miao
35d34e3513 Revert db0b49c427 Refactor load_metadata to use save_metadata for updating metadata files 2025-05-19 21:46:01 +08:00
Will Miao
ea834f3de6 Revert "Enhance metadata processing in ModelScanner: prevent intermediate writes, restore missing civitai data, and ensure base_model consistency. #185"
This reverts commit 99b36442bb.
2025-05-19 21:39:31 +08:00
Will Miao
11aedde72f Fix save_metadata call to await asynchronous execution in load_metadata function. Fixes #192 2025-05-19 15:01:56 +08:00
Will Miao
488654abc8 Improve card layout responsiveness and scrolling behavior 2025-05-18 07:49:39 +08:00
Will Miao
da1be0dc65 Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager 2025-05-17 15:40:23 +08:00
Will Miao
d0c728a339 Enhance node tracing logic and improve prompt handling in metadata processing. See #189 2025-05-17 15:40:05 +08:00
pixelpaws
66c66c4d9b Update README.md 2025-05-16 17:08:23 +08:00
Will Miao
4882721387 Update version to 0.8.15 and add release notes for enhanced features and improvements 2025-05-16 16:13:37 +08:00
Will Miao
06a8850c0c Add more wiki images 2025-05-16 15:54:52 +08:00
Will Miao
370aa06c67 Refactor duplicates banner styles for improved layout and responsiveness 2025-05-16 15:47:08 +08:00
Will Miao
c9fa0564e7 Update images 2025-05-16 11:36:37 +08:00
Will Miao
2ba7a0ceba Add keyboard navigation support and related styles for enhanced user experience 2025-05-15 20:17:57 +08:00
Will Miao
276aedfbb9 Set 'from_civitai' flag to True when updating local metadata with CivitAI data 2025-05-15 16:50:32 +08:00
Will Miao
c193c75674 Fix misleading error message for invalid civitai api key or early access deny 2025-05-15 13:46:46 +08:00
Will Miao
a562ba3746 Fix TriggerWord Toggle not updating when all LoRAs are disabled 2025-05-15 10:30:46 +08:00
Will Miao
2fedd572ff Add header drag functionality for proportional strength adjustment of LoRAs 2025-05-15 10:12:46 +08:00
Will Miao
db0b49c427 Refactor load_metadata to use save_metadata for updating metadata files 2025-05-15 09:49:30 +08:00
Will Miao
03a6f8111c Add functionality to copy and send LoRA/Recipe syntax to workflow
- Implemented copy functionality for LoRA and Recipe syntax in context menus.
- Added options to send LoRA and Recipe to workflow in both append and replace modes.
- Updated HTML templates to include new context menu items for sending actions.
2025-05-15 07:01:50 +08:00
Will Miao
925ad7b3e0 Add user-select: none to prevent text selection on cards and control elements 2025-05-15 05:36:56 +08:00
Will Miao
bf793d5b8b Refactor Lora and Recipe card event handling: replace copy functionality with direct send to ComfyUI workflow, update UI elements, and enhance sendLoraToWorkflow to support recipe syntax. 2025-05-14 23:51:00 +08:00
Will Miao
64a906ca5e Add Lora syntax send to comfyui functionality: implement API endpoint and frontend integration for sending and updating LoRA codes in ComfyUI nodes. 2025-05-14 21:09:36 +08:00
Will Miao
99b36442bb Enhance metadata processing in ModelScanner: prevent intermediate writes, restore missing civitai data, and ensure base_model consistency. #185 2025-05-14 19:16:58 +08:00
Will Miao
3c5164d510 Update screenshot 2025-05-13 22:56:51 +08:00
Will Miao
ec4b5a4d45 Update release notes and version to v0.8.14: add virtualized scrolling, compact display mode, and enhanced LoRA node functionality. 2025-05-13 22:50:32 +08:00
Will Miao
78e1901779 Add compact mode settings and styles for improved layout control. Fixes #33 2025-05-13 21:40:37 +08:00
Will Miao
cb539314de Ensure full LoRA node chain is considered when updating TriggerWord Toggle nodes 2025-05-13 20:33:52 +08:00
Will Miao
c7627fe0de Remove no longer needed ref files. 2025-05-13 17:57:59 +08:00
Will Miao
84bfad7ce5 Enhance model deletion handling in UI: integrate virtual scroller updates and remove legacy UI card removal logic. 2025-05-13 17:50:28 +08:00
Will Miao
3e06938b05 Add enableDataWindowing option to VirtualScroller for improved control over data fetching. (Disable data windowing for now) 2025-05-13 17:13:17 +08:00
Will Miao
4f712fec14 Reduce default delay in model processing from 0.2 to 0.1 seconds for improved responsiveness. 2025-05-13 15:30:09 +08:00
Will Miao
c5c9659c76 Update refreshModels to pass folder update flag to resetAndReloadFunction 2025-05-13 15:25:40 +08:00
Will Miao
d6e175c1f1 Add API endpoints for retrieving LoRA notes and trigger words; enhance context menu with copy options. Supports #177 2025-05-13 15:14:25 +08:00
Will Miao
88088e1071 Restructure the code of loras_widget into smaller, more manageable modules. 2025-05-13 14:42:28 +08:00
Will Miao
958ddbca86 Fix workaround for saved value retrieval in Loras widget to address custom nodes issue. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/176 2025-05-13 12:27:18 +08:00
Will Miao
6670fd28f4 Add sync functionality for clipStrength when collapsed in Loras widget. https://github.com/willmiao/ComfyUI-Lora-Manager/issues/176 2025-05-13 11:45:13 +08:00
pixelpaws
1e59c31de3 Merge pull request #184 from willmiao/vscroll
Add virtual scroll
2025-05-12 22:27:40 +08:00
Will Miao
c966dbbbbc Enhance DuplicatesManager and VirtualScroller to manage virtual scrolling state and improve rendering logic 2025-05-12 21:31:03 +08:00
Will Miao
af8f5ba04e Implement client-side placeholder handling for empty recipe grid and remove server-side conditional rendering 2025-05-12 21:20:28 +08:00
Will Miao
b741ed0b3b Refactor recipe and checkpoint management to implement virtual scrolling and improve state handling 2025-05-12 20:07:47 +08:00
Will Miao
01ba3c14f8 Implement virtual scrolling for model loading and checkpoint management 2025-05-12 17:47:57 +08:00
Will Miao
d13b1a83ad checkpoint 2025-05-12 16:44:45 +08:00
Will Miao
303477db70 update 2025-05-12 14:50:10 +08:00
Will Miao
311e89e9e7 checkpoint 2025-05-12 13:59:11 +08:00
Will Miao
8546cfe714 checkpoint 2025-05-12 10:25:58 +08:00
Will Miao
e6f4d84b9a Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager 2025-05-11 18:50:53 +08:00
Will Miao
ce7e422169 Revert "refactor: streamline LoraCard event handling and implement virtual scrolling for improved performance"
This reverts commit 5dd8d905fa.
2025-05-11 18:50:19 +08:00
pixelpaws
e5aec80984 Merge pull request #179 from jakerdy/patch-1
[Fix] `/api/checkpoints/info/{name}` change misspelled method call
2025-05-11 17:10:40 +08:00
Jak Erdy
6d97817390 [Fix] /api/checkpoints/info/{name} change misspelled method call
If you call:
`http://127.0.0.1:8188/api/checkpoints/info/some_name`
You will get an error that there is no method `get_checkpoint_info_by_name` in `scanner`.
Looks like it wasn't fixed after refactoring or something. Now it works as expected.
2025-05-10 17:38:10 +07:00
Will Miao
d516f22159 Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager 2025-05-10 07:34:06 +08:00
pixelpaws
e918c18ca2 Create FUNDING.yml 2025-05-09 20:17:35 +08:00
Will Miao
5dd8d905fa refactor: streamline LoraCard event handling and implement virtual scrolling for improved performance 2025-05-09 16:33:34 +08:00
Will Miao
1121d1ee6c Revert "update"
This reverts commit 4793f096af.
2025-05-09 16:14:10 +08:00
Will Miao
4793f096af update 2025-05-09 15:42:56 +08:00
Will Miao
7b5b4ce082 refactor: enhance CFGGuider handling and add CFGGuiderExtractor for improved metadata extraction. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/172 2025-05-09 13:50:22 +08:00
Will Miao
fa08c9c3e4 Update version to 0.8.13; enhance recipe management and source tracking features in release notes 2025-05-09 11:38:46 +08:00
pixelpaws
d0d5eb956a Merge pull request #174 from willmiao/dev
Dev
2025-05-09 11:06:47 +08:00
Will Miao
969f949330 refactor(lora-loader, lora-stacker, loras-widget): enhance handling of model and clip strengths; update formatting and UI interactions. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/171 2025-05-09 11:05:59 +08:00
Will Miao
9169bbd04d refactor(widget-serialization): remove dummy items from serialization which was a fix to ComfyUI issues 2025-05-08 20:25:26 +08:00
Will Miao
99463ad01c refactor(import-modal): remove outdated duplicate styles and clean up modal button layout 2025-05-08 20:16:25 +08:00
pixelpaws
f1d6b0feda Merge pull request #173 from willmiao/dev
Dev
2025-05-08 18:33:52 +08:00
Will Miao
e33da50278 refactor: update duplicate recipe management; simplify UI and remove deprecated functions 2025-05-08 18:33:19 +08:00
Will Miao
4034eb3221 feat: implement duplicate recipe detection and management; add UI for marking duplicates for deletion 2025-05-08 17:29:58 +08:00
Will Miao
75a95f0109 refactor: enhance recipe fingerprint calculation and return detailed recipe information; remove unnecessary console logs in import managers 2025-05-08 16:54:49 +08:00
Will Miao
92fdc16fe6 feat(modals): implement duplicate delete confirmation modal and enhance deletion workflow 2025-05-08 16:17:52 +08:00
Will Miao
23fa2995c8 refactor(import): Implement DownloadManager, FolderBrowser, ImageProcessor, and RecipeDataManager for enhanced recipe import functionality
- Added DownloadManager to handle saving recipes and downloading missing LoRAs.
- Introduced FolderBrowser for selecting LoRA root directories and managing folder navigation.
- Created ImageProcessor for handling image uploads and URL inputs for recipe analysis.
- Developed RecipeDataManager to manage recipe details, including metadata and LoRA information.
- Implemented ImportStepManager to control the flow of the import process and manage UI steps.
- Added utility function for formatting file sizes for better user experience.
2025-05-08 15:41:13 +08:00
Will Miao
59aefdff77 feat: implement duplicate detection and management features; add UI components and styles for duplicates 2025-05-08 15:13:14 +08:00
Will Miao
e92ab9e3cc refactor: add endpoints for finding duplicates and bulk deletion of recipes; enhance fingerprint calculation and handling 2025-05-07 19:34:27 +08:00
Will Miao
e3bf1f763c refactor: remove workflow parsing module and associated files for cleanup 2025-05-07 17:13:30 +08:00
Will Miao
1c6e9d0b69 refactor: enhance hash processing in AutomaticMetadataParser for improved key handling 2025-05-07 05:29:16 +08:00
Will Miao
bfd4eb3e11 refactor: update import paths for config in AutomaticMetadataParser and RecipeFormatParser. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/168 2025-05-07 04:39:06 +08:00
Will Miao
c9f902a8af Refactor recipe metadata parser package for ComfyUI-Lora-Manager
- Implemented the base class `RecipeMetadataParser` for parsing recipe metadata from user comments.
- Created a factory class `RecipeParserFactory` to instantiate appropriate parser based on user comment content.
- Developed multiple parser classes: `ComfyMetadataParser`, `AutomaticMetadataParser`, `MetaFormatParser`, and `RecipeFormatParser` to handle different metadata formats.
- Introduced constants for generation parameters and valid LoRA types.
- Enhanced error handling and logging throughout the parsing process.
- Added functionality to populate LoRA and checkpoint information from Civitai API responses.
- Structured the output of parsed metadata to include prompts, LoRAs, generation parameters, and model information.
2025-05-06 21:11:25 +08:00
Will Miao
0b67510ec9 refactor: remove StandardMetadataParser and ImageSaverMetadataParser, integrate AutomaticMetadataParser for improved metadata handling 2025-05-06 17:51:44 +08:00
Will Miao
b5cd320e8b Update 'natsort' to dependencies in pyproject.toml 2025-05-06 08:59:48 +08:00
pixelpaws
deb25b4987 Merge pull request #166 from Rauks/add-natural-sort
fix: use natural sorting when sorting by name
2025-05-06 08:58:19 +08:00
pixelpaws
4612da264a Merge pull request #167 from willmiao/dev
Dev
2025-05-06 08:28:20 +08:00
Karl Woditsch
59b67e1e10 fix: use natural sorting when sorting by name 2025-05-05 22:25:50 +02:00
Will Miao
5fad936b27 feat: implement recipe card update functionality after modal edits 2025-05-05 23:17:58 +08:00
Will Miao
e376a45dea refactor: remove unused source URL tooltip from RecipeModal component 2025-05-05 21:11:52 +08:00
Will Miao
fd593bb61d feat: add source URL functionality to recipe modal, including dynamic display and editing options 2025-05-05 20:50:32 +08:00
Will Miao
71b97d5974 fix: update recipe data structure to include source_path from metadata and improve loading messages 2025-05-05 18:15:59 +08:00
Will Miao
2b405ae164 fix: update load_metadata to set preview_nsfw_level based on civitai data. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/53 2025-05-05 15:46:37 +08:00
Will Miao
2fe4736b69 fix: update ImageSaverMetadataParser to improve metadata matching and parsing logic. https://github.com/willmiao/ComfyUI-Lora-Manager/issues/104 2025-05-05 14:41:56 +08:00
Will Miao
184f8ca6cf feat: add local image analysis functionality and update import modal for URL/local path input. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/140 2025-05-05 11:35:20 +08:00
Will Miao
1ff2019dde fix: update model type checks to include LoCon and lycoris in API routes. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/159 2025-05-05 07:48:08 +08:00
Will Miao
a3d8261686 fix: remove console log and update file extension handling for LoRA syntax. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/158 2025-05-04 08:52:35 +08:00
Will Miao
7d0600976e fix: enhance pointer event handling for progress panel visibility 2025-05-04 08:08:59 +08:00
Will Miao
e1e6e4f3dc feat: update version to 0.8.12 and enhance release notes in README 2025-05-03 17:21:21 +08:00
pixelpaws
fba2853773 Merge pull request #157 from willmiao/dev
Dev
2025-05-03 17:07:48 +08:00
Will Miao
48df7e1078 Refactor code structure for improved readability and maintainability 2025-05-03 17:06:57 +08:00
Will Miao
235dcd5fa6 feat: enhance metadata panel visibility handling in showcase view 2025-05-03 16:41:47 +08:00
Will Miao
2027db7411 feat: refactor model deletion functionality with confirmation modal 2025-05-03 16:31:17 +08:00
Will Miao
611dd33c75 feat: add model exclusion functionality frontend 2025-05-03 16:14:09 +08:00
Will Miao
ec1c92a714 feat: add model exclusion functionality with new API endpoints and metadata handling 2025-05-02 22:36:50 +08:00
Will Miao
6ac78156ac feat: comment out "View Details" option in context menus for checkpoints and recipes 2025-05-02 20:59:06 +08:00
pixelpaws
e94b74e92d Merge pull request #156 from willmiao/dev
Dev
2025-05-02 19:35:25 +08:00
Will Miao
2bbec47f63 feat: update WeChat and Alipay QR code to use WebP format for improved performance 2025-05-02 19:34:40 +08:00
pixelpaws
b5ddf4c953 Merge pull request #155 from Rauks/add-base-models
feat: Add "HiDream" and "LTXV" base models
2025-05-02 19:17:18 +08:00
Will Miao
44be75aeef feat: add WeChat and Alipay support section with QR code toggle functionality 2025-05-02 19:15:54 +08:00
Karl Woditsch
2c03759b5d feat: Add "HiDream" and "LTXV" base models 2025-05-02 11:56:10 +02:00
Will Miao
2e3da03723 feat: update metadata panel visibility logic to show on media hover and add rendering calculations 2025-05-02 17:53:15 +08:00
Will Miao
6e96fbcda7 feat: enhance alphabet bar with toggle functionality and visual indicators 2025-05-01 20:50:31 +08:00
Will Miao
d1fd5b7f27 feat: implement alphabet filtering feature with letter counts and UI components v1 2025-05-01 20:07:12 +08:00
Will Miao
9dbcc105e7 feat: add model metadata refresh functionality and enhance download progress tracking. https://github.com/willmiao/ComfyUI-Lora-Manager/issues/151 2025-05-01 18:57:29 +08:00
Will Miao
5cd5a82ddc feat: add creator information to model metadata handling 2025-05-01 15:56:57 +08:00
Will Miao
88c1892dc9 feat: enhance model metadata fetching to include creator information 2025-05-01 15:30:05 +08:00
Will Miao
3c1b181675 fix: enhance version comparison by ignoring suffixes in semantic version strings 2025-05-01 07:47:09 +08:00
Will Miao
6777dc16ca fix: update version to 0.8.11-bugfix in pyproject.toml 2025-05-01 06:19:03 +08:00
Will Miao
3833647dfe refactor: remove unused tkinter imports from misc_routes.py. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/150 2025-05-01 06:06:20 +08:00
166 changed files with 17474 additions and 8138 deletions

4
.github/FUNDING.yml vendored Normal file
View File

@@ -0,0 +1,4 @@
# These are supported funding model platforms
ko_fi: pixelpawsai
custom: ['paypal.me/pixelpawsai']

1
.gitignore vendored
View File

@@ -3,3 +3,4 @@ settings.json
output/*
py/run_test.py
.vscode/
cache/

118
README.md
View File

@@ -10,16 +10,61 @@ A comprehensive toolset that streamlines organizing, downloading, and applying L
![Interface Preview](https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/static/images/screenshot.png)
One-click Integration:
![One-Click Integration](https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/static/images/one-click-send.jpg)
## 📺 Tutorial: One-Click LoRA Integration
Watch this quick tutorial to learn how to use the new one-click LoRA integration feature:
[![One-Click LoRA Integration Tutorial](https://img.youtube.com/vi/qS95OjX3e70/0.jpg)](https://youtu.be/qS95OjX3e70)
[![LoRA Manager v0.8.10 - Checkpoint Management, Standalone Mode, and New Features!](https://img.youtube.com/vi/VKvTlCB78h4/0.jpg)](https://youtu.be/VKvTlCB78h4)
[![One-Click LoRA Integration Tutorial](https://img.youtube.com/vi/hvKw31YpE-U/0.jpg)](https://youtu.be/hvKw31YpE-U)
---
## Release Notes
### v0.8.17
* **Duplicate Model Detection** - Added "Find Duplicates" functionality for LoRAs and checkpoints using model file hash detection, enabling convenient viewing and batch deletion of duplicate models
* **Enhanced URL Recipe Imports** - Optimized import recipe via URL functionality using CivitAI API calls instead of web scraping, now supporting all rated images (including NSFW) for recipe imports
* **Improved TriggerWord Control** - Enhanced TriggerWord Toggle node with new default_active switch to set the initial state (active/inactive) when trigger words are added
* **Centralized Example Management** - Added "Migrate Existing Example Images" feature to consolidate downloaded example images from model folders into central storage with customizable naming patterns
* **Intelligent Word Suggestions** - Implemented smart trigger word suggestions by reading class tokens and tag frequency from safetensors files, displaying recommendations when editing trigger words
* **Model Version Management** - Added "Re-link to CivitAI" context menu option for connecting models to different CivitAI versions when needed
### v0.8.16
* **Dramatic Startup Speed Improvement** - Added cache serialization mechanism for significantly faster loading times, especially beneficial for large model collections
* **Enhanced Refresh Options** - Extended functionality with "Full Rebuild (complete)" option alongside "Quick Refresh (incremental)" to fix potential memory cache issues without requiring application restart
* **Customizable Display Density** - Replaced compact mode with adjustable display density settings for personalized layout customization
* **Model Creator Information** - Added creator details to model information panels for better attribution
* **Improved WebP Support** - Enhanced Save Image node with workflow embedding capability for WebP format images
* **Direct Example Access** - Added "Open Example Images Folder" button to card interfaces for convenient browsing of downloaded model examples
* **Enhanced Compatibility** - Full ComfyUI Desktop support for "Send lora or recipe to workflow" functionality
* **Cache Management** - Added settings to clear existing cache files when needed
* **Bug Fixes & Stability** - Various improvements for overall reliability and performance
### v0.8.15
* **Enhanced One-Click Integration** - Replaced copy button with direct send button allowing LoRAs/recipes to be sent directly to your current ComfyUI workflow without needing to paste
* **Flexible Workflow Integration** - Click to append LoRAs/recipes to existing loader nodes or Shift+click to replace content, with additional right-click menu options for "Send to Workflow (Append)" or "Send to Workflow (Replace)"
* **Improved LoRA Loader Controls** - Added header drag functionality for proportional strength adjustment of all LoRAs simultaneously (including CLIP strengths when expanded)
* **Keyboard Navigation Support** - Implemented Page Up/Down for page scrolling, Home key to jump to top, and End key to jump to bottom for faster browsing through large collections
### v0.8.14
* **Virtualized Scrolling** - Completely rebuilt rendering mechanism for smooth browsing with no lag or freezing, now supporting virtually unlimited model collections with optimized layouts for large displays, improving space utilization and user experience
* **Compact Display Mode** - Added space-efficient view option that displays more cards per row (7 on 1080p, 8 on 2K, 10 on 4K)
* **Enhanced LoRA Node Functionality** - Comprehensive improvements to LoRA loader/stacker nodes including real-time trigger word updates (reflecting any change anywhere in the LoRA chain for precise updates) and expanded context menu with "Copy Notes" and "Copy Trigger Words" options for faster workflow
### v0.8.13
* **Enhanced Recipe Management** - Added "Find duplicates" feature to identify and batch delete duplicate recipes with duplicate detection notifications during imports
* **Improved Source Tracking** - Source URLs are now saved with recipes imported via URL, allowing users to view original content with one click or manually edit links
* **Advanced LoRA Control** - Double-click LoRAs in Loader/Stacker nodes to access expanded CLIP strength controls for more precise adjustments of model and CLIP strength separately
* **Lycoris Model Support** - Added compatibility with Lycoris models for expanded creative options
* **Bug Fixes & UX Improvements** - Resolved various issues and enhanced overall user experience with numerous optimizations
### v0.8.12
* **Enhanced Model Discovery** - Added alphabetical navigation bar to LoRAs page for faster browsing through large collections
* **Optimized Example Images** - Improved download logic to automatically refresh stale metadata before fetching example images
* **Model Exclusion System** - New right-click option to exclude specific LoRAs or checkpoints from management
* **Improved Showcase Experience** - Enhanced interaction in LoRA and checkpoint showcase areas for better usability
### v0.8.11
* **Offline Image Support** - Added functionality to download and save all model example images locally, ensuring access even when offline or if images are removed from CivitAI or the site is down
* **Resilient Download System** - Implemented pause/resume capability with checkpoint recovery that persists through restarts or unexpected exits
@@ -31,71 +76,6 @@ Watch this quick tutorial to learn how to use the new one-click LoRA integration
* **Enhanced Metadata Collection** - Added support for SamplerCustomAdvanced node in the metadata collector module
* **Improved UI Organization** - Optimized Lora Loader node height to display up to 5 LoRAs at once with scrolling capability for larger collections
### v0.8.9
* **Favorites System** - New functionality to bookmark your favorite LoRAs and checkpoints for quick access and better organization
* **Enhanced UI Controls** - Increased model card button sizes for improved usability and easier interaction
* **Smoother Page Transitions** - Optimized interface switching between pages, eliminating flash issues particularly noticeable in dark theme
* **Bug Fixes & Stability** - Resolved various issues to enhance overall reliability and performance
### v0.8.8
* **Real-time TriggerWord Updates** - Enhanced TriggerWord Toggle node to instantly update when connected Lora Loader or Lora Stacker nodes change, without requiring workflow execution
* **Optimized Metadata Recovery** - Improved utilization of existing .civitai.info files for faster initialization and preservation of metadata from models deleted from CivitAI
* **Migration Acceleration** - Further speed improvements for users transitioning from A1111/Forge environments
* **Bug Fixes & Stability** - Resolved various issues to enhance overall reliability and performance
### v0.8.7
* **Enhanced Context Menu** - Added comprehensive context menu functionality to Recipes and Checkpoints pages for improved workflow
* **Interactive LoRA Strength Control** - Implemented drag functionality in LoRA Loader for intuitive strength adjustment
* **Metadata Collector Overhaul** - Rebuilt metadata collection system with optimized architecture for better performance
* **Improved Save Image Node** - Enhanced metadata capture and image saving performance with the new metadata collector
* **Streamlined Recipe Saving** - Optimized Save Recipe functionality to work independently without requiring Preview Image nodes
* **Bug Fixes & Stability** - Resolved various issues to enhance overall reliability and performance
### v0.8.6 Major Update
* **Checkpoint Management** - Added comprehensive management for model checkpoints including scanning, searching, filtering, and deletion
* **Enhanced Metadata Support** - New capabilities for retrieving and managing checkpoint metadata with improved operations
* **Improved Initial Loading** - Optimized cache initialization with visual progress indicators for better user experience
### v0.8.5
* **Enhanced LoRA & Recipe Connectivity** - Added Recipes tab in LoRA details to see all recipes using a specific LoRA
* **Improved Navigation** - New shortcuts to jump between related LoRAs and Recipes with one-click navigation
* **Video Preview Controls** - Added "Autoplay Videos on Hover" setting to optimize performance and reduce resource usage
* **UI Experience Refinements** - Smoother transitions between related content pages
### v0.8.4
* **Node Layout Improvements** - Fixed layout issues with LoRA Loader and Trigger Words Toggle nodes in newer ComfyUI frontend versions
* **Recipe LoRA Reconnection** - Added ability to reconnect deleted LoRAs in recipes by clicking the "deleted" badge in recipe details
* **Bug Fixes & Stability** - Resolved various issues for improved reliability
### v0.8.3
* **Enhanced Workflow Parser** - Rebuilt workflow analysis engine with improved support for ComfyUI core nodes and easier extensibility
* **Improved Recipe System** - Refined the experimental Save Recipe functionality with better workflow integration
* **New Save Image Node** - Added experimental node with metadata support for perfect CivitAI compatibility
* Supports dynamic filename prefixes with variables [1](https://github.com/nkchocoai/ComfyUI-SaveImageWithMetaData?tab=readme-ov-file#filename_prefix)
* **Default LoRA Root Setting** - Added configuration option for setting your preferred LoRA directory
### v0.8.2
* **Faster Initialization for Forge Users** - Improved first-run efficiency by utilizing existing `.json` and `.civitai.info` files from Forge's CivitAI helper extension, making migration smoother.
* **LoRA Filename Editing** - Added support for renaming LoRA files directly within LoRA Manager.
* **Recipe Editing** - Users can now edit recipe names and tags.
* **Retain Deleted LoRAs in Recipes** - Deleted LoRAs will remain listed in recipes, allowing future functionality to reconnect them once re-obtained.
* **Download Missing LoRAs from Recipes** - Easily fetch missing LoRAs associated with a recipe.
### v0.8.1
* **Base Model Correction** - Added support for modifying base model associations to fix incorrect metadata for non-CivitAI LoRAs
* **LoRA Loader Flexibility** - Made CLIP input optional for model-only workflows like Hunyuan video generation
* **Expanded Recipe Support** - Added compatibility with 3 additional recipe metadata formats
* **Enhanced Showcase Images** - Generation parameters now displayed alongside LoRA preview images
* **UI Improvements & Bug Fixes** - Various interface refinements and stability enhancements
### v0.8.0
* **Introduced LoRA Recipes** - Create, import, save, and share your favorite LoRA combinations
* **Recipe Management System** - Easily browse, search, and organize your LoRA recipes
* **Workflow Integration** - Save recipes directly from your workflow with generation parameters preserved
* **Simplified Workflow Application** - Quickly apply saved recipes to new projects
* **Enhanced UI & UX** - Improved interface design and user experience
* **Bug Fixes & Stability** - Resolved various issues and enhanced overall performance
[View Update History](./update_logs.md)
---
@@ -160,7 +140,7 @@ Watch this quick tutorial to learn how to use the new one-click LoRA integration
### Option 2: **Portable Standalone Edition** (No ComfyUI required)
1. Download the [Portable Package](https://github.com/willmiao/ComfyUI-Lora-Manager/releases/download/v0.8.10/lora_manager_portable.7z)
1. Download the [Portable Package](https://github.com/willmiao/ComfyUI-Lora-Manager/releases/download/v0.8.15/lora_manager_portable.7z)
2. Copy the provided `settings.json.example` file to create a new file named `settings.json` in `comfyui-lora-manager` folder
3. Edit `settings.json` to include your correct model folder paths and CivitAI API key
4. Run run.bat
@@ -283,6 +263,8 @@ If you find this project helpful, consider supporting its development:
[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/pixelpawsai)
WeChat: [Click to view QR code](https://raw.githubusercontent.com/willmiao/ComfyUI-Lora-Manager/main/static/images/wechat-qr.webp)
## 💬 Community
Join our Discord community for support, discussions, and updates:

View File

@@ -7,6 +7,7 @@ from .routes.recipe_routes import RecipeRoutes
from .routes.checkpoints_routes import CheckpointsRoutes
from .routes.update_routes import UpdateRoutes
from .routes.misc_routes import MiscRoutes
from .routes.example_images_routes import ExampleImagesRoutes
from .services.service_registry import ServiceRegistry
from .services.settings_manager import settings
import logging
@@ -112,6 +113,7 @@ class LoraManager:
RecipeRoutes.setup_routes(app)
UpdateRoutes.setup_routes(app)
MiscRoutes.setup_routes(app) # Register miscellaneous routes
ExampleImagesRoutes.setup_routes(app) # Register example images routes
# Schedule service initialization
app.on_startup.append(lambda app: cls._initialize_services())

View File

@@ -1,7 +1,5 @@
"""Constants used by the metadata collector"""
# Metadata collection constants
# Metadata categories
MODELS = "models"
PROMPTS = "prompts"
@@ -9,6 +7,7 @@ SAMPLING = "sampling"
LORAS = "loras"
SIZE = "size"
IMAGES = "images"
IS_SAMPLER = "is_sampler" # New constant to mark sampler nodes
# Complete list of categories to track
METADATA_CATEGORIES = [MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES]

View File

@@ -4,33 +4,109 @@ import sys
# Check if running in standalone mode
standalone_mode = 'nodes' not in sys.modules
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IS_SAMPLER
class MetadataProcessor:
"""Process and format collected metadata"""
@staticmethod
def find_primary_sampler(metadata):
"""Find the primary KSampler node (with highest denoise value)"""
def find_primary_sampler(metadata, downstream_id=None):
"""
Find the primary KSampler node that executed before the given downstream node
Parameters:
- metadata: The workflow metadata
- downstream_id: Optional ID of a downstream node to help identify the specific primary sampler
"""
# If we have a downstream_id and execution_order, use it to narrow down potential samplers
if downstream_id and "execution_order" in metadata:
execution_order = metadata["execution_order"]
# Find the index of the downstream node in the execution order
if downstream_id in execution_order:
downstream_index = execution_order.index(downstream_id)
# Extract all sampler nodes that executed before the downstream node
candidate_samplers = {}
for i in range(downstream_index):
node_id = execution_order[i]
# Use IS_SAMPLER flag to identify true sampler nodes
if node_id in metadata.get(SAMPLING, {}) and metadata[SAMPLING][node_id].get(IS_SAMPLER, False):
candidate_samplers[node_id] = metadata[SAMPLING][node_id]
# If we found candidate samplers, apply primary sampler logic to these candidates only
if candidate_samplers:
# Collect potential primary samplers based on different criteria
custom_advanced_samplers = []
advanced_add_noise_samplers = []
high_denoise_samplers = []
max_denoise = -1
high_denoise_id = None
# First, check for SamplerCustomAdvanced among candidates
prompt = metadata.get("current_prompt")
if prompt and prompt.original_prompt:
for node_id in candidate_samplers:
node_info = prompt.original_prompt.get(node_id, {})
if node_info.get("class_type") == "SamplerCustomAdvanced":
custom_advanced_samplers.append(node_id)
# Next, check for KSamplerAdvanced with add_noise="enable" among candidates
for node_id, sampler_info in candidate_samplers.items():
parameters = sampler_info.get("parameters", {})
add_noise = parameters.get("add_noise")
if add_noise == "enable":
advanced_add_noise_samplers.append(node_id)
# Find the sampler with highest denoise value among candidates
for node_id, sampler_info in candidate_samplers.items():
parameters = sampler_info.get("parameters", {})
denoise = parameters.get("denoise")
if denoise is not None and denoise > max_denoise:
max_denoise = denoise
high_denoise_id = node_id
if high_denoise_id:
high_denoise_samplers.append(high_denoise_id)
# Combine all potential primary samplers
potential_samplers = custom_advanced_samplers + advanced_add_noise_samplers + high_denoise_samplers
# Find the most recent potential primary sampler (closest to downstream node)
for i in range(downstream_index - 1, -1, -1):
node_id = execution_order[i]
if node_id in potential_samplers:
return node_id, candidate_samplers[node_id]
# If no potential sampler found from our criteria, return the most recent sampler
if candidate_samplers:
for i in range(downstream_index - 1, -1, -1):
node_id = execution_order[i]
if node_id in candidate_samplers:
return node_id, candidate_samplers[node_id]
# If no downstream_id provided or no suitable sampler found, fall back to original logic
primary_sampler = None
primary_sampler_id = None
max_denoise = -1 # Track the highest denoise value
max_denoise = -1
# First, check for SamplerCustomAdvanced
prompt = metadata.get("current_prompt")
if prompt and prompt.original_prompt:
for node_id, node_info in prompt.original_prompt.items():
if node_info.get("class_type") == "SamplerCustomAdvanced":
# Found a SamplerCustomAdvanced node
if node_id in metadata.get(SAMPLING, {}):
# Check if the node is in SAMPLING and has IS_SAMPLER flag
if node_id in metadata.get(SAMPLING, {}) and metadata[SAMPLING][node_id].get(IS_SAMPLER, False):
return node_id, metadata[SAMPLING][node_id]
# Next, check for KSamplerAdvanced with add_noise="enable"
# Next, check for KSamplerAdvanced with add_noise="enable" using IS_SAMPLER flag
for node_id, sampler_info in metadata.get(SAMPLING, {}).items():
# Skip if not marked as a sampler
if not sampler_info.get(IS_SAMPLER, False):
continue
parameters = sampler_info.get("parameters", {})
add_noise = parameters.get("add_noise")
# If add_noise is "enable", this is likely the primary sampler for KSamplerAdvanced
if add_noise == "enable":
primary_sampler = sampler_info
primary_sampler_id = node_id
@@ -39,10 +115,12 @@ class MetadataProcessor:
# If no specialized sampler found, find the sampler with highest denoise value
if primary_sampler is None:
for node_id, sampler_info in metadata.get(SAMPLING, {}).items():
# Skip if not marked as a sampler
if not sampler_info.get(IS_SAMPLER, False):
continue
parameters = sampler_info.get("parameters", {})
denoise = parameters.get("denoise")
# If denoise exists and is higher than current max, use this sampler
if denoise is not None and denoise > max_denoise:
max_denoise = denoise
primary_sampler = sampler_info
@@ -74,13 +152,18 @@ class MetadataProcessor:
current_node_id = node_id
current_input = input_name
# If we're just tracing to origin (no target_class), keep track of the last valid node
last_valid_node = None
while current_depth < max_depth:
if current_node_id not in prompt.original_prompt:
return None
return last_valid_node if not target_class else None
node_inputs = prompt.original_prompt[current_node_id].get("inputs", {})
if current_input not in node_inputs:
return None
# We've reached a node without the specified input - this is our origin node
# if we're not looking for a specific target_class
return current_node_id if not target_class else None
input_value = node_inputs[current_input]
# Input connections are formatted as [node_id, output_index]
@@ -91,9 +174,9 @@ class MetadataProcessor:
if target_class and prompt.original_prompt[found_node_id].get("class_type") == target_class:
return found_node_id
# If we're not looking for a specific class or haven't found it yet
# If we're not looking for a specific class, update the last valid node
if not target_class:
return found_node_id
last_valid_node = found_node_id
# Continue tracing through intermediate nodes
current_node_id = found_node_id
@@ -101,16 +184,17 @@ class MetadataProcessor:
if "conditioning" in prompt.original_prompt[current_node_id].get("inputs", {}):
current_input = "conditioning"
else:
# If there's no "conditioning" input, we can't trace further
# If there's no "conditioning" input, return the current node
# if we're not looking for a specific target_class
return found_node_id if not target_class else None
else:
# We've reached a node with no further connections
return None
return last_valid_node if not target_class else None
current_depth += 1
# If we've reached max depth without finding target_class
return None
return last_valid_node if not target_class else None
@staticmethod
def find_primary_checkpoint(metadata):
@@ -126,8 +210,14 @@ class MetadataProcessor:
return None
@staticmethod
def extract_generation_params(metadata):
"""Extract generation parameters from metadata using node relationships"""
def extract_generation_params(metadata, id=None):
"""
Extract generation parameters from metadata using node relationships
Parameters:
- metadata: The workflow metadata
- id: Optional ID of a downstream node to help identify the specific primary sampler
"""
params = {
"prompt": "",
"negative_prompt": "",
@@ -147,13 +237,20 @@ class MetadataProcessor:
prompt = metadata.get("current_prompt")
# Find the primary KSampler node
primary_sampler_id, primary_sampler = MetadataProcessor.find_primary_sampler(metadata)
primary_sampler_id, primary_sampler = MetadataProcessor.find_primary_sampler(metadata, id)
# Directly get checkpoint from metadata instead of tracing
checkpoint = MetadataProcessor.find_primary_checkpoint(metadata)
if checkpoint:
params["checkpoint"] = checkpoint
# Check if guidance parameter exists in any sampling node
for node_id, sampler_info in metadata.get(SAMPLING, {}).items():
parameters = sampler_info.get("parameters", {})
if "guidance" in parameters and parameters["guidance"] is not None:
params["guidance"] = parameters["guidance"]
break
if primary_sampler:
# Extract sampling parameters
sampling_params = primary_sampler.get("parameters", {})
@@ -187,24 +284,34 @@ class MetadataProcessor:
sampler_params = metadata[SAMPLING][sampler_node_id].get("parameters", {})
params["sampler"] = sampler_params.get("sampler_name")
# 3. Trace guider input for FluxGuidance and CLIPTextEncode
# 3. Trace guider input for CFGGuider and CLIPTextEncode
guider_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "guider", max_depth=5)
if guider_node_id:
# Look for FluxGuidance along the guider path
flux_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "conditioning", "FluxGuidance", max_depth=5)
if flux_node_id and flux_node_id in metadata.get(SAMPLING, {}):
flux_params = metadata[SAMPLING][flux_node_id].get("parameters", {})
params["guidance"] = flux_params.get("guidance")
# Find CLIPTextEncode for positive prompt (through conditioning)
positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "conditioning", "CLIPTextEncode", max_depth=10)
if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")
if guider_node_id and guider_node_id in prompt.original_prompt:
# Check if the guider node is a CFGGuider
if prompt.original_prompt[guider_node_id].get("class_type") == "CFGGuider":
# Extract cfg value from the CFGGuider
if guider_node_id in metadata.get(SAMPLING, {}):
cfg_params = metadata[SAMPLING][guider_node_id].get("parameters", {})
params["cfg_scale"] = cfg_params.get("cfg")
# Find CLIPTextEncode for positive prompt
positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "positive", "CLIPTextEncode", max_depth=10)
if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")
# Find CLIPTextEncode for negative prompt
negative_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "negative", "CLIPTextEncode", max_depth=10)
if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")
else:
positive_node_id = MetadataProcessor.trace_node_input(prompt, guider_node_id, "conditioning", max_depth=10)
if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")
else:
# Original tracing for standard samplers
# Trace positive prompt - look specifically for CLIPTextEncode
positive_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "positive", "CLIPTextEncode", max_depth=10)
positive_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "positive", max_depth=10)
if positive_node_id and positive_node_id in metadata.get(PROMPTS, {}):
params["prompt"] = metadata[PROMPTS][positive_node_id].get("text", "")
else:
@@ -212,21 +319,9 @@ class MetadataProcessor:
positive_flux_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "positive", "CLIPTextEncodeFlux", max_depth=10)
if positive_flux_node_id and positive_flux_node_id in metadata.get(PROMPTS, {}):
params["prompt"] = metadata[PROMPTS][positive_flux_node_id].get("text", "")
# Also extract guidance value if present in the sampling data
if positive_flux_node_id in metadata.get(SAMPLING, {}):
flux_params = metadata[SAMPLING][positive_flux_node_id].get("parameters", {})
if "guidance" in flux_params:
params["guidance"] = flux_params.get("guidance")
# Find any FluxGuidance nodes in the positive conditioning path
flux_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "positive", "FluxGuidance", max_depth=5)
if flux_node_id and flux_node_id in metadata.get(SAMPLING, {}):
flux_params = metadata[SAMPLING][flux_node_id].get("parameters", {})
params["guidance"] = flux_params.get("guidance")
# Trace negative prompt - look specifically for CLIPTextEncode
negative_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "negative", "CLIPTextEncode", max_depth=10)
negative_node_id = MetadataProcessor.trace_node_input(prompt, primary_sampler_id, "negative", max_depth=10)
if negative_node_id and negative_node_id in metadata.get(PROMPTS, {}):
params["negative_prompt"] = metadata[PROMPTS][negative_node_id].get("text", "")
@@ -256,13 +351,19 @@ class MetadataProcessor:
return params
@staticmethod
def to_dict(metadata):
"""Convert extracted metadata to the ComfyUI output.json format"""
def to_dict(metadata, id=None):
"""
Convert extracted metadata to the ComfyUI output.json format
Parameters:
- metadata: The workflow metadata
- id: Optional ID of a downstream node to help identify the specific primary sampler
"""
if standalone_mode:
# Return empty dictionary in standalone mode
return {}
params = MetadataProcessor.extract_generation_params(metadata)
params = MetadataProcessor.extract_generation_params(metadata, id)
# Convert all values to strings to match output.json format
for key in params:
@@ -272,7 +373,7 @@ class MetadataProcessor:
return params
@staticmethod
def to_json(metadata):
def to_json(metadata, id=None):
"""Convert metadata to JSON string"""
params = MetadataProcessor.to_dict(metadata)
params = MetadataProcessor.to_dict(metadata, id)
return json.dumps(params, indent=4)

View File

@@ -1,6 +1,6 @@
import os
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IMAGES, IS_SAMPLER
class NodeMetadataExtractor:
@@ -61,7 +61,8 @@ class SamplerExtractor(NodeMetadataExtractor):
metadata[SAMPLING][node_id] = {
"parameters": sampling_params,
"node_id": node_id
"node_id": node_id,
IS_SAMPLER: True # Add sampler flag
}
# Extract latent image dimensions if available
@@ -98,7 +99,8 @@ class KSamplerAdvancedExtractor(NodeMetadataExtractor):
metadata[SAMPLING][node_id] = {
"parameters": sampling_params,
"node_id": node_id
"node_id": node_id,
IS_SAMPLER: True # Add sampler flag
}
# Extract latent image dimensions if available
@@ -269,7 +271,8 @@ class KSamplerSelectExtractor(NodeMetadataExtractor):
metadata[SAMPLING][node_id] = {
"parameters": sampling_params,
"node_id": node_id
"node_id": node_id,
IS_SAMPLER: False # Mark as non-primary sampler
}
class BasicSchedulerExtractor(NodeMetadataExtractor):
@@ -285,7 +288,8 @@ class BasicSchedulerExtractor(NodeMetadataExtractor):
metadata[SAMPLING][node_id] = {
"parameters": sampling_params,
"node_id": node_id
"node_id": node_id,
IS_SAMPLER: False # Mark as non-primary sampler
}
class SamplerCustomAdvancedExtractor(NodeMetadataExtractor):
@@ -303,7 +307,8 @@ class SamplerCustomAdvancedExtractor(NodeMetadataExtractor):
metadata[SAMPLING][node_id] = {
"parameters": sampling_params,
"node_id": node_id
"node_id": node_id,
IS_SAMPLER: True # Add sampler flag
}
# Extract latent image dimensions if available
@@ -338,11 +343,20 @@ class CLIPTextEncodeFluxExtractor(NodeMetadataExtractor):
clip_l_text = inputs.get("clip_l", "")
t5xxl_text = inputs.get("t5xxl", "")
# Create JSON string with T5 content first, then CLIP-L
combined_text = json.dumps({
"T5": t5xxl_text,
"CLIP-L": clip_l_text
})
# If both are empty, use empty string
if not clip_l_text and not t5xxl_text:
combined_text = ""
# If one is empty, use the non-empty one
elif not clip_l_text:
combined_text = t5xxl_text
elif not t5xxl_text:
combined_text = clip_l_text
# If both have content, use JSON format
else:
combined_text = json.dumps({
"T5": t5xxl_text,
"CLIP-L": clip_l_text
})
metadata[PROMPTS][node_id] = {
"text": combined_text,
@@ -362,6 +376,23 @@ class CLIPTextEncodeFluxExtractor(NodeMetadataExtractor):
metadata[SAMPLING][node_id]["parameters"]["guidance"] = guidance_value
class CFGGuiderExtractor(NodeMetadataExtractor):
@staticmethod
def extract(node_id, inputs, outputs, metadata):
if not inputs or "cfg" not in inputs:
return
cfg_value = inputs.get("cfg")
# Store the cfg value in SAMPLING category
if SAMPLING not in metadata:
metadata[SAMPLING] = {}
if node_id not in metadata[SAMPLING]:
metadata[SAMPLING][node_id] = {"parameters": {}, "node_id": node_id}
metadata[SAMPLING][node_id]["parameters"]["cfg"] = cfg_value
# Registry of node-specific extractors
NODE_EXTRACTORS = {
# Sampling
@@ -374,15 +405,18 @@ NODE_EXTRACTORS = {
# Loaders
"CheckpointLoaderSimple": CheckpointLoaderExtractor,
"UNETLoader": UNETLoaderExtractor, # Updated to use dedicated extractor
"UnetLoaderGGUF": UNETLoaderExtractor, # Updated to use dedicated extractor
"LoraLoader": LoraLoaderExtractor,
"LoraManagerLoader": LoraLoaderManagerExtractor,
# Conditioning
"CLIPTextEncode": CLIPTextEncodeExtractor,
"CLIPTextEncodeFlux": CLIPTextEncodeFluxExtractor, # Add CLIPTextEncodeFlux
"WAS_Text_to_Conditioning": CLIPTextEncodeExtractor,
# Latent
"EmptyLatentImage": ImageSizeExtractor,
# Flux
"FluxGuidance": FluxGuidanceExtractor, # Add FluxGuidance
"CFGGuider": CFGGuiderExtractor, # Add CFGGuider
# Image
"VAEDecode": VAEDecodeExtractor, # Added VAEDecode extractor
# Add other nodes as needed

View File

@@ -14,20 +14,23 @@ class DebugMetadata:
"required": {
"images": ("IMAGE",),
},
"hidden": {
"id": "UNIQUE_ID",
},
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("metadata_json",)
FUNCTION = "process_metadata"
def process_metadata(self, images):
def process_metadata(self, images, id):
try:
# Get the current execution context's metadata
from ..metadata_collector import get_metadata
metadata = get_metadata()
# Use the MetadataProcessor to convert it to JSON string
metadata_json = MetadataProcessor.to_json(metadata)
metadata_json = MetadataProcessor.to_json(metadata, id)
return (metadata_json,)
except Exception as e:

View File

@@ -1,10 +1,7 @@
import logging
from nodes import LoraLoader
from comfy.comfy_types import IO # type: ignore
from ..services.lora_scanner import LoraScanner
from ..config import config
import asyncio
import os
from .utils import FlexibleOptionalInputType, any_type, get_lora_info, extract_lora_name, get_loras_list
logger = logging.getLogger(__name__)
@@ -51,7 +48,11 @@ class LoraManagerLoader:
_, trigger_words = asyncio.run(get_lora_info(lora_name))
all_trigger_words.extend(trigger_words)
loaded_loras.append(f"{lora_name}: {model_strength}")
# Add clip strength to output if different from model strength
if abs(model_strength - clip_strength) > 0.001:
loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
else:
loaded_loras.append(f"{lora_name}: {model_strength}")
# Then process loras from kwargs with support for both old and new formats
loras_list = get_loras_list(kwargs)
@@ -60,14 +61,21 @@ class LoraManagerLoader:
continue
lora_name = lora['name']
strength = float(lora['strength'])
model_strength = float(lora['strength'])
# Get clip strength - use model strength as default if not specified
clip_strength = float(lora.get('clipStrength', model_strength))
# Get lora path and trigger words
lora_path, trigger_words = asyncio.run(get_lora_info(lora_name))
# Apply the LoRA using the resolved path
model, clip = LoraLoader().load_lora(model, clip, lora_path, strength, strength)
loaded_loras.append(f"{lora_name}: {strength}")
# Apply the LoRA using the resolved path with separate strengths
model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)
# Include clip strength in output if different from model strength
if abs(model_strength - clip_strength) > 0.001:
loaded_loras.append(f"{lora_name}: {model_strength},{clip_strength}")
else:
loaded_loras.append(f"{lora_name}: {model_strength}")
# Add trigger words to collection
all_trigger_words.extend(trigger_words)
@@ -75,8 +83,23 @@ class LoraManagerLoader:
# use ',, ' to separate trigger words for group mode
trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""
# Format loaded_loras as <lora:lora_name:strength> separated by spaces
formatted_loras = " ".join([f"<lora:{name.split(':')[0].strip()}:{str(strength).strip()}>"
for name, strength in [item.split(':') for item in loaded_loras]])
# Format loaded_loras with support for both formats
formatted_loras = []
for item in loaded_loras:
parts = item.split(":")
lora_name = parts[0].strip()
strength_parts = parts[1].strip().split(",")
if len(strength_parts) > 1:
# Different model and clip strengths
model_str = strength_parts[0].strip()
clip_str = strength_parts[1].strip()
formatted_loras.append(f"<lora:{lora_name}:{model_str}:{clip_str}>")
else:
# Same strength for both
model_str = strength_parts[0].strip()
formatted_loras.append(f"<lora:{lora_name}:{model_str}>")
formatted_loras_text = " ".join(formatted_loras)
return (model, clip, trigger_words_text, formatted_loras)
return (model, clip, trigger_words_text, formatted_loras_text)

View File

@@ -38,7 +38,7 @@ class LoraStacker:
# Process existing lora_stack if available
lora_stack = kwargs.get('lora_stack', None)
if lora_stack:
if (lora_stack):
stack.extend(lora_stack)
# Get trigger words from existing stack entries
for lora_path, _, _ in lora_stack:
@@ -54,7 +54,8 @@ class LoraStacker:
lora_name = lora['name']
model_strength = float(lora['strength'])
clip_strength = model_strength # Using same strength for both as in the original loader
# Get clip strength - use model strength as default if not specified
clip_strength = float(lora.get('clipStrength', model_strength))
# Get lora path and trigger words
lora_path, trigger_words = asyncio.run(get_lora_info(lora_name))
@@ -62,15 +63,24 @@ class LoraStacker:
# Add to stack without loading
# replace '/' with os.sep to avoid different OS path format
stack.append((lora_path.replace('/', os.sep), model_strength, clip_strength))
active_loras.append((lora_name, model_strength))
active_loras.append((lora_name, model_strength, clip_strength))
# Add trigger words to collection
all_trigger_words.extend(trigger_words)
# use ',, ' to separate trigger words for group mode
trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""
# Format active_loras as <lora:lora_name:strength> separated by spaces
active_loras_text = " ".join([f"<lora:{name}:{str(strength).strip()}>"
for name, strength in active_loras])
# Format active_loras with support for both formats
formatted_loras = []
for name, model_strength, clip_strength in active_loras:
if abs(model_strength - clip_strength) > 0.001:
# Different model and clip strengths
formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}:{str(clip_strength).strip()}>")
else:
# Same strength for both
formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}>")
active_loras_text = " ".join(formatted_loras)
return (stack, trigger_words_text, active_loras_text)

View File

@@ -31,16 +31,36 @@ class SaveImage:
return {
"required": {
"images": ("IMAGE",),
"filename_prefix": ("STRING", {"default": "ComfyUI"}),
"file_format": (["png", "jpeg", "webp"],),
"filename_prefix": ("STRING", {
"default": "ComfyUI",
"tooltip": "Base filename for saved images. Supports format patterns like %seed%, %width%, %height%, %model%, etc."
}),
"file_format": (["png", "jpeg", "webp"], {
"tooltip": "Image format to save as. PNG preserves quality, JPEG is smaller, WebP balances size and quality."
}),
},
"optional": {
"lossless_webp": ("BOOLEAN", {"default": False}),
"quality": ("INT", {"default": 100, "min": 1, "max": 100}),
"embed_workflow": ("BOOLEAN", {"default": False}),
"add_counter_to_filename": ("BOOLEAN", {"default": True}),
"lossless_webp": ("BOOLEAN", {
"default": False,
"tooltip": "When enabled, saves WebP images with lossless compression. Results in larger files but no quality loss."
}),
"quality": ("INT", {
"default": 100,
"min": 1,
"max": 100,
"tooltip": "Compression quality for JPEG and lossy WebP formats (1-100). Higher values mean better quality but larger files."
}),
"embed_workflow": ("BOOLEAN", {
"default": False,
"tooltip": "Embeds the complete workflow data into the image metadata. Only works with PNG and WebP formats."
}),
"add_counter_to_filename": ("BOOLEAN", {
"default": True,
"tooltip": "Adds an incremental counter to filenames to prevent overwriting previous images."
}),
},
"hidden": {
"id": "UNIQUE_ID",
"prompt": "PROMPT",
"extra_pnginfo": "EXTRA_PNGINFO",
},
@@ -223,7 +243,7 @@ class SaveImage:
if lora_hashes:
lora_hash_parts = []
for lora_name, hash_value in lora_hashes.items():
lora_hash_parts.append(f"{lora_name}: {hash_value}")
lora_hash_parts.append(f"{lora_name}: {hash_value[:10]}")
if lora_hash_parts:
params.append(f"Lora hashes: \"{', '.join(lora_hash_parts)}\"")
@@ -300,14 +320,14 @@ class SaveImage:
return filename
def save_images(self, images, filename_prefix, file_format, prompt=None, extra_pnginfo=None,
def save_images(self, images, filename_prefix, file_format, id, prompt=None, extra_pnginfo=None,
lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True):
"""Save images with metadata"""
results = []
# Get metadata using the metadata collector
raw_metadata = get_metadata()
metadata_dict = MetadataProcessor.to_dict(raw_metadata)
metadata_dict = MetadataProcessor.to_dict(raw_metadata, id)
# Get or create metadata asynchronously
metadata = asyncio.run(self.format_metadata(metadata_dict))
@@ -378,14 +398,23 @@ class SaveImage:
print(f"Error adding EXIF data: {e}")
img.save(file_path, format="JPEG", **save_kwargs)
elif file_format == "webp":
# For WebP, also use piexif for metadata
if metadata:
try:
exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
exif_bytes = piexif.dump(exif_dict)
save_kwargs["exif"] = exif_bytes
except Exception as e:
print(f"Error adding EXIF data: {e}")
try:
# For WebP, use piexif for metadata
exif_dict = {}
if metadata:
exif_dict['Exif'] = {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}
# Add workflow if needed
if embed_workflow and extra_pnginfo is not None:
workflow_json = json.dumps(extra_pnginfo["workflow"])
exif_dict['0th'] = {piexif.ImageIFD.ImageDescription: "Workflow:" + workflow_json}
exif_bytes = piexif.dump(exif_dict)
save_kwargs["exif"] = exif_bytes
except Exception as e:
print(f"Error adding EXIF data: {e}")
img.save(file_path, format="WEBP", **save_kwargs)
results.append({
@@ -399,7 +428,7 @@ class SaveImage:
return results
def process_image(self, images, filename_prefix="ComfyUI", file_format="png", prompt=None, extra_pnginfo=None,
def process_image(self, images, id, filename_prefix="ComfyUI", file_format="png", prompt=None, extra_pnginfo=None,
lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True):
"""Process and save image with metadata"""
# Make sure the output directory exists
@@ -416,6 +445,7 @@ class SaveImage:
images,
filename_prefix,
file_format,
id,
prompt,
extra_pnginfo,
lossless_webp,

View File

@@ -16,11 +16,18 @@ class TriggerWordToggle:
def INPUT_TYPES(cls):
return {
"required": {
"group_mode": ("BOOLEAN", {"default": True}),
"group_mode": ("BOOLEAN", {
"default": True,
"tooltip": "When enabled, treats each group of trigger words as a single toggleable unit."
}),
"default_active": ("BOOLEAN", {
"default": True,
"tooltip": "Sets the default initial state (active or inactive) when trigger words are added."
}),
},
"optional": FlexibleOptionalInputType(any_type),
"hidden": {
"id": "UNIQUE_ID", # 会被 ComfyUI 自动替换为唯一ID
"id": "UNIQUE_ID",
},
}
@@ -41,7 +48,7 @@ class TriggerWordToggle:
else:
return data
def process_trigger_words(self, id, group_mode, **kwargs):
def process_trigger_words(self, id, group_mode, default_active, **kwargs):
# Handle both old and new formats for trigger_words
trigger_words_data = self._get_toggle_data(kwargs, 'trigger_words')
trigger_words = trigger_words_data if isinstance(trigger_words_data, str) else ""

24
py/recipes/__init__.py Normal file
View File

@@ -0,0 +1,24 @@
"""Recipe metadata parser package for ComfyUI-Lora-Manager."""
from .base import RecipeMetadataParser
from .factory import RecipeParserFactory
from .constants import GEN_PARAM_KEYS, VALID_LORA_TYPES
from .parsers import (
RecipeFormatParser,
ComfyMetadataParser,
MetaFormatParser,
AutomaticMetadataParser,
CivitaiApiMetadataParser
)
__all__ = [
'RecipeMetadataParser',
'RecipeParserFactory',
'GEN_PARAM_KEYS',
'VALID_LORA_TYPES',
'RecipeFormatParser',
'ComfyMetadataParser',
'MetaFormatParser',
'AutomaticMetadataParser',
'CivitaiApiMetadataParser'
]

181
py/recipes/base.py Normal file
View File

@@ -0,0 +1,181 @@
"""Base classes for recipe parsers."""
import json
import logging
import os
import re
from typing import Dict, List, Any, Optional, Tuple
from abc import ABC, abstractmethod
from ..config import config
from .constants import VALID_LORA_TYPES
logger = logging.getLogger(__name__)
class RecipeMetadataParser(ABC):
    """Interface for parsing recipe metadata from image user comments"""

    # Marker (regex or substring) identifying this parser's format; subclasses override.
    METADATA_MARKER = None

    @abstractmethod
    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the metadata format"""
        pass

    @abstractmethod
    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """
        Parse metadata from user comment and return structured recipe data

        Args:
            user_comment: The EXIF UserComment string from the image
            recipe_scanner: Optional recipe scanner instance for local LoRA lookup
            civitai_client: Optional Civitai client for fetching model information

        Returns:
            Dict containing parsed recipe data with standardized format
        """
        pass

    async def populate_lora_from_civitai(self, lora_entry: Dict[str, Any], civitai_info_tuple: Tuple[Dict[str, Any], Optional[str]],
                                         recipe_scanner=None, base_model_counts=None, hash_value=None) -> Optional[Dict[str, Any]]:
        """
        Populate a lora entry with information from Civitai API response

        Args:
            lora_entry: The lora entry to populate (mutated in place and returned)
            civitai_info_tuple: The response tuple from Civitai API (data, error_msg);
                a bare dict is also tolerated for backward compatibility
            recipe_scanner: Optional recipe scanner for local file lookup
            base_model_counts: Optional dict to track base model counts (mutated in place)
            hash_value: Optional hash value to use if not available in civitai_info

        Returns:
            The populated lora_entry dict if type is valid, None otherwise
            (entries whose Civitai model type is not in VALID_LORA_TYPES are filtered out)
        """
        try:
            # Unpack the tuple to get the actual data; tolerate a bare dict response
            civitai_info, error_msg = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)

            if not civitai_info or civitai_info.get("error") == "Model not found":
                # Model not found or deleted on Civitai: flag it and keep placeholder preview
                lora_entry['isDeleted'] = True
                lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png'
                return lora_entry

            # Get model type and validate against the accepted LoRA-like types
            model_type = civitai_info.get('model', {}).get('type', '').lower()
            lora_entry['type'] = model_type
            if model_type not in VALID_LORA_TYPES:
                logger.debug(f"Skipping non-LoRA model type: {model_type}")
                return None

            # Check if this is an early access lora
            if civitai_info.get('earlyAccessEndsAt'):
                # NOTE(review): the value is stored as-is; presumably an ISO timestamp
                # string from the Civitai API — confirm before formatting for display
                early_access_date = civitai_info.get('earlyAccessEndsAt', '')
                lora_entry['isEarlyAccess'] = True
                lora_entry['earlyAccessEndsAt'] = early_access_date

            # Update model name if available
            if 'model' in civitai_info and 'name' in civitai_info['model']:
                lora_entry['name'] = civitai_info['model']['name']

            # Update version if available (the response's top-level 'name' is the version name)
            if 'name' in civitai_info:
                lora_entry['version'] = civitai_info.get('name', '')

            # Get thumbnail URL from first image
            if 'images' in civitai_info and civitai_info['images']:
                lora_entry['thumbnailUrl'] = civitai_info['images'][0].get('url', '')

            # Get base model
            current_base_model = civitai_info.get('baseModel', '')
            lora_entry['baseModel'] = current_base_model

            # Update base model counts if tracking them (callers use this to pick
            # the dominant base model across all loras in a recipe)
            if base_model_counts is not None and current_base_model:
                base_model_counts[current_base_model] = base_model_counts.get(current_base_model, 0) + 1

            # Get download URL
            lora_entry['downloadUrl'] = civitai_info.get('downloadUrl', '')

            # Process file information if available
            if 'files' in civitai_info:
                # Find the primary model file (type="Model" and primary=true) in the files list
                model_file = next((file for file in civitai_info.get('files', [])
                                   if file.get('type') == 'Model' and file.get('primary') == True), None)
                if model_file:
                    # Get size (API reports sizeKB; stored here in bytes)
                    lora_entry['size'] = model_file.get('sizeKB', 0) * 1024

                    # Get SHA256 hash, falling back to the caller-provided hash_value
                    sha256 = model_file.get('hashes', {}).get('SHA256', hash_value)
                    if sha256:
                        lora_entry['hash'] = sha256.lower()

                        # Check if exists locally
                        if recipe_scanner and lora_entry['hash']:
                            lora_scanner = recipe_scanner._lora_scanner
                            exists_locally = lora_scanner.has_lora_hash(lora_entry['hash'])
                            if exists_locally:
                                try:
                                    local_path = lora_scanner.get_lora_path_by_hash(lora_entry['hash'])
                                    lora_entry['existsLocally'] = True
                                    lora_entry['localPath'] = local_path
                                    lora_entry['file_name'] = os.path.splitext(os.path.basename(local_path))[0]

                                    # Get thumbnail from local preview if available
                                    lora_cache = await lora_scanner.get_cached_data()
                                    lora_item = next((item for item in lora_cache.raw_data
                                                      if item['sha256'].lower() == lora_entry['hash'].lower()), None)
                                    if lora_item and 'preview_url' in lora_item:
                                        lora_entry['thumbnailUrl'] = config.get_preview_static_url(lora_item['preview_url'])
                                except Exception as e:
                                    logger.error(f"Error getting local lora path: {e}")
                            else:
                                # For missing LoRAs, get file_name from model_file.name
                                file_name = model_file.get('name', '')
                                lora_entry['file_name'] = os.path.splitext(file_name)[0] if file_name else ''
        except Exception as e:
            # Best-effort: on any failure, log and return the partially-populated entry
            logger.error(f"Error populating lora from Civitai info: {e}")

        return lora_entry

    async def populate_checkpoint_from_civitai(self, checkpoint: Dict[str, Any], civitai_info: Dict[str, Any]) -> Dict[str, Any]:
        """
        Populate checkpoint information from Civitai API response

        Args:
            checkpoint: The checkpoint entry to populate (mutated in place and returned)
            civitai_info: The response from Civitai API

        Returns:
            The populated checkpoint dict
        """
        try:
            if civitai_info and civitai_info.get("error") != "Model not found":
                # Update model name if available
                if 'model' in civitai_info and 'name' in civitai_info['model']:
                    checkpoint['name'] = civitai_info['model']['name']

                # Update version if available (top-level 'name' is the version name)
                if 'name' in civitai_info:
                    checkpoint['version'] = civitai_info.get('name', '')

                # Get thumbnail URL from first image
                if 'images' in civitai_info and civitai_info['images']:
                    checkpoint['thumbnailUrl'] = civitai_info['images'][0].get('url', '')

                # Get base model
                checkpoint['baseModel'] = civitai_info.get('baseModel', '')

                # Get download URL
                checkpoint['downloadUrl'] = civitai_info.get('downloadUrl', '')
            else:
                # Model not found or deleted
                checkpoint['isDeleted'] = True
        except Exception as e:
            # Best-effort: log and return whatever was populated so far
            logger.error(f"Error populating checkpoint from Civitai info: {e}")

        return checkpoint

16
py/recipes/constants.py Normal file
View File

@@ -0,0 +1,16 @@
"""Constants used across recipe parsers."""
# Constants for generation parameters
GEN_PARAM_KEYS = [
'prompt',
'negative_prompt',
'steps',
'sampler',
'cfg_scale',
'seed',
'size',
'clip_skip',
]
# Valid Lora types
VALID_LORA_TYPES = ['lora', 'locon']

64
py/recipes/factory.py Normal file
View File

@@ -0,0 +1,64 @@
"""Factory for creating recipe metadata parsers."""
import logging
from .parsers import (
RecipeFormatParser,
ComfyMetadataParser,
MetaFormatParser,
AutomaticMetadataParser,
CivitaiApiMetadataParser
)
from .base import RecipeMetadataParser
logger = logging.getLogger(__name__)
class RecipeParserFactory:
    """Factory for creating recipe metadata parsers"""

    @staticmethod
    def create_parser(metadata) -> "RecipeMetadataParser | None":
        """
        Create appropriate parser based on the metadata content

        Args:
            metadata: The metadata from the image (dict or str)

        Returns:
            Appropriate RecipeMetadataParser implementation, or None when no
            parser recognizes the metadata (or a dict cannot be serialized).
        """
        if isinstance(metadata, dict):
            # Dict input: CivitaiApiMetadataParser is the only parser that
            # consumes dicts directly, so probe it first. Reuse the probed
            # instance instead of constructing the parser a second time.
            try:
                civitai_parser = CivitaiApiMetadataParser()
                if civitai_parser.is_metadata_matching(metadata):
                    return civitai_parser
            except Exception as e:
                logger.debug(f"CivitaiApiMetadataParser check failed: {e}")

            # Convert dict to string for other parsers that expect string input
            try:
                import json
                metadata_str = json.dumps(metadata)
            except Exception as e:
                logger.debug(f"Failed to convert dict to JSON string: {e}")
                return None
        else:
            metadata_str = metadata

        # Try ComfyMetadataParser, which requires valid JSON input
        try:
            comfy_parser = ComfyMetadataParser()
            if comfy_parser.is_metadata_matching(metadata_str):
                return comfy_parser
        except Exception:
            # If JSON parsing fails, move on to other parsers
            pass

        # Probe the remaining string-based parsers in priority order
        for parser_cls in (RecipeFormatParser, AutomaticMetadataParser, MetaFormatParser):
            parser = parser_cls()
            if parser.is_metadata_matching(metadata_str):
                return parser
        return None

View File

@@ -0,0 +1,15 @@
"""Recipe parsers package."""
from .recipe_format import RecipeFormatParser
from .comfy import ComfyMetadataParser
from .meta_format import MetaFormatParser
from .automatic import AutomaticMetadataParser
from .civitai_image import CivitaiApiMetadataParser
__all__ = [
'RecipeFormatParser',
'ComfyMetadataParser',
'MetaFormatParser',
'AutomaticMetadataParser',
'CivitaiApiMetadataParser',
]

View File

@@ -0,0 +1,304 @@
"""Parser for Automatic1111 metadata format."""
import re
import json
import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
logger = logging.getLogger(__name__)
class AutomaticMetadataParser(RecipeMetadataParser):
    """Parser for Automatic1111 metadata format"""

    # A1111 parameter sections always include a "Steps: N" entry; used to detect the format.
    METADATA_MARKER = r"Steps: \d+"

    # Regular expressions for extracting specific metadata
    HASHES_REGEX = r', Hashes:\s*({[^}]+})'                            # JSON blob of model/lora hashes
    LORA_HASHES_REGEX = r', Lora hashes:\s*"([^"]+)"'                  # alternative "name: hash, ..." list
    CIVITAI_RESOURCES_REGEX = r', Civitai resources:\s*(\[\{.*?\}\])'  # JSON array of Civitai resources
    CIVITAI_METADATA_REGEX = r', Civitai metadata:\s*(\{.*?\})'
    EXTRANETS_REGEX = r'<(lora|hypernet):([a-zA-Z0-9_\.\-]+):([0-9.]+)>'  # <lora:name:weight> tags
    MODEL_HASH_PATTERN = r'Model hash: ([a-zA-Z0-9]+)'
    VAE_HASH_PATTERN = r'VAE hash: ([a-zA-Z0-9]+)'

    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the Automatic1111 format"""
        return re.search(self.METADATA_MARKER, user_comment) is not None

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from Automatic1111 format

        Args:
            user_comment: The A1111-style parameters text from the image
            recipe_scanner: Optional recipe scanner for local LoRA lookup
            civitai_client: Optional Civitai client for enrichment lookups

        Returns:
            Dict with 'base_model', 'loras', 'gen_params' and a
            'from_automatic_metadata' flag, or {'error': ..., 'loras': []} on failure
        """
        try:
            # Split on Negative prompt if it exists
            if "Negative prompt:" in user_comment:
                parts = user_comment.split('Negative prompt:', 1)
                prompt = parts[0].strip()
                negative_and_params = parts[1] if len(parts) > 1 else ""
            else:
                # No negative prompt section; split at the "Steps:" parameter marker instead
                param_start = re.search(self.METADATA_MARKER, user_comment)
                if param_start:
                    prompt = user_comment[:param_start.start()].strip()
                    negative_and_params = user_comment[param_start.start():]
                else:
                    prompt = user_comment.strip()
                    negative_and_params = ""

            # Initialize metadata
            metadata = {
                "prompt": prompt,
                "loras": []
            }

            # Extract negative prompt and parameters
            if negative_and_params:
                # If we split on "Negative prompt:", check for params section
                if "Negative prompt:" in user_comment:
                    param_start = re.search(r'Steps: ', negative_and_params)
                    if param_start:
                        neg_prompt = negative_and_params[:param_start.start()].strip()
                        metadata["negative_prompt"] = neg_prompt
                        params_section = negative_and_params[param_start.start():]
                    else:
                        metadata["negative_prompt"] = negative_and_params.strip()
                        params_section = ""
                else:
                    # No negative prompt, entire section is params
                    params_section = negative_and_params

                # Extract generation parameters
                if params_section:
                    # Extract Civitai resources (removed from params_section afterwards
                    # so the JSON does not confuse the key/value parameter regex below)
                    civitai_resources_match = re.search(self.CIVITAI_RESOURCES_REGEX, params_section)
                    if civitai_resources_match:
                        try:
                            civitai_resources = json.loads(civitai_resources_match.group(1))
                            metadata["civitai_resources"] = civitai_resources
                            params_section = params_section.replace(civitai_resources_match.group(0), '')
                        except json.JSONDecodeError:
                            logger.error("Error parsing Civitai resources JSON")

                    # Extract Hashes
                    hashes_match = re.search(self.HASHES_REGEX, params_section)
                    if hashes_match:
                        try:
                            hashes = json.loads(hashes_match.group(1))
                            # Process hash keys: normalize "Prefix:path/name.safetensors" into
                            # "prefix:name" so lookups later are consistent
                            processed_hashes = {}
                            for key, value in hashes.items():
                                # Convert Model: or LORA: prefix to lowercase if present
                                if ':' in key:
                                    prefix, name = key.split(':', 1)
                                    prefix = prefix.lower()
                                else:
                                    prefix = ''
                                    name = key
                                # Clean up the name part
                                if '/' in name:
                                    name = name.split('/')[-1]  # Get last part after /
                                if '.safetensors' in name:
                                    name = name.split('.safetensors')[0]  # Remove .safetensors
                                # Reconstruct the key
                                new_key = f"{prefix}:{name}" if prefix else name
                                processed_hashes[new_key] = value
                            metadata["hashes"] = processed_hashes
                            # Remove hashes from params section to not interfere with other parsing
                            params_section = params_section.replace(hashes_match.group(0), '')
                        except json.JSONDecodeError:
                            logger.error("Error parsing hashes JSON")

                    # Extract Lora hashes in alternative format (only when the JSON
                    # "Hashes:" blob was absent)
                    lora_hashes_match = re.search(self.LORA_HASHES_REGEX, params_section)
                    if not hashes_match and lora_hashes_match:
                        try:
                            lora_hashes_str = lora_hashes_match.group(1)
                            lora_hash_entries = lora_hashes_str.split(', ')
                            # Initialize hashes dict if it doesn't exist
                            if "hashes" not in metadata:
                                metadata["hashes"] = {}
                            # Parse each lora hash entry (format: "name: hash")
                            for entry in lora_hash_entries:
                                if ': ' in entry:
                                    lora_name, lora_hash = entry.split(': ', 1)
                                    # Add as lora type in the same format as regular hashes
                                    metadata["hashes"][f"lora:{lora_name}"] = lora_hash.strip()
                            # Remove lora hashes from params section
                            params_section = params_section.replace(lora_hashes_match.group(0), '')
                        except Exception as e:
                            logger.error(f"Error parsing Lora hashes: {e}")

                    # Extract basic "Key: value" parameters
                    param_pattern = r'([A-Za-z\s]+): ([^,]+)'
                    params = re.findall(param_pattern, params_section)
                    gen_params = {}
                    for key, value in params:
                        clean_key = key.strip().lower().replace(' ', '_')
                        # Skip if not in recognized gen param keys
                        if clean_key not in GEN_PARAM_KEYS:
                            continue
                        # Convert numeric values where expected; fall back to the raw string
                        if clean_key in ['steps', 'seed']:
                            try:
                                gen_params[clean_key] = int(value.strip())
                            except ValueError:
                                gen_params[clean_key] = value.strip()
                        elif clean_key in ['cfg_scale']:
                            try:
                                gen_params[clean_key] = float(value.strip())
                            except ValueError:
                                gen_params[clean_key] = value.strip()
                        else:
                            gen_params[clean_key] = value.strip()

                    # Extract size if available and add to gen_params if a recognized key
                    size_match = re.search(r'Size: (\d+)x(\d+)', params_section)
                    if size_match and 'size' in GEN_PARAM_KEYS:
                        width, height = size_match.groups()
                        gen_params['size'] = f"{width}x{height}"

                    # Add prompt and negative_prompt to gen_params if they're in GEN_PARAM_KEYS
                    if 'prompt' in GEN_PARAM_KEYS and 'prompt' in metadata:
                        gen_params['prompt'] = metadata['prompt']
                    if 'negative_prompt' in GEN_PARAM_KEYS and 'negative_prompt' in metadata:
                        gen_params['negative_prompt'] = metadata['negative_prompt']
                    metadata["gen_params"] = gen_params

            # Extract LoRA information
            loras = []
            base_model_counts = {}

            # First use Civitai resources if available (more reliable source)
            if metadata.get("civitai_resources"):
                for resource in metadata.get("civitai_resources", []):
                    if resource.get("type") in ["lora", "lycoris", "hypernet"] and resource.get("modelVersionId"):
                        # Initialize lora entry with placeholder display values
                        lora_entry = {
                            'id': str(resource.get("modelVersionId")),
                            'modelId': str(resource.get("modelId")) if resource.get("modelId") else None,
                            'name': resource.get("modelName", "Unknown LoRA"),
                            'version': resource.get("modelVersionName", ""),
                            'type': resource.get("type", "lora"),
                            'weight': round(float(resource.get("weight", 1.0)), 2),
                            'existsLocally': False,
                            'thumbnailUrl': '/loras_static/images/no-preview.png',
                            'baseModel': '',
                            'size': 0,
                            'downloadUrl': '',
                            'isDeleted': False
                        }
                        # Get additional info from Civitai
                        if civitai_client:
                            try:
                                civitai_info = await civitai_client.get_model_version_info(resource.get("modelVersionId"))
                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,
                                    civitai_info,
                                    recipe_scanner,
                                    base_model_counts
                                )
                                if populated_entry is None:
                                    continue  # Skip invalid LoRA types
                                lora_entry = populated_entry
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for LoRA {lora_entry['name']}: {e}")
                        loras.append(lora_entry)

            # If no LoRAs from Civitai resources, fall back to metadata["hashes"]
            if not loras or len(loras) == 0:
                # Extract lora weights from extranet tags in prompt (for later use)
                lora_weights = {}
                lora_matches = re.findall(self.EXTRANETS_REGEX, prompt)
                for lora_type, lora_name, lora_weight in lora_matches:
                    key = f"{lora_type}:{lora_name}"
                    lora_weights[key] = round(float(lora_weight), 2)

                # Use hashes from metadata as the primary source
                if metadata.get("hashes"):
                    for hash_key, lora_hash in metadata.get("hashes", {}).items():
                        # Only process lora or hypernet types
                        if not hash_key.startswith(("lora:", "hypernet:")):
                            continue
                        lora_type, lora_name = hash_key.split(':', 1)
                        # Get weight from extranet tags if available, else default to 1.0
                        weight = lora_weights.get(hash_key, 1.0)
                        # Initialize lora entry
                        lora_entry = {
                            'name': lora_name,
                            'type': lora_type,  # 'lora' or 'hypernet'
                            'weight': weight,
                            'hash': lora_hash,
                            'existsLocally': False,
                            'localPath': None,
                            'file_name': lora_name,
                            'thumbnailUrl': '/loras_static/images/no-preview.png',
                            'baseModel': '',
                            'size': 0,
                            'downloadUrl': '',
                            'isDeleted': False
                        }
                        # Try to get info from Civitai
                        if civitai_client:
                            try:
                                if lora_hash:
                                    # If we have hash, use it for lookup
                                    civitai_info = await civitai_client.get_model_by_hash(lora_hash)
                                else:
                                    civitai_info = None
                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,
                                    civitai_info,
                                    recipe_scanner,
                                    base_model_counts,
                                    lora_hash
                                )
                                if populated_entry is None:
                                    continue  # Skip invalid LoRA types
                                lora_entry = populated_entry
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for LoRA {lora_name}: {e}")
                        loras.append(lora_entry)

            # Try to get base model from resources or make educated guess
            base_model = None
            if base_model_counts:
                # Use the most common base model from the loras
                base_model = max(base_model_counts.items(), key=lambda x: x[1])[0]

            # Prepare final result structure.
            # Make sure gen_params only contains recognized keys
            filtered_gen_params = {}
            for key in GEN_PARAM_KEYS:
                if key in metadata.get("gen_params", {}):
                    filtered_gen_params[key] = metadata["gen_params"][key]

            result = {
                'base_model': base_model,
                'loras': loras,
                'gen_params': filtered_gen_params,
                'from_automatic_metadata': True
            }
            return result
        except Exception as e:
            logger.error(f"Error parsing Automatic1111 metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}

View File

@@ -0,0 +1,248 @@
"""Parser for Civitai image metadata format."""
import json
import logging
from typing import Dict, Any, Union
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
logger = logging.getLogger(__name__)
class CivitaiApiMetadataParser(RecipeMetadataParser):
    """Parser for Civitai image metadata format"""

    def is_metadata_matching(self, metadata) -> bool:
        """Check if the metadata matches the Civitai image metadata format

        Args:
            metadata: The metadata from the image (dict)

        Returns:
            bool: True if this parser can handle the metadata
        """
        if not metadata or not isinstance(metadata, dict):
            return False
        # Check for key markers specific to Civitai image metadata
        return any([
            "resources" in metadata,
            "civitaiResources" in metadata,
            "additionalResources" in metadata
        ])

    async def parse_metadata(self, metadata, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from Civitai image format

        Args:
            metadata: The metadata from the image (dict)
            recipe_scanner: Optional recipe scanner service
            civitai_client: Optional Civitai API client

        Returns:
            Dict containing parsed recipe data ('base_model', 'loras', 'gen_params'),
            or {'error': ..., 'loras': []} on failure
        """
        try:
            # Initialize result structure
            result = {
                'base_model': None,
                'loras': [],
                'gen_params': {},
                'from_civitai_image': True
            }

            # Extract prompt and negative prompt
            if "prompt" in metadata:
                result["gen_params"]["prompt"] = metadata["prompt"]
            if "negativePrompt" in metadata:
                result["gen_params"]["negative_prompt"] = metadata["negativePrompt"]

            # Map Civitai parameter names onto our recognized gen-param keys
            param_mapping = {
                "steps": "steps",
                "sampler": "sampler",
                "cfgScale": "cfg_scale",
                "seed": "seed",
                "Size": "size",
                "clipSkip": "clip_skip",
            }
            for civitai_key, our_key in param_mapping.items():
                if civitai_key in metadata and our_key in GEN_PARAM_KEYS:
                    result["gen_params"][our_key] = metadata[civitai_key]

            # Extract base model information - directly if available,
            # otherwise resolve via model hash lookup on Civitai
            if "baseModel" in metadata:
                result["base_model"] = metadata["baseModel"]
            elif "Model hash" in metadata and civitai_client:
                model_hash = metadata["Model hash"]
                model_info = await civitai_client.get_model_by_hash(model_hash)
                if model_info:
                    result["base_model"] = model_info.get("baseModel", "")
            elif "Model" in metadata and isinstance(metadata.get("resources"), list):
                # Try to find base model in resources
                for resource in metadata.get("resources", []):
                    if resource.get("type") == "model" and resource.get("name") == metadata.get("Model"):
                        # This is likely the checkpoint model
                        if civitai_client and resource.get("hash"):
                            model_info = await civitai_client.get_model_by_hash(resource.get("hash"))
                            if model_info:
                                result["base_model"] = model_info.get("baseModel", "")

            # Tracks how often each base model appears among the loras,
            # used as a fallback for result["base_model"] at the end
            base_model_counts = {}

            # Process standard resources array (entries identified by hash)
            if "resources" in metadata and isinstance(metadata["resources"], list):
                for resource in metadata["resources"]:
                    # Resources without a type field are treated as potential LoRAs
                    if resource.get("type", "lora") == "lora":
                        lora_entry = {
                            'name': resource.get("name", "Unknown LoRA"),
                            'type': "lora",
                            'weight': float(resource.get("weight", 1.0)),
                            'hash': resource.get("hash", ""),
                            'existsLocally': False,
                            'localPath': None,
                            'file_name': resource.get("name", "Unknown"),
                            'thumbnailUrl': '/loras_static/images/no-preview.png',
                            'baseModel': '',
                            'size': 0,
                            'downloadUrl': '',
                            'isDeleted': False
                        }
                        # Try to get info from Civitai if hash is available
                        if lora_entry['hash'] and civitai_client:
                            try:
                                lora_hash = lora_entry['hash']
                                civitai_info = await civitai_client.get_model_by_hash(lora_hash)
                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,
                                    civitai_info,
                                    recipe_scanner,
                                    base_model_counts,
                                    lora_hash
                                )
                                if populated_entry is None:
                                    continue  # Skip invalid LoRA types
                                lora_entry = populated_entry
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for LoRA hash {lora_entry['hash']}: {e}")
                        result["loras"].append(lora_entry)

            # Process civitaiResources array (entries identified by modelVersionId)
            if "civitaiResources" in metadata and isinstance(metadata["civitaiResources"], list):
                for resource in metadata["civitaiResources"]:
                    # Resources without a type field are treated as potential LoRAs
                    if resource.get("type") in ["lora", "lycoris"] or "type" not in resource:
                        # Initialize lora entry with the same structure as in automatic.py
                        lora_entry = {
                            'id': str(resource.get("modelVersionId")),
                            'modelId': str(resource.get("modelId")) if resource.get("modelId") else None,
                            'name': resource.get("modelName", "Unknown LoRA"),
                            'version': resource.get("modelVersionName", ""),
                            'type': resource.get("type", "lora"),
                            'weight': round(float(resource.get("weight", 1.0)), 2),
                            'existsLocally': False,
                            'thumbnailUrl': '/loras_static/images/no-preview.png',
                            'baseModel': '',
                            'size': 0,
                            'downloadUrl': '',
                            'isDeleted': False
                        }
                        # Try to get info from Civitai if modelVersionId is available
                        if resource.get('modelVersionId') and civitai_client:
                            try:
                                version_id = str(resource.get('modelVersionId'))
                                # get_model_version_info returns an (info, error) tuple
                                civitai_info, error = await civitai_client.get_model_version_info(version_id)
                                if error:
                                    logger.warning(f"Error getting model version info: {error}")
                                    continue
                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,
                                    civitai_info,
                                    recipe_scanner,
                                    base_model_counts
                                )
                                if populated_entry is None:
                                    continue  # Skip invalid LoRA types
                                lora_entry = populated_entry
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for model version {resource.get('modelVersionId')}: {e}")
                        result["loras"].append(lora_entry)

            # Process additionalResources array (entries identified by URN-style names)
            if "additionalResources" in metadata and isinstance(metadata["additionalResources"], list):
                for resource in metadata["additionalResources"]:
                    # Resources without a type field are treated as potential LoRAs
                    if resource.get("type") in ["lora", "lycoris"] or "type" not in resource:
                        lora_type = resource.get("type", "lora")
                        name = resource.get("name", "")
                        # Extract version ID from URN format if available,
                        # e.g. "urn:air:...:civitai:<modelId>@<versionId>"
                        model_id = None
                        if name and "civitai:" in name:
                            parts = name.split("@")
                            if len(parts) > 1:
                                model_id = parts[1]
                        lora_entry = {
                            'name': name,
                            'type': lora_type,
                            'weight': float(resource.get("strength", 1.0)),
                            'hash': "",
                            'existsLocally': False,
                            'localPath': None,
                            'file_name': name,
                            'thumbnailUrl': '/loras_static/images/no-preview.png',
                            'baseModel': '',
                            'size': 0,
                            'downloadUrl': '',
                            'isDeleted': False
                        }
                        # If we have a model ID and civitai client, try to get more info
                        if model_id and civitai_client:
                            try:
                                # Use get_model_version_info with the extracted ID
                                civitai_info, error = await civitai_client.get_model_version_info(model_id)
                                if error:
                                    logger.warning(f"Error getting model version info: {error}")
                                else:
                                    populated_entry = await self.populate_lora_from_civitai(
                                        lora_entry,
                                        civitai_info,
                                        recipe_scanner,
                                        base_model_counts
                                    )
                                    if populated_entry is None:
                                        continue  # Skip invalid LoRA types
                                    lora_entry = populated_entry
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for model ID {model_id}: {e}")
                        result["loras"].append(lora_entry)

            # If base model wasn't found earlier, use the most common one from LoRAs
            if not result["base_model"] and base_model_counts:
                result["base_model"] = max(base_model_counts.items(), key=lambda x: x[1])[0]

            return result
        except Exception as e:
            logger.error(f"Error parsing Civitai image metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}

216
py/recipes/parsers/comfy.py Normal file
View File

@@ -0,0 +1,216 @@
"""Parser for ComfyUI metadata format."""
import re
import json
import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
logger = logging.getLogger(__name__)
class ComfyMetadataParser(RecipeMetadataParser):
    """Parser for Civitai ComfyUI metadata JSON format.

    The input is the JSON dump of a ComfyUI workflow: a dict mapping node ids
    to node dicts that carry a 'class_type' key. LoRAs and checkpoints are
    referenced via Civitai URNs such as "urn:air:sdxl:lora:civitai:<modelId>@<versionId>".
    """

    # Presence of "class_type" keys is what distinguishes a ComfyUI workflow dump.
    METADATA_MARKER = r"class_type"

    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the ComfyUI metadata format.

        Returns True only when the comment parses as a JSON dict containing at
        least one dict value with a 'class_type' key.
        """
        try:
            data = json.loads(user_comment)
            # Check if it contains class_type nodes typical of ComfyUI workflow
            return isinstance(data, dict) and any(isinstance(v, dict) and 'class_type' in v for v in data.values())
        except (json.JSONDecodeError, TypeError):
            return False

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from Civitai ComfyUI metadata format.

        Args:
            user_comment: JSON string of the ComfyUI workflow.
            recipe_scanner: Optional scanner forwarded to populate_lora_from_civitai.
            civitai_client: Optional Civitai API client for enriching entries.

        Returns:
            Dict with 'base_model', 'loras', 'checkpoint', 'gen_params' and
            'from_comfy_metadata' keys, or {'error': ..., 'loras': []} when no
            LoraLoader node is found or parsing fails.
        """
        try:
            data = json.loads(user_comment)
            loras = []
            # Find all LoraLoader nodes
            lora_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and v.get('class_type') == 'LoraLoader'}
            if not lora_nodes:
                return {"error": "No LoRA information found in this ComfyUI workflow", "loras": []}
            # Process each LoraLoader node
            for node_id, node in lora_nodes.items():
                if 'inputs' not in node or 'lora_name' not in node['inputs']:
                    continue
                lora_name = node['inputs'].get('lora_name', '')
                # Parse the URN to extract model ID and version ID
                # Format: "urn:air:sdxl:lora:civitai:1107767@1253442"
                lora_id_match = re.search(r'civitai:(\d+)@(\d+)', lora_name)
                if not lora_id_match:
                    continue
                model_id = lora_id_match.group(1)
                model_version_id = lora_id_match.group(2)
                # Get strength from node inputs
                weight = node['inputs'].get('strength_model', 1.0)
                # Initialize lora entry with default values
                lora_entry = {
                    'id': model_version_id,
                    'modelId': model_id,
                    'name': f"Lora {model_id}",  # Default name
                    'version': '',
                    'type': 'lora',
                    'weight': weight,
                    'existsLocally': False,
                    'localPath': None,
                    'file_name': '',
                    'hash': '',
                    'thumbnailUrl': '/loras_static/images/no-preview.png',
                    'baseModel': '',
                    'size': 0,
                    'downloadUrl': '',
                    'isDeleted': False
                }
                # Get additional info from Civitai if client is available
                if civitai_client:
                    try:
                        civitai_info_tuple = await civitai_client.get_model_version_info(model_version_id)
                        # NOTE(review): the raw return value (apparently an (info, error)
                        # tuple — see the unpacking in the checkpoint branch below) is
                        # passed through un-unpacked here. Confirm that
                        # populate_lora_from_civitai accepts the tuple form.
                        # Populate lora entry with Civitai info
                        populated_entry = await self.populate_lora_from_civitai(
                            lora_entry,
                            civitai_info_tuple,
                            recipe_scanner
                        )
                        if populated_entry is None:
                            continue  # Skip invalid LoRA types
                        lora_entry = populated_entry
                    except Exception as e:
                        logger.error(f"Error fetching Civitai info for LoRA: {e}")
                loras.append(lora_entry)
            # Find checkpoint info
            checkpoint_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and v.get('class_type') == 'CheckpointLoaderSimple'}
            checkpoint = None
            checkpoint_id = None
            checkpoint_version_id = None
            if checkpoint_nodes:
                # Get the first checkpoint node
                checkpoint_node = next(iter(checkpoint_nodes.values()))
                if 'inputs' in checkpoint_node and 'ckpt_name' in checkpoint_node['inputs']:
                    checkpoint_name = checkpoint_node['inputs']['ckpt_name']
                    # Parse checkpoint URN
                    checkpoint_match = re.search(r'civitai:(\d+)@(\d+)', checkpoint_name)
                    if checkpoint_match:
                        checkpoint_id = checkpoint_match.group(1)
                        checkpoint_version_id = checkpoint_match.group(2)
                        checkpoint = {
                            'id': checkpoint_version_id,
                            'modelId': checkpoint_id,
                            'name': f"Checkpoint {checkpoint_id}",
                            'version': '',
                            'type': 'checkpoint'
                        }
                        # Get additional checkpoint info from Civitai
                        if civitai_client:
                            try:
                                civitai_info_tuple = await civitai_client.get_model_version_info(checkpoint_version_id)
                                # Tolerate both tuple and bare-info return shapes.
                                civitai_info, _ = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)
                                # Populate checkpoint with Civitai info
                                checkpoint = await self.populate_checkpoint_from_civitai(checkpoint, civitai_info)
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for checkpoint: {e}")
            # Extract generation parameters
            gen_params = {}
            # First try to get from extraMetadata
            if 'extraMetadata' in data:
                try:
                    # extraMetadata is a JSON string that needs to be parsed
                    extra_metadata = json.loads(data['extraMetadata'])
                    # Map fields from extraMetadata to our standard format
                    mapping = {
                        'prompt': 'prompt',
                        'negativePrompt': 'negative_prompt',
                        'steps': 'steps',
                        'sampler': 'sampler',
                        'cfgScale': 'cfg_scale',
                        'seed': 'seed'
                    }
                    for src_key, dest_key in mapping.items():
                        if src_key in extra_metadata:
                            gen_params[dest_key] = extra_metadata[src_key]
                    # If size info is available, format as "width x height"
                    if 'width' in extra_metadata and 'height' in extra_metadata:
                        gen_params['size'] = f"{extra_metadata['width']}x{extra_metadata['height']}"
                except Exception as e:
                    logger.error(f"Error parsing extraMetadata: {e}")
            # If extraMetadata doesn't have all the info, try to get from nodes
            if not gen_params or len(gen_params) < 3:  # At least we want prompt, negative_prompt, and steps
                # Find positive prompt node (CLIPTextEncode titled "Positive")
                positive_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and
                                  v.get('class_type', '').endswith('CLIPTextEncode') and
                                  v.get('_meta', {}).get('title') == 'Positive'}
                if positive_nodes:
                    positive_node = next(iter(positive_nodes.values()))
                    if 'inputs' in positive_node and 'text' in positive_node['inputs']:
                        gen_params['prompt'] = positive_node['inputs']['text']
                # Find negative prompt node (CLIPTextEncode titled "Negative")
                negative_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and
                                  v.get('class_type', '').endswith('CLIPTextEncode') and
                                  v.get('_meta', {}).get('title') == 'Negative'}
                if negative_nodes:
                    negative_node = next(iter(negative_nodes.values()))
                    if 'inputs' in negative_node and 'text' in negative_node['inputs']:
                        gen_params['negative_prompt'] = negative_node['inputs']['text']
                # Find KSampler node for other parameters
                ksampler_nodes = {k: v for k, v in data.items() if isinstance(v, dict) and v.get('class_type') == 'KSampler'}
                if ksampler_nodes:
                    ksampler_node = next(iter(ksampler_nodes.values()))
                    if 'inputs' in ksampler_node:
                        inputs = ksampler_node['inputs']
                        if 'sampler_name' in inputs:
                            gen_params['sampler'] = inputs['sampler_name']
                        if 'steps' in inputs:
                            gen_params['steps'] = inputs['steps']
                        if 'cfg' in inputs:
                            gen_params['cfg_scale'] = inputs['cfg']
                        if 'seed' in inputs:
                            gen_params['seed'] = inputs['seed']
            # Determine base model from loras info
            base_model = None
            if loras:
                # Use the most common base model from loras
                base_models = [lora['baseModel'] for lora in loras if lora.get('baseModel')]
                if base_models:
                    from collections import Counter
                    base_model_counts = Counter(base_models)
                    base_model = base_model_counts.most_common(1)[0][0]
            return {
                'base_model': base_model,
                'loras': loras,
                'checkpoint': checkpoint,
                'gen_params': gen_params,
                'from_comfy_metadata': True
            }
        except Exception as e:
            logger.error(f"Error parsing ComfyUI metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}

View File

@@ -0,0 +1,174 @@
"""Parser for meta format (Lora_N Model hash) metadata."""
import re
import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
logger = logging.getLogger(__name__)
class MetaFormatParser(RecipeMetadataParser):
    """Parser for images with meta format metadata (Lora_N Model hash format).

    The input is A1111-style parameter text whose LoRA references appear as
    indexed fields, e.g. "Lora_0 Model name: X.safetensors, Lora_0 Model hash: ...,
    Lora_0 Strength model: ..., Lora_0 Strength clip: ...".
    """

    # A comment containing at least one "Lora_<n> Model hash:" field is treated
    # as meta-format metadata.
    METADATA_MARKER = r'Lora_\d+ Model hash:'

    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the metadata format.

        Args:
            user_comment: Raw metadata text extracted from an image.

        Returns:
            True if the marker pattern occurs anywhere in the text.
        """
        return re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL) is not None

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from images with meta format metadata.

        Args:
            user_comment: Raw metadata text (prompt, parameters and Lora_N fields).
            recipe_scanner: Optional scanner forwarded to populate_lora_from_civitai.
            civitai_client: Optional client used to look models up by file hash.

        Returns:
            Dict with 'base_model', 'loras', 'gen_params', 'raw_metadata' and
            'from_meta_format' keys, or {'error': ..., 'loras': []} on failure.
        """
        try:
            # Extract prompt and negative prompt
            parts = user_comment.split('Negative prompt:', 1)
            prompt = parts[0].strip()
            # Initialize metadata
            metadata = {"prompt": prompt, "loras": []}
            # Extract negative prompt and parameters if available
            if len(parts) > 1:
                negative_and_params = parts[1]
                # Extract negative prompt - everything until the first parameter (usually "Steps:")
                param_start = re.search(r'([A-Za-z]+): ', negative_and_params)
                if param_start:
                    neg_prompt = negative_and_params[:param_start.start()].strip()
                    metadata["negative_prompt"] = neg_prompt
                    params_section = negative_and_params[param_start.start():]
                else:
                    params_section = negative_and_params
                # Extract key-value parameters (Steps, Sampler, Seed, etc.)
                param_pattern = r'([A-Za-z_0-9 ]+): ([^,]+)'
                params = re.findall(param_pattern, params_section)
                for key, value in params:
                    clean_key = key.strip().lower().replace(' ', '_')
                    metadata[clean_key] = value.strip()
            # Extract LoRA information
            # Pattern to match lora entries: Lora_0 Model name: ArtVador I.safetensors, Lora_0 Model hash: 08f7133a58, etc.
            lora_pattern = r'Lora_(\d+) Model name: ([^,]+), Lora_\1 Model hash: ([^,]+), Lora_\1 Strength model: ([^,]+), Lora_\1 Strength clip: ([^,]+)'
            lora_matches = re.findall(lora_pattern, user_comment)
            # If the regular pattern doesn't match, try a more flexible approach
            if not lora_matches:
                # First find all Lora indices
                lora_indices = set(re.findall(r'Lora_(\d+)', user_comment))
                # For each index, extract the information field by field
                for idx in lora_indices:
                    lora_info = {}
                    # Extract model name
                    name_match = re.search(f'Lora_{idx} Model name: ([^,]+)', user_comment)
                    if name_match:
                        lora_info['name'] = name_match.group(1).strip()
                    # Extract model hash
                    hash_match = re.search(f'Lora_{idx} Model hash: ([^,]+)', user_comment)
                    if hash_match:
                        lora_info['hash'] = hash_match.group(1).strip()
                    # Extract strength model
                    strength_model_match = re.search(f'Lora_{idx} Strength model: ([^,]+)', user_comment)
                    if strength_model_match:
                        lora_info['strength_model'] = float(strength_model_match.group(1).strip())
                    # Extract strength clip
                    strength_clip_match = re.search(f'Lora_{idx} Strength clip: ([^,]+)', user_comment)
                    if strength_clip_match:
                        lora_info['strength_clip'] = float(strength_clip_match.group(1).strip())
                    # Only add if we have at least name and hash
                    if 'name' in lora_info and 'hash' in lora_info:
                        lora_matches.append((idx, lora_info['name'], lora_info['hash'],
                                             str(lora_info.get('strength_model', 1.0)),
                                             str(lora_info.get('strength_clip', 1.0))))
            # Process LoRAs
            base_model_counts = {}
            loras = []
            for match in lora_matches:
                if len(match) == 5:  # Regular pattern match
                    idx, name, hash_value, strength_model, strength_clip = match
                else:  # Flexible approach match
                    continue  # Should not happen now
                # Clean up the values
                name = name.strip()
                if name.endswith('.safetensors'):
                    # Strip the extension by its actual length rather than a
                    # magic index (the previous hard-coded 12 was fragile).
                    name = name[:-len('.safetensors')]
                hash_value = hash_value.strip()
                weight = float(strength_model)  # Use model strength as weight
                # Initialize lora entry with default values
                lora_entry = {
                    'name': name,
                    'type': 'lora',
                    'weight': weight,
                    'existsLocally': False,
                    'localPath': None,
                    'file_name': name,
                    'hash': hash_value,
                    'thumbnailUrl': '/loras_static/images/no-preview.png',
                    'baseModel': '',
                    'size': 0,
                    'downloadUrl': '',
                    'isDeleted': False
                }
                # Get info from Civitai by hash if available
                if civitai_client and hash_value:
                    try:
                        civitai_info = await civitai_client.get_model_by_hash(hash_value)
                        # Populate lora entry with Civitai info
                        populated_entry = await self.populate_lora_from_civitai(
                            lora_entry,
                            civitai_info,
                            recipe_scanner,
                            base_model_counts,
                            hash_value
                        )
                        if populated_entry is None:
                            continue  # Skip invalid LoRA types
                        lora_entry = populated_entry
                    except Exception as e:
                        logger.error(f"Error fetching Civitai info for LoRA hash {hash_value}: {e}")
                loras.append(lora_entry)
            # Extract model information
            model = None
            if 'model' in metadata:
                model = metadata['model']
            # Set base_model to the most common one from civitai_info
            base_model = None
            if base_model_counts:
                base_model = max(base_model_counts.items(), key=lambda x: x[1])[0]
            # Extract generation parameters for recipe metadata
            gen_params = {}
            for key in GEN_PARAM_KEYS:
                if key in metadata:
                    gen_params[key] = metadata.get(key, '')
            # Try to extract size information if available
            if 'width' in metadata and 'height' in metadata:
                gen_params['size'] = f"{metadata['width']}x{metadata['height']}"
            return {
                'base_model': base_model,
                'loras': loras,
                'gen_params': gen_params,
                'raw_metadata': metadata,
                'from_meta_format': True
            }
        except Exception as e:
            logger.error(f"Error parsing meta format metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}

View File

@@ -0,0 +1,114 @@
"""Parser for dedicated recipe metadata format."""
import re
import json
import logging
from typing import Dict, Any
from ...config import config
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
logger = logging.getLogger(__name__)
class RecipeFormatParser(RecipeMetadataParser):
    """Parser for images with dedicated recipe metadata format.

    The recipe is embedded in the user comment as a JSON blob following the
    literal prefix "Recipe metadata: ".
    """

    # Regular expression pattern for extracting recipe metadata
    METADATA_MARKER = r'Recipe metadata: (\{.*\})'

    def is_metadata_matching(self, user_comment: str) -> bool:
        """Check if the user comment matches the metadata format.

        Returns True when the "Recipe metadata: {...}" marker is present.
        """
        return re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL) is not None

    async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
        """Parse metadata from images with dedicated recipe metadata format.

        Args:
            user_comment: Raw metadata text containing the recipe JSON.
            recipe_scanner: Optional scanner used for local-hash lookups and
                forwarded to populate_lora_from_civitai.
            civitai_client: Optional Civitai API client for enriching entries.

        Returns:
            Dict with 'base_model', 'loras', 'gen_params', 'tags', 'title' and
            'from_recipe_metadata' keys, or {'error': ..., 'loras': []} when no
            recipe metadata is found or parsing fails.
        """
        try:
            # Extract recipe metadata from user comment
            try:
                # Look for recipe metadata section
                recipe_match = re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL)
                if not recipe_match:
                    recipe_metadata = None
                else:
                    recipe_json = recipe_match.group(1)
                    recipe_metadata = json.loads(recipe_json)
            except Exception as e:
                logger.error(f"Error extracting recipe metadata: {e}")
                recipe_metadata = None
            if not recipe_metadata:
                return {"error": "No recipe metadata found", "loras": []}
            # Process the recipe metadata
            loras = []
            for lora in recipe_metadata.get('loras', []):
                # Convert recipe lora format to frontend format
                lora_entry = {
                    'id': lora.get('modelVersionId', ''),
                    'name': lora.get('modelName', ''),
                    'version': lora.get('modelVersionName', ''),
                    'type': 'lora',
                    'weight': lora.get('strength', 1.0),
                    'file_name': lora.get('file_name', ''),
                    'hash': lora.get('hash', '')
                }
                # Check if this LoRA exists locally by SHA256 hash
                if lora.get('hash') and recipe_scanner:
                    lora_scanner = recipe_scanner._lora_scanner
                    exists_locally = lora_scanner.has_lora_hash(lora['hash'])
                    if exists_locally:
                        # Resolve the cached entry to fill in local path/preview data
                        lora_cache = await lora_scanner.get_cached_data()
                        lora_item = next((item for item in lora_cache.raw_data if item['sha256'].lower() == lora['hash'].lower()), None)
                        if lora_item:
                            lora_entry['existsLocally'] = True
                            lora_entry['localPath'] = lora_item['file_path']
                            lora_entry['file_name'] = lora_item['file_name']
                            lora_entry['size'] = lora_item['size']
                            lora_entry['thumbnailUrl'] = config.get_preview_static_url(lora_item['preview_url'])
                    else:
                        lora_entry['existsLocally'] = False
                        lora_entry['localPath'] = None
                        # Try to get additional info from Civitai if we have a model version ID
                        if lora.get('modelVersionId') and civitai_client:
                            try:
                                civitai_info_tuple = await civitai_client.get_model_version_info(lora['modelVersionId'])
                                # NOTE(review): the raw return value is passed here without
                                # unpacking — confirm populate_lora_from_civitai accepts it.
                                # Populate lora entry with Civitai info
                                populated_entry = await self.populate_lora_from_civitai(
                                    lora_entry,
                                    civitai_info_tuple,
                                    recipe_scanner,
                                    None,  # No need to track base model counts
                                    lora['hash']
                                )
                                if populated_entry is None:
                                    continue  # Skip invalid LoRA types
                                lora_entry = populated_entry
                            except Exception as e:
                                logger.error(f"Error fetching Civitai info for LoRA: {e}")
                                # Fall back to the placeholder preview image
                                lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png'
                loras.append(lora_entry)
            logger.info(f"Found {len(loras)} loras in recipe metadata")
            # Filter gen_params to only include recognized keys
            filtered_gen_params = {}
            if 'gen_params' in recipe_metadata:
                for key, value in recipe_metadata['gen_params'].items():
                    if key in GEN_PARAM_KEYS:
                        filtered_gen_params[key] = value
            return {
                'base_model': recipe_metadata.get('base_model', ''),
                'loras': loras,
                'gen_params': filtered_gen_params,
                'tags': recipe_metadata.get('tags', []),
                'title': recipe_metadata.get('title', ''),
                'from_recipe_metadata': True
            }
        except Exception as e:
            logger.error(f"Error parsing recipe format metadata: {e}", exc_info=True)
            return {"error": str(e), "loras": []}

View File

@@ -43,7 +43,9 @@ class ApiRoutes:
app.on_startup.append(lambda _: routes.initialize_services())
app.router.add_post('/api/delete_model', routes.delete_model)
app.router.add_post('/api/loras/exclude', routes.exclude_model) # Add new exclude endpoint
app.router.add_post('/api/fetch-civitai', routes.fetch_civitai)
app.router.add_post('/api/relink-civitai', routes.relink_civitai) # Add new relink endpoint
app.router.add_post('/api/replace_preview', routes.replace_preview)
app.router.add_get('/api/loras', routes.get_loras)
app.router.add_post('/api/fetch-all-civitai', routes.fetch_all_civitai)
@@ -69,15 +71,35 @@ class ApiRoutes:
# Add the new trigger words route
app.router.add_post('/loramanager/get_trigger_words', routes.get_trigger_words)
# Add new endpoint for letter counts
app.router.add_get('/api/loras/letter-counts', routes.get_letter_counts)
# Add new endpoints for copying lora data
app.router.add_get('/api/loras/get-notes', routes.get_lora_notes)
app.router.add_get('/api/loras/get-trigger-words', routes.get_lora_trigger_words)
# Add update check routes
UpdateRoutes.setup_routes(app)
# Add new endpoints for finding duplicates
app.router.add_get('/api/loras/find-duplicates', routes.find_duplicate_loras)
app.router.add_get('/api/loras/find-filename-conflicts', routes.find_filename_conflicts)
# Add new endpoint for bulk deleting loras
app.router.add_post('/api/loras/bulk-delete', routes.bulk_delete_loras)
async def delete_model(self, request: web.Request) -> web.Response:
"""Handle model deletion request"""
if self.scanner is None:
self.scanner = await ServiceRegistry.get_lora_scanner()
return await ModelRouteUtils.handle_delete_model(request, self.scanner)
async def exclude_model(self, request: web.Request) -> web.Response:
"""Handle model exclusion request"""
if self.scanner is None:
self.scanner = await ServiceRegistry.get_lora_scanner()
return await ModelRouteUtils.handle_exclude_model(request, self.scanner)
async def fetch_civitai(self, request: web.Request) -> web.Response:
"""Handle CivitAI metadata fetch request"""
if self.scanner is None:
@@ -92,8 +114,11 @@ class ApiRoutes:
async def scan_loras(self, request: web.Request) -> web.Response:
"""Force a rescan of LoRA files"""
try:
await self.scanner.get_cached_data(force_refresh=True)
try:
# Get full_rebuild parameter from query string, default to false
full_rebuild = request.query.get('full_rebuild', 'false').lower() == 'true'
await self.scanner.get_cached_data(force_refresh=True, rebuild_cache=full_rebuild)
return web.json_response({"status": "success", "message": "LoRA scan completed"})
except Exception as e:
logger.error(f"Error in scan_loras: {e}", exc_info=True)
@@ -126,6 +151,9 @@ class ApiRoutes:
tags = request.query.get('tags', None)
favorites_only = request.query.get('favorites_only', 'false').lower() == 'true' # New parameter
# New parameter for alphabet filtering
first_letter = request.query.get('first_letter', None)
# New parameters for recipe filtering
lora_hash = request.query.get('lora_hash', None)
lora_hashes = request.query.get('lora_hashes', None)
@@ -156,7 +184,8 @@ class ApiRoutes:
tags=filters.get('tags', None),
search_options=search_options,
hash_filters=hash_filters,
favorites_only=favorites_only # Pass favorites_only parameter
favorites_only=favorites_only, # Pass favorites_only parameter
first_letter=first_letter # Pass the new first_letter parameter
)
# Get all available folders from cache
@@ -372,10 +401,10 @@ class ApiRoutes:
versions = response.get('modelVersions', [])
model_type = response.get('type', '')
# Check model type - should be LORA
if model_type.lower() != 'lora':
# Check model type - should be LORA or LoCon
if model_type.lower() not in ['lora', 'locon']:
return web.json_response({
'error': f"Model type mismatch. Expected LORA, got {model_type}"
'error': f"Model type mismatch. Expected LORA or LoCon, got {model_type}"
}, status=400)
# Check local availability for each version
@@ -494,7 +523,7 @@ class ApiRoutes:
logger.warning(f"Early access download failed: {error_message}")
return web.Response(
status=401, # Use 401 status code to match Civitai's response
text=f"Early Access Restriction: {error_message}"
text=error_message
)
return web.Response(status=500, text=error_message)
@@ -781,11 +810,13 @@ class ApiRoutes:
# Check if we already have the description stored in metadata
description = None
tags = []
creator = {}
if file_path:
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
description = metadata.get('modelDescription')
tags = metadata.get('tags', [])
creator = metadata.get('creator', {})
# If description is not in metadata, fetch from CivitAI
if not description:
@@ -795,6 +826,7 @@ class ApiRoutes:
if (model_metadata):
description = model_metadata.get('description')
tags = model_metadata.get('tags', [])
creator = model_metadata.get('creator', {})
# Save the metadata to file if we have a file path and got metadata
if file_path:
@@ -804,6 +836,7 @@ class ApiRoutes:
metadata['modelDescription'] = description
metadata['tags'] = tags
metadata['creator'] = creator
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata, f, indent=2, ensure_ascii=False)
@@ -814,7 +847,8 @@ class ApiRoutes:
return web.json_response({
'success': True,
'description': description or "<p>No model description available.</p>",
'tags': tags
'tags': tags,
'creator': creator
})
except Exception as e:
@@ -1045,3 +1079,216 @@ class ApiRoutes:
"success": False,
"error": str(e)
}, status=500)
    async def get_letter_counts(self, request: web.Request) -> web.Response:
        """Get count of loras for each letter of the alphabet.

        Returns a JSON response {'success': True, 'letter_counts': {...}} on
        success, or {'success': False, 'error': ...} with HTTP 500 on failure.
        """
        try:
            # Lazily resolve the scanner service on first use
            if self.scanner is None:
                self.scanner = await ServiceRegistry.get_lora_scanner()
            # Get letter counts
            letter_counts = await self.scanner.get_letter_counts()
            return web.json_response({
                'success': True,
                'letter_counts': letter_counts
            })
        except Exception as e:
            logger.error(f"Error getting letter counts: {e}")
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)
    async def get_lora_notes(self, request: web.Request) -> web.Response:
        """Get notes for a specific LoRA file.

        Query params:
            name: the LoRA file name to look up (required).

        Returns:
            200 {'success': True, 'notes': ...} when found;
            400 plain text when 'name' is missing;
            404 {'success': False, ...} when the LoRA is not in the cache;
            500 {'success': False, ...} on unexpected errors.
        """
        try:
            # Lazily resolve the scanner service on first use
            if self.scanner is None:
                self.scanner = await ServiceRegistry.get_lora_scanner()
            # Get lora file name from query parameters
            lora_name = request.query.get('name')
            if not lora_name:
                return web.Response(text='Lora file name is required', status=400)
            # Get cache data
            cache = await self.scanner.get_cached_data()
            # Search for the lora in cache data (linear scan over cached entries)
            for lora in cache.raw_data:
                file_name = lora['file_name']
                if file_name == lora_name:
                    notes = lora.get('notes', '')
                    return web.json_response({
                        'success': True,
                        'notes': notes
                    })
            # If lora not found
            return web.json_response({
                'success': False,
                'error': 'LoRA not found in cache'
            }, status=404)
        except Exception as e:
            logger.error(f"Error getting lora notes: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)
    async def get_lora_trigger_words(self, request: web.Request) -> web.Response:
        """Get trigger words for a specific LoRA file.

        Query params:
            name: the LoRA file name to look up (required).

        Returns:
            200 {'success': True, 'trigger_words': [...]} when found (words come
            from the cached civitai 'trainedWords' field);
            400 plain text when 'name' is missing;
            404 {'success': False, ...} when the LoRA is not in the cache;
            500 {'success': False, ...} on unexpected errors.
        """
        try:
            # Lazily resolve the scanner service on first use
            if self.scanner is None:
                self.scanner = await ServiceRegistry.get_lora_scanner()
            # Get lora file name from query parameters
            lora_name = request.query.get('name')
            if not lora_name:
                return web.Response(text='Lora file name is required', status=400)
            # Get cache data
            cache = await self.scanner.get_cached_data()
            # Search for the lora in cache data (linear scan over cached entries)
            for lora in cache.raw_data:
                file_name = lora['file_name']
                if file_name == lora_name:
                    # Get trigger words from civitai data
                    civitai_data = lora.get('civitai', {})
                    trigger_words = civitai_data.get('trainedWords', [])
                    return web.json_response({
                        'success': True,
                        'trigger_words': trigger_words
                    })
            # If lora not found
            return web.json_response({
                'success': False,
                'error': 'LoRA not found in cache'
            }, status=404)
        except Exception as e:
            logger.error(f"Error getting lora trigger words: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)
    async def find_duplicate_loras(self, request: web.Request) -> web.Response:
        """Find loras with duplicate SHA256 hashes.

        Groups cached models sharing a hash; the "primary" model tracked by the
        hash index is prepended to each group when it is not already listed.

        Returns:
            200 {'success': True, 'duplicates': [...], 'count': N};
            500 {'success': False, 'error': ...} on failure.
        """
        try:
            # Lazily resolve the scanner service on first use
            if self.scanner is None:
                self.scanner = await ServiceRegistry.get_lora_scanner()
            # Get duplicate hashes from hash index
            duplicates = self.scanner._hash_index.get_duplicate_hashes()
            # Format the response
            result = []
            cache = await self.scanner.get_cached_data()
            for sha256, paths in duplicates.items():
                group = {
                    "hash": sha256,
                    "models": []
                }
                # Find matching models for each duplicate path
                for path in paths:
                    model = next((m for m in cache.raw_data if m['file_path'] == path), None)
                    if model:
                        group["models"].append(self._format_lora_response(model))
                # Add the primary model too
                primary_path = self.scanner._hash_index.get_path(sha256)
                if primary_path and primary_path not in paths:
                    primary_model = next((m for m in cache.raw_data if m['file_path'] == primary_path), None)
                    if primary_model:
                        group["models"].insert(0, self._format_lora_response(primary_model))
                if group["models"]:  # Only include if we found models
                    result.append(group)
            return web.json_response({
                "success": True,
                "duplicates": result,
                "count": len(result)
            })
        except Exception as e:
            logger.error(f"Error finding duplicate loras: {e}", exc_info=True)
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)
    async def find_filename_conflicts(self, request: web.Request) -> web.Response:
        """Find loras with conflicting filenames.

        Groups cached models sharing a file name; the model recorded in the
        main hash index for that name is prepended when not already listed.

        Returns:
            200 {'success': True, 'conflicts': [...], 'count': N};
            500 {'success': False, 'error': ...} on failure.
        """
        try:
            # Lazily resolve the scanner service on first use
            if self.scanner is None:
                self.scanner = await ServiceRegistry.get_lora_scanner()
            # Get duplicate filenames from hash index
            duplicates = self.scanner._hash_index.get_duplicate_filenames()
            # Format the response
            result = []
            cache = await self.scanner.get_cached_data()
            for filename, paths in duplicates.items():
                group = {
                    "filename": filename,
                    "models": []
                }
                # Find matching models for each path
                for path in paths:
                    model = next((m for m in cache.raw_data if m['file_path'] == path), None)
                    if model:
                        group["models"].append(self._format_lora_response(model))
                # Find the model from the main index too
                hash_val = self.scanner._hash_index.get_hash_by_filename(filename)
                if hash_val:
                    main_path = self.scanner._hash_index.get_path(hash_val)
                    if main_path and main_path not in paths:
                        main_model = next((m for m in cache.raw_data if m['file_path'] == main_path), None)
                        if main_model:
                            group["models"].insert(0, self._format_lora_response(main_model))
                if group["models"]:  # Only include if we found models
                    result.append(group)
            return web.json_response({
                "success": True,
                "conflicts": result,
                "count": len(result)
            })
        except Exception as e:
            logger.error(f"Error finding filename conflicts: {e}", exc_info=True)
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)
    async def bulk_delete_loras(self, request: web.Request) -> web.Response:
        """Handle bulk deletion of lora models.

        Delegates to ModelRouteUtils.handle_bulk_delete_models with the lora
        scanner; returns HTTP 500 with an error payload on unexpected failure.
        """
        try:
            # Lazily resolve the scanner service on first use
            if self.scanner is None:
                self.scanner = await ServiceRegistry.get_lora_scanner()
            return await ModelRouteUtils.handle_bulk_delete_models(request, self.scanner)
        except Exception as e:
            logger.error(f"Error in bulk delete loras: {e}", exc_info=True)
            return web.json_response({
                'success': False,
                'error': str(e)
            }, status=500)
    async def relink_civitai(self, request: web.Request) -> web.Response:
        """Handle CivitAI metadata re-linking request by model version ID for LoRAs.

        Delegates to ModelRouteUtils.handle_relink_civitai with the lora scanner.
        """
        # Lazily resolve the scanner service on first use
        if self.scanner is None:
            self.scanner = await ServiceRegistry.get_lora_scanner()
        return await ModelRouteUtils.handle_relink_civitai(request, self.scanner)

View File

@@ -49,7 +49,9 @@ class CheckpointsRoutes:
# Add new routes for model management similar to LoRA routes
app.router.add_post('/api/checkpoints/delete', self.delete_model)
app.router.add_post('/api/checkpoints/exclude', self.exclude_model) # Add new exclude endpoint
app.router.add_post('/api/checkpoints/fetch-civitai', self.fetch_civitai)
app.router.add_post('/api/checkpoints/relink-civitai', self.relink_civitai) # Add new relink endpoint
app.router.add_post('/api/checkpoints/replace-preview', self.replace_preview)
app.router.add_post('/api/checkpoints/download', self.download_checkpoint)
app.router.add_post('/api/checkpoints/save-metadata', self.save_metadata) # Add new route
@@ -57,6 +59,13 @@ class CheckpointsRoutes:
# Add new WebSocket endpoint for checkpoint progress
app.router.add_get('/ws/checkpoint-progress', ws_manager.handle_checkpoint_connection)
# Add new routes for finding duplicates and filename conflicts
app.router.add_get('/api/checkpoints/find-duplicates', self.find_duplicate_checkpoints)
app.router.add_get('/api/checkpoints/find-filename-conflicts', self.find_filename_conflicts)
# Add new endpoint for bulk deleting checkpoints
app.router.add_post('/api/checkpoints/bulk-delete', self.bulk_delete_checkpoints)
async def get_checkpoints(self, request):
"""Get paginated checkpoint data"""
try:
@@ -419,7 +428,10 @@ class CheckpointsRoutes:
async def scan_checkpoints(self, request):
"""Force a rescan of checkpoint files"""
try:
await self.scanner.get_cached_data(force_refresh=True)
# Get the full_rebuild parameter and convert to bool, default to False
full_rebuild = request.query.get('full_rebuild', 'false').lower() == 'true'
await self.scanner.get_cached_data(force_refresh=True, rebuild_cache=full_rebuild)
return web.json_response({"status": "success", "message": "Checkpoint scan completed"})
except Exception as e:
logger.error(f"Error in scan_checkpoints: {e}", exc_info=True)
@@ -429,7 +441,7 @@ class CheckpointsRoutes:
"""Get detailed information for a specific checkpoint by name"""
try:
name = request.match_info.get('name', '')
checkpoint_info = await self.scanner.get_checkpoint_info_by_name(name)
checkpoint_info = await self.scanner.get_model_info_by_name(name)
if checkpoint_info:
return web.json_response(checkpoint_info)
@@ -499,6 +511,10 @@ class CheckpointsRoutes:
async def delete_model(self, request: web.Request) -> web.Response:
"""Handle checkpoint model deletion request"""
return await ModelRouteUtils.handle_delete_model(request, self.scanner)
async def exclude_model(self, request: web.Request) -> web.Response:
"""Handle checkpoint model exclusion request"""
return await ModelRouteUtils.handle_exclude_model(request, self.scanner)
async def fetch_civitai(self, request: web.Request) -> web.Response:
"""Handle CivitAI metadata fetch request for checkpoints"""
@@ -653,7 +669,7 @@ class CheckpointsRoutes:
model_type = response.get('type', '')
# Check model type - should be Checkpoint
if model_type.lower() != 'checkpoint':
if (model_type.lower() != 'checkpoint'):
return web.json_response({
'error': f"Model type mismatch. Expected Checkpoint, got {model_type}"
}, status=400)
@@ -687,3 +703,116 @@ class CheckpointsRoutes:
except Exception as e:
logger.error(f"Error fetching checkpoint model versions: {e}")
return web.Response(status=500, text=str(e))
    async def find_duplicate_checkpoints(self, request: web.Request) -> web.Response:
        """Find checkpoints with duplicate SHA256 hashes.

        Groups cached checkpoints sharing a hash; the "primary" model tracked
        by the hash index is prepended to each group when not already listed.

        Returns:
            200 {'success': True, 'duplicates': [...], 'count': N};
            500 {'success': False, 'error': ...} on failure.
        """
        try:
            # Lazily resolve the scanner service on first use
            if self.scanner is None:
                self.scanner = await ServiceRegistry.get_checkpoint_scanner()
            # Get duplicate hashes from hash index
            duplicates = self.scanner._hash_index.get_duplicate_hashes()
            # Format the response
            result = []
            cache = await self.scanner.get_cached_data()
            for sha256, paths in duplicates.items():
                group = {
                    "hash": sha256,
                    "models": []
                }
                # Find matching models for each path
                for path in paths:
                    model = next((m for m in cache.raw_data if m['file_path'] == path), None)
                    if model:
                        group["models"].append(self._format_checkpoint_response(model))
                # Add the primary model too
                primary_path = self.scanner._hash_index.get_path(sha256)
                if primary_path and primary_path not in paths:
                    primary_model = next((m for m in cache.raw_data if m['file_path'] == primary_path), None)
                    if primary_model:
                        group["models"].insert(0, self._format_checkpoint_response(primary_model))
                if group["models"]:
                    result.append(group)
            return web.json_response({
                "success": True,
                "duplicates": result,
                "count": len(result)
            })
        except Exception as e:
            logger.error(f"Error finding duplicate checkpoints: {e}", exc_info=True)
            return web.json_response({
                "success": False,
                "error": str(e)
            }, status=500)
async def find_filename_conflicts(self, request: web.Request) -> web.Response:
    """Find checkpoints whose file names collide with each other.

    Returns one group per conflicting filename; the model the main
    hash index points at (when not already covered by the duplicate
    paths) is placed at the front of its group.
    """
    try:
        if self.scanner is None:
            self.scanner = await ServiceRegistry.get_checkpoint_scanner()

        filename_groups = self.scanner._hash_index.get_duplicate_filenames()
        cache = await self.scanner.get_cached_data()

        # Map file path -> cached model; first occurrence wins, which
        # matches the semantics of scanning the raw list front to back.
        by_path = {}
        for entry in cache.raw_data:
            by_path.setdefault(entry['file_path'], entry)

        conflicts = []
        for filename, paths in filename_groups.items():
            models = [
                self._format_checkpoint_response(by_path[p])
                for p in paths
                if p in by_path
            ]
            # Also surface the model registered in the main index, when
            # it is not already one of the conflicting paths.
            hash_val = self.scanner._hash_index.get_hash_by_filename(filename)
            if hash_val:
                main_path = self.scanner._hash_index.get_path(hash_val)
                if main_path and main_path not in paths:
                    main_model = by_path.get(main_path)
                    if main_model:
                        models.insert(0, self._format_checkpoint_response(main_model))
            if models:
                conflicts.append({"filename": filename, "models": models})

        return web.json_response({
            "success": True,
            "conflicts": conflicts,
            "count": len(conflicts)
        })
    except Exception as e:
        logger.error(f"Error finding filename conflicts: {e}", exc_info=True)
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=500)
async def bulk_delete_checkpoints(self, request: web.Request) -> web.Response:
    """Handle bulk deletion of checkpoint models.

    Delegates the actual work to ModelRouteUtils after making sure a
    checkpoint scanner is available.
    """
    try:
        scanner = self.scanner
        if scanner is None:
            # Lazily resolve and cache the scanner on first use.
            scanner = await ServiceRegistry.get_checkpoint_scanner()
            self.scanner = scanner
        return await ModelRouteUtils.handle_bulk_delete_models(request, scanner)
    except Exception as e:
        logger.error(f"Error in bulk delete checkpoints: {e}", exc_info=True)
        return web.json_response({
            'success': False,
            'error': str(e)
        }, status=500)
async def relink_civitai(self, request: web.Request) -> web.Response:
    """Handle CivitAI metadata re-linking request by model version ID for checkpoints.

    Lazily initializes the checkpoint scanner and wraps the delegated
    call in error handling, consistent with the sibling handlers
    (bulk_delete_checkpoints, find_duplicate_checkpoints, ...); the
    original passed a possibly-None scanner straight through.
    """
    try:
        if self.scanner is None:
            self.scanner = await ServiceRegistry.get_checkpoint_scanner()
        return await ModelRouteUtils.handle_relink_civitai(request, self.scanner)
    except Exception as e:
        logger.error(f"Error re-linking checkpoint to CivitAI: {e}", exc_info=True)
        return web.json_response({
            'success': False,
            'error': str(e)
        }, status=500)

File diff suppressed because it is too large Load Diff

View File

@@ -1,17 +1,13 @@
import logging
import os
import asyncio
import json
import time
import tkinter as tk
from tkinter import filedialog
import aiohttp
from server import PromptServer # type: ignore
from aiohttp import web
from ..services.settings_manager import settings
from ..utils.usage_stats import UsageStats
from ..services.service_registry import ServiceRegistry
from ..utils.exif_utils import ExifUtils
from ..utils.constants import EXAMPLE_IMAGE_WIDTH, SUPPORTED_MEDIA_EXTENSIONS
from ..utils.lora_metadata import extract_trained_words
from ..config import config
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS
import re
logger = logging.getLogger(__name__)
@@ -27,7 +23,8 @@ download_progress = {
'last_error': None,
'start_time': None,
'end_time': None,
'processed_models': set() # Track models that have been processed
'processed_models': set(), # Track models that have been processed
'refreshed_models': set() # Track models that had metadata refreshed
}
class MiscRoutes:
@@ -37,17 +34,72 @@ class MiscRoutes:
@staticmethod
def setup_routes(app):
    """Register miscellaneous routes on the aiohttp application."""
    # (verb, path, handler) in the original registration order; the
    # paths are all distinct so the table form is purely cosmetic.
    routes = (
        ('post', '/api/settings', MiscRoutes.update_settings),
        ('post', '/api/clear-cache', MiscRoutes.clear_cache),
        # Usage stats routes
        ('post', '/api/update-usage-stats', MiscRoutes.update_usage_stats),
        ('get', '/api/get-usage-stats', MiscRoutes.get_usage_stats),
        # Example images download routes
        ('post', '/api/download-example-images', MiscRoutes.download_example_images),
        ('get', '/api/example-images-status', MiscRoutes.get_example_images_status),
        ('post', '/api/pause-example-images', MiscRoutes.pause_example_images),
        ('post', '/api/resume-example-images', MiscRoutes.resume_example_images),
        # Lora code update endpoint
        ('post', '/api/update-lora-code', MiscRoutes.update_lora_code),
        # Trained words and model example files
        ('get', '/api/trained-words', MiscRoutes.get_trained_words),
        ('get', '/api/model-example-files', MiscRoutes.get_model_example_files),
    )
    for verb, path, handler in routes:
        register = app.router.add_post if verb == 'post' else app.router.add_get
        register(path, handler)
@staticmethod
async def clear_cache(request):
    """Clear all cache files from the cache folder.

    Removes every ``.msgpack`` file from the project-level ``cache``
    directory; the directory itself is intentionally kept in place.
    Returns the list of deleted file names, or a 500 response on the
    first deletion failure.
    """
    try:
        # Cache folder lives at <project root>/cache, three directory
        # levels up from this module.
        project_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        cache_folder = os.path.join(project_dir, 'cache')

        # Nothing to do when the folder was never created.
        if not os.path.exists(cache_folder):
            logger.info("Cache folder does not exist, nothing to clear")
            return web.json_response({'success': True, 'message': 'No cache folder found'})

        # Snapshot the file list before deleting, for reporting.
        cache_files = [f for f in os.listdir(cache_folder) if os.path.isfile(os.path.join(cache_folder, f))]
        deleted_files = []

        # Delete each .msgpack file in the cache folder.
        for filename in cache_files:
            if filename.endswith('.msgpack'):
                file_path = os.path.join(cache_folder, filename)
                try:
                    os.remove(file_path)
                    deleted_files.append(filename)
                    # Fix: interpolate the actual file name; the log and
                    # error messages previously contained the literal
                    # placeholder text "(unknown)".
                    logger.info(f"Deleted cache file: {filename}")
                except Exception as e:
                    logger.error(f"Failed to delete {filename}: {e}")
                    return web.json_response({
                        'success': False,
                        'error': f"Failed to delete {filename}: {str(e)}"
                    }, status=500)

        # If we want to completely remove the cache folder too (optional,
        # but we'll keep the folder structure in place here)
        # shutil.rmtree(cache_folder)

        return web.json_response({
            'success': True,
            'message': f"Successfully cleared {len(deleted_files)} cache files",
            'deleted_files': deleted_files
        })
    except Exception as e:
        logger.error(f"Error clearing cache files: {e}", exc_info=True)
        return web.json_response({
            'success': False,
            'error': str(e)
        }, status=500)
@staticmethod
async def update_settings(request):
"""Update application settings"""
@@ -120,10 +172,14 @@ class MiscRoutes:
usage_stats = UsageStats()
stats = await usage_stats.get_stats()
return web.json_response({
# Add version information to help clients handle format changes
stats_response = {
'success': True,
'data': stats
})
'data': stats,
'format_version': 2 # Indicate this is the new format with history
}
return web.json_response(stats_response)
except Exception as e:
logger.error(f"Failed to get usage stats: {e}", exc_info=True)
@@ -133,464 +189,217 @@ class MiscRoutes:
}, status=500)
@staticmethod
async def download_example_images(request):
async def update_lora_code(request):
"""
Download example images for models from Civitai
Update Lora code in ComfyUI nodes
Expects a JSON body with:
{
"output_dir": "path/to/output", # Base directory to save example images
"optimize": true, # Whether to optimize images (default: true)
"model_types": ["lora", "checkpoint"], # Model types to process (default: both)
"delay": 1.0 # Delay between downloads to avoid rate limiting (default: 1.0)
"node_ids": [123, 456], # Optional - List of node IDs to update (for browser mode)
"lora_code": "<lora:modelname:1.0>", # The Lora code to send
"mode": "append" # or "replace" - whether to append or replace existing code
}
"""
global download_task, is_downloading, download_progress
if is_downloading:
# Create a copy for JSON serialization
response_progress = download_progress.copy()
response_progress['processed_models'] = list(download_progress['processed_models'])
return web.json_response({
'success': False,
'error': 'Download already in progress',
'status': response_progress
}, status=400)
try:
# Parse the request body
data = await request.json()
output_dir = data.get('output_dir')
optimize = data.get('optimize', True)
model_types = data.get('model_types', ['lora', 'checkpoint'])
delay = float(data.get('delay', 0.2))
node_ids = data.get('node_ids')
lora_code = data.get('lora_code', '')
mode = data.get('mode', 'append')
if not output_dir:
if not lora_code:
return web.json_response({
'success': False,
'error': 'Missing output_dir parameter'
'error': 'Missing lora_code parameter'
}, status=400)
# Create the output directory
os.makedirs(output_dir, exist_ok=True)
results = []
# Initialize progress tracking
download_progress['total'] = 0
download_progress['completed'] = 0
download_progress['current_model'] = ''
download_progress['status'] = 'running'
download_progress['errors'] = []
download_progress['last_error'] = None
download_progress['start_time'] = time.time()
download_progress['end_time'] = None
# Get the processed models list from a file if it exists
progress_file = os.path.join(output_dir, '.download_progress.json')
if os.path.exists(progress_file):
# Desktop mode: no specific node_ids provided
if node_ids is None:
try:
with open(progress_file, 'r', encoding='utf-8') as f:
saved_progress = json.load(f)
download_progress['processed_models'] = set(saved_progress.get('processed_models', []))
logger.info(f"Loaded previous progress, {len(download_progress['processed_models'])} models already processed")
# Send broadcast message with id=-1 to all Lora Loader nodes
PromptServer.instance.send_sync("lora_code_update", {
"id": -1,
"lora_code": lora_code,
"mode": mode
})
results.append({
'node_id': 'broadcast',
'success': True
})
except Exception as e:
logger.error(f"Failed to load progress file: {e}")
download_progress['processed_models'] = set()
logger.error(f"Error broadcasting lora code: {e}")
results.append({
'node_id': 'broadcast',
'success': False,
'error': str(e)
})
else:
download_progress['processed_models'] = set()
# Start the download task
is_downloading = True
download_task = asyncio.create_task(
MiscRoutes._download_all_example_images(
output_dir,
optimize,
model_types,
delay
)
)
# Create a copy for JSON serialization
response_progress = download_progress.copy()
response_progress['processed_models'] = list(download_progress['processed_models'])
# Browser mode: send to specific nodes
for node_id in node_ids:
try:
# Send the message to the frontend
PromptServer.instance.send_sync("lora_code_update", {
"id": node_id,
"lora_code": lora_code,
"mode": mode
})
results.append({
'node_id': node_id,
'success': True
})
except Exception as e:
logger.error(f"Error sending lora code to node {node_id}: {e}")
results.append({
'node_id': node_id,
'success': False,
'error': str(e)
})
return web.json_response({
'success': True,
'message': 'Download started',
'status': response_progress
'results': results
})
except Exception as e:
logger.error(f"Failed to start example images download: {e}", exc_info=True)
logger.error(f"Failed to update lora code: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
@staticmethod
async def get_trained_words(request):
    """
    Get trained words from a safetensors file, sorted by frequency.

    Expects a query parameter:
        file_path: Path to the safetensors file

    Returns the extracted trained words and class tokens, or an error
    response (400 for bad input, 404 for a missing file, 500 on
    unexpected failure).

    NOTE(review): reconstructed from a diff-interleaved source region
    that mixed this method with removed download-control handlers;
    behavior assembled from the added-side lines only.
    """
    try:
        # Get file path from query parameters
        file_path = request.query.get('file_path')
        if not file_path:
            return web.json_response({
                'success': False,
                'error': 'Missing file_path parameter'
            }, status=400)

        # Check if file exists and is a safetensors file
        if not os.path.exists(file_path):
            return web.json_response({
                'success': False,
                'error': f"File not found: {file_path}"
            }, status=404)

        if not file_path.lower().endswith('.safetensors'):
            return web.json_response({
                'success': False,
                'error': 'File is not a safetensors file'
            }, status=400)

        # Extract trained words and class_tokens
        trained_words, class_tokens = await extract_trained_words(file_path)

        # Return result with both trained words and class tokens
        return web.json_response({
            'success': True,
            'trained_words': trained_words,
            'class_tokens': class_tokens
        })
    except Exception as e:
        logger.error(f"Failed to get trained words: {e}", exc_info=True)
        return web.json_response({
            'success': False,
            'error': str(e)
        }, status=500)
@staticmethod
async def resume_example_images(request):
"""Resume the example images download"""
global download_progress
async def get_model_example_files(request):
"""
Get list of example image files for a specific model based on file path
if not is_downloading:
return web.json_response({
'success': False,
'error': 'No download in progress'
}, status=400)
Expects:
- file_path in query parameters
if download_progress['status'] == 'paused':
download_progress['status'] = 'running'
Returns:
- List of image files with their paths as static URLs
"""
try:
# Get the model file path from query parameters
file_path = request.query.get('file_path')
if not file_path:
return web.json_response({
'success': False,
'error': 'Missing file_path parameter'
}, status=400)
# Extract directory and base filename
model_dir = os.path.dirname(file_path)
model_filename = os.path.basename(file_path)
model_name = os.path.splitext(model_filename)[0]
# Check if the directory exists
if not os.path.exists(model_dir):
return web.json_response({
'success': False,
'error': 'Model directory not found',
'files': []
}, status=404)
# Look for files matching the pattern modelname.example.<index>.<ext>
files = []
pattern = f"{model_name}.example."
for file in os.listdir(model_dir):
file_lower = file.lower()
if file_lower.startswith(pattern.lower()):
file_full_path = os.path.join(model_dir, file)
if os.path.isfile(file_full_path):
# Check if the file is a supported media file
file_ext = os.path.splitext(file)[1].lower()
if (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']):
# Extract the index from the filename
try:
# Extract the part after '.example.' and before file extension
index_part = file[len(pattern):].split('.')[0]
# Try to parse it as an integer
index = int(index_part)
except (ValueError, IndexError):
# If we can't parse the index, use infinity to sort at the end
index = float('inf')
# Convert file path to static URL
static_url = config.get_preview_static_url(file_full_path)
files.append({
'name': file,
'path': static_url,
'extension': file_ext,
'is_video': file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'],
'index': index
})
# Sort files by their index for consistent ordering
files.sort(key=lambda x: x['index'])
# Remove the index field as it's only used for sorting
for file in files:
file.pop('index', None)
return web.json_response({
'success': True,
'message': 'Download resumed'
'files': files
})
else:
except Exception as e:
logger.error(f"Failed to get model example files: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': f"Download is in '{download_progress['status']}' state, cannot resume"
}, status=400)
@staticmethod
async def _download_all_example_images(output_dir, optimize, model_types, delay):
"""Download example images for all models
Args:
output_dir: Base directory to save example images
optimize: Whether to optimize images
model_types: List of model types to process
delay: Delay between downloads to avoid rate limiting
"""
global is_downloading, download_progress
# Create an independent session for downloading example images
# This avoids interference with the CivitAI client's session
connector = aiohttp.TCPConnector(
ssl=True,
limit=3,
force_close=False,
enable_cleanup_closed=True
)
timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=60)
# Create a dedicated session just for this download task
independent_session = aiohttp.ClientSession(
connector=connector,
trust_env=True,
timeout=timeout
)
try:
# Get the scanners
scanners = []
if 'lora' in model_types:
lora_scanner = await ServiceRegistry.get_lora_scanner()
scanners.append(('lora', lora_scanner))
if 'checkpoint' in model_types:
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
scanners.append(('checkpoint', checkpoint_scanner))
# Get all models from all scanners
all_models = []
for scanner_type, scanner in scanners:
cache = await scanner.get_cached_data()
if cache and cache.raw_data:
for model in cache.raw_data:
# Only process models with images and a valid sha256
if model.get('civitai') and model.get('civitai', {}).get('images') and model.get('sha256'):
all_models.append((scanner_type, model))
# Update total count
download_progress['total'] = len(all_models)
logger.info(f"Found {download_progress['total']} models with example images")
# Process each model
for scanner_type, model in all_models:
# Check if download is paused
while download_progress['status'] == 'paused':
await asyncio.sleep(1)
# Check if download should continue
if download_progress['status'] != 'running':
logger.info(f"Download stopped: {download_progress['status']}")
break
model_success = True # Track if all images for this model download successfully
try:
# Update current model info
model_hash = model.get('sha256', '').lower()
model_name = model.get('model_name', 'Unknown')
model_file_path = model.get('file_path', '')
model_file_name = model.get('file_name', '')
download_progress['current_model'] = f"{model_name} ({model_hash[:8]})"
# Skip if already processed
if model_hash in download_progress['processed_models']:
logger.debug(f"Skipping already processed model: {model_name}")
download_progress['completed'] += 1
continue
# Create model directory
model_dir = os.path.join(output_dir, model_hash)
os.makedirs(model_dir, exist_ok=True)
# Process images for this model
images = model.get('civitai', {}).get('images', [])
if not images:
logger.debug(f"No images found for model: {model_name}")
download_progress['processed_models'].add(model_hash)
download_progress['completed'] += 1
continue
# First check if we have local example images for this model
local_images_processed = False
if model_file_path:
try:
model_dir_path = os.path.dirname(model_file_path)
local_images = []
# Look for files with pattern: filename.example.*.ext
if model_file_name:
example_prefix = f"{model_file_name}.example."
if os.path.exists(model_dir_path):
for file in os.listdir(model_dir_path):
file_lower = file.lower()
if file_lower.startswith(example_prefix.lower()):
file_ext = os.path.splitext(file_lower)[1]
is_supported = (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'])
if is_supported:
local_images.append(os.path.join(model_dir_path, file))
# Process local images if found
if local_images:
logger.info(f"Found {len(local_images)} local example images for {model_name}")
for i, local_image_path in enumerate(local_images, 1):
local_ext = os.path.splitext(local_image_path)[1].lower()
save_filename = f"image_{i}{local_ext}"
save_path = os.path.join(model_dir, save_filename)
# Skip if already exists in output directory
if os.path.exists(save_path):
logger.debug(f"File already exists in output: {save_path}")
continue
# Handle image processing based on file type and optimize setting
is_image = local_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
if is_image and optimize:
# Optimize the image
with open(local_image_path, 'rb') as img_file:
image_data = img_file.read()
optimized_data, ext = ExifUtils.optimize_image(
image_data,
target_width=EXAMPLE_IMAGE_WIDTH,
format='webp',
quality=85,
preserve_metadata=False
)
# Update save filename if format changed
if ext == '.webp':
save_filename = os.path.splitext(save_filename)[0] + '.webp'
save_path = os.path.join(model_dir, save_filename)
# Save the optimized image
with open(save_path, 'wb') as f:
f.write(optimized_data)
else:
# For videos or unoptimized images, copy directly
with open(local_image_path, 'rb') as src_file:
with open(save_path, 'wb') as dst_file:
dst_file.write(src_file.read())
# Mark as successfully processed if all local images were processed
download_progress['processed_models'].add(model_hash)
local_images_processed = True
logger.info(f"Successfully processed local examples for {model_name}")
except Exception as e:
error_msg = f"Error processing local examples for {model_name}: {str(e)}"
logger.error(error_msg)
download_progress['errors'].append(error_msg)
download_progress['last_error'] = error_msg
# Continue to remote download if local processing fails
# If we didn't process local images, download from remote
if not local_images_processed:
# Download example images
for i, image in enumerate(images, 1):
image_url = image.get('url')
if not image_url:
continue
# Get image filename from URL
image_filename = os.path.basename(image_url.split('?')[0])
image_ext = os.path.splitext(image_filename)[1].lower()
# Handle both images and videos
is_image = image_ext in SUPPORTED_MEDIA_EXTENSIONS['images']
is_video = image_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']
if not (is_image or is_video):
logger.debug(f"Skipping unsupported file type: {image_filename}")
continue
save_filename = f"image_{i}{image_ext}"
# Check if already downloaded
save_path = os.path.join(model_dir, save_filename)
if os.path.exists(save_path):
logger.debug(f"File already exists: {save_path}")
continue
# Download the file
try:
logger.debug(f"Downloading {save_filename} for {model_name}")
# Direct download using the independent session
async with independent_session.get(image_url, timeout=60) as response:
if response.status == 200:
if is_image and optimize:
# For images, optimize if requested
image_data = await response.read()
optimized_data, ext = ExifUtils.optimize_image(
image_data,
target_width=EXAMPLE_IMAGE_WIDTH,
format='webp',
quality=85,
preserve_metadata=False
)
# Update save filename if format changed
if ext == '.webp':
save_filename = os.path.splitext(save_filename)[0] + '.webp'
save_path = os.path.join(model_dir, save_filename)
# Save the optimized image
with open(save_path, 'wb') as f:
f.write(optimized_data)
else:
# For videos or unoptimized images, save directly
with open(save_path, 'wb') as f:
async for chunk in response.content.iter_chunked(8192):
if chunk:
f.write(chunk)
else:
error_msg = f"Failed to download file: {image_url}, status code: {response.status}"
logger.warning(error_msg)
download_progress['errors'].append(error_msg)
download_progress['last_error'] = error_msg
model_success = False # Mark model as failed
# Add a delay between downloads for remote files only
await asyncio.sleep(delay)
except Exception as e:
error_msg = f"Error downloading file {image_url}: {str(e)}"
logger.error(error_msg)
download_progress['errors'].append(error_msg)
download_progress['last_error'] = error_msg
model_success = False # Mark model as failed
# Only mark model as processed if all images downloaded successfully
if model_success:
download_progress['processed_models'].add(model_hash)
else:
logger.warning(f"Model {model_name} had download errors, will not mark as completed")
# Save progress to file periodically
if download_progress['completed'] % 10 == 0 or download_progress['completed'] == download_progress['total'] - 1:
progress_file = os.path.join(output_dir, '.download_progress.json')
with open(progress_file, 'w', encoding='utf-8') as f:
json.dump({
'processed_models': list(download_progress['processed_models']),
'completed': download_progress['completed'],
'total': download_progress['total'],
'last_update': time.time()
}, f, indent=2)
except Exception as e:
error_msg = f"Error processing model {model.get('model_name')}: {str(e)}"
logger.error(error_msg, exc_info=True)
download_progress['errors'].append(error_msg)
download_progress['last_error'] = error_msg
# Update progress
download_progress['completed'] += 1
# Mark as completed
download_progress['status'] = 'completed'
download_progress['end_time'] = time.time()
logger.info(f"Example images download completed: {download_progress['completed']}/{download_progress['total']} models processed")
except Exception as e:
error_msg = f"Error during example images download: {str(e)}"
logger.error(error_msg, exc_info=True)
download_progress['errors'].append(error_msg)
download_progress['last_error'] = error_msg
download_progress['status'] = 'error'
download_progress['end_time'] = time.time()
finally:
# Close the independent session
try:
await independent_session.close()
except Exception as e:
logger.error(f"Error closing download session: {e}")
# Save final progress to file
try:
progress_file = os.path.join(output_dir, '.download_progress.json')
with open(progress_file, 'w', encoding='utf-8') as f:
json.dump({
'processed_models': list(download_progress['processed_models']),
'completed': download_progress['completed'],
'total': download_progress['total'],
'last_update': time.time(),
'status': download_progress['status']
}, f, indent=2)
except Exception as e:
logger.error(f"Failed to save progress file: {e}")
# Set download status to not downloading
is_downloading = False
'error': str(e)
}, status=500)

View File

@@ -1,5 +1,6 @@
import os
import time
import base64
import numpy as np
from PIL import Image
import torch
@@ -12,7 +13,7 @@ import json
import asyncio
import sys
from ..utils.exif_utils import ExifUtils
from ..utils.recipe_parsers import RecipeParserFactory
from ..recipes import RecipeParserFactory
from ..utils.constants import CARD_PREVIEW_WIDTH
from ..config import config
@@ -56,6 +57,7 @@ class RecipeRoutes:
app.router.add_get('/api/recipes', routes.get_recipes)
app.router.add_get('/api/recipe/{recipe_id}', routes.get_recipe_detail)
app.router.add_post('/api/recipes/analyze-image', routes.analyze_recipe_image)
app.router.add_post('/api/recipes/analyze-local-image', routes.analyze_local_image)
app.router.add_post('/api/recipes/save', routes.save_recipe)
app.router.add_delete('/api/recipe/{recipe_id}', routes.delete_recipe)
@@ -70,12 +72,18 @@ class RecipeRoutes:
# Add new endpoint for getting recipe syntax
app.router.add_get('/api/recipe/{recipe_id}/syntax', routes.get_recipe_syntax)
# Add new endpoint for updating recipe metadata (name and tags)
# Add new endpoint for updating recipe metadata (name, tags and source_path)
app.router.add_put('/api/recipe/{recipe_id}/update', routes.update_recipe)
# Add new endpoint for reconnecting deleted LoRAs
app.router.add_post('/api/recipe/lora/reconnect', routes.reconnect_lora)
# Add new endpoint for finding duplicate recipes
app.router.add_get('/api/recipes/find-duplicates', routes.find_duplicates)
# Add new endpoint for bulk deletion of recipes
app.router.add_post('/api/recipes/bulk-delete', routes.bulk_delete)
# Start cache initialization
app.on_startup.append(routes._init_cache)
@@ -246,6 +254,7 @@ class RecipeRoutes:
content_type = request.headers.get('Content-Type', '')
is_url_mode = False
metadata = None # Initialize metadata variable
if 'multipart/form-data' in content_type:
# Handle image upload
@@ -279,17 +288,63 @@ class RecipeRoutes:
"loras": []
}, status=400)
# Download image from URL
temp_path = download_civitai_image(url)
# Check if this is a Civitai image URL
import re
civitai_image_match = re.match(r'https://civitai\.com/images/(\d+)', url)
if not temp_path:
return web.json_response({
"error": "Failed to download image from URL",
"loras": []
}, status=400)
if civitai_image_match:
# Extract image ID and fetch image info using get_image_info
image_id = civitai_image_match.group(1)
image_info = await self.civitai_client.get_image_info(image_id)
if not image_info:
return web.json_response({
"error": "Failed to fetch image information from Civitai",
"loras": []
}, status=400)
# Get image URL from response
image_url = image_info.get('url')
if not image_url:
return web.json_response({
"error": "No image URL found in Civitai response",
"loras": []
}, status=400)
# Download image directly from URL
session = await self.civitai_client.session
# Create a temporary file to save the downloaded image
with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
temp_path = temp_file.name
async with session.get(image_url) as response:
if response.status != 200:
return web.json_response({
"error": f"Failed to download image from URL: HTTP {response.status}",
"loras": []
}, status=400)
with open(temp_path, 'wb') as f:
f.write(await response.read())
# Use meta field from image_info as metadata
if 'meta' in image_info:
metadata = image_info['meta']
else:
# Not a Civitai image URL, use the original download method
temp_path = download_civitai_image(url)
if not temp_path:
return web.json_response({
"error": "Failed to download image from URL",
"loras": []
}, status=400)
# Extract metadata from the image using ExifUtils
metadata = ExifUtils.extract_image_metadata(temp_path)
# If metadata wasn't obtained from Civitai API, extract it from the image
if metadata is None:
# Extract metadata from the image using ExifUtils
metadata = ExifUtils.extract_image_metadata(temp_path)
# If no metadata found, return a more specific error
if not metadata:
@@ -300,7 +355,6 @@ class RecipeRoutes:
# For URL mode, include the image data as base64
if is_url_mode and temp_path:
import base64
with open(temp_path, "rb") as image_file:
result["image_base64"] = base64.b64encode(image_file.read()).decode('utf-8')
@@ -317,7 +371,6 @@ class RecipeRoutes:
# For URL mode, include the image data as base64
if is_url_mode and temp_path:
import base64
with open(temp_path, "rb") as image_file:
result["image_base64"] = base64.b64encode(image_file.read()).decode('utf-8')
@@ -332,7 +385,6 @@ class RecipeRoutes:
# For URL mode, include the image data as base64
if is_url_mode and temp_path:
import base64
with open(temp_path, "rb") as image_file:
result["image_base64"] = base64.b64encode(image_file.read()).decode('utf-8')
@@ -340,6 +392,21 @@ class RecipeRoutes:
if "error" in result and not result.get("loras"):
return web.json_response(result, status=200)
# Calculate fingerprint from parsed loras
from ..utils.utils import calculate_recipe_fingerprint
fingerprint = calculate_recipe_fingerprint(result.get("loras", []))
# Add fingerprint to result
result["fingerprint"] = fingerprint
# Find matching recipes with the same fingerprint
matching_recipes = []
if fingerprint:
matching_recipes = await self.recipe_scanner.find_recipes_by_fingerprint(fingerprint)
# Add matching recipes to result
result["matching_recipes"] = matching_recipes
return web.json_response(result)
except Exception as e:
@@ -355,7 +422,100 @@ class RecipeRoutes:
os.unlink(temp_path)
except Exception as e:
logger.error(f"Error deleting temporary file: {e}")
async def analyze_local_image(self, request: web.Request) -> web.Response:
    """Analyze a local image file for recipe metadata.

    Expects a JSON body with ``path`` pointing at an image on disk.
    Returns parsed recipe data (loras, fingerprint, matching recipes)
    plus the image itself as base64 so the client can always render a
    preview, even when no metadata or parser is found.
    """
    def _image_as_base64(path):
        # The client needs the raw image for preview in every outcome;
        # extracted to remove the triple-duplicated read/encode boilerplate.
        with open(path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    try:
        # Ensure services are initialized
        await self.init_services()

        # Get JSON data from request
        data = await request.json()
        file_path = data.get('path')

        if not file_path:
            return web.json_response({
                'error': 'No file path provided',
                'loras': []
            }, status=400)

        # Normalize file path for cross-platform compatibility; also
        # strips quotes from copy-pasted paths.
        file_path = os.path.normpath(file_path.strip('"').strip("'"))

        # Validate that the file exists
        if not os.path.isfile(file_path):
            return web.json_response({
                'error': 'File not found',
                'loras': []
            }, status=404)

        # Extract metadata from the image using ExifUtils
        metadata = ExifUtils.extract_image_metadata(file_path)

        # If no metadata found, return error (200 so the client can
        # still display the preview without treating it as a failure)
        if not metadata:
            return web.json_response({
                "error": "No metadata found in this image",
                "loras": [],  # Return empty loras array to prevent client-side errors
                "image_base64": _image_as_base64(file_path)
            }, status=200)

        # Use the parser factory to get the appropriate parser
        parser = RecipeParserFactory.create_parser(metadata)
        if parser is None:
            return web.json_response({
                "error": "No parser found for this image",
                "loras": [],  # Return empty loras array to prevent client-side errors
                "image_base64": _image_as_base64(file_path)
            }, status=200)

        # Parse the metadata
        result = await parser.parse_metadata(
            metadata,
            recipe_scanner=self.recipe_scanner,
            civitai_client=self.civitai_client
        )

        # Add base64 image data to result
        result["image_base64"] = _image_as_base64(file_path)

        # Check for errors
        if "error" in result and not result.get("loras"):
            return web.json_response(result, status=200)

        # Calculate fingerprint from parsed loras
        from ..utils.utils import calculate_recipe_fingerprint
        fingerprint = calculate_recipe_fingerprint(result.get("loras", []))
        result["fingerprint"] = fingerprint

        # Find matching recipes with the same fingerprint
        matching_recipes = []
        if fingerprint:
            matching_recipes = await self.recipe_scanner.find_recipes_by_fingerprint(fingerprint)
        result["matching_recipes"] = matching_recipes

        return web.json_response(result)
    except Exception as e:
        logger.error(f"Error analyzing local image: {e}", exc_info=True)
        return web.json_response({
            'error': str(e),
            'loras': []  # Return empty loras array to prevent client-side errors
        }, status=500)
async def save_recipe(self, request: web.Request) -> web.Response:
"""Save a recipe to the recipes folder"""
@@ -425,7 +585,6 @@ class RecipeRoutes:
if not image:
if image_base64:
# Convert base64 to binary
import base64
try:
# Remove potential data URL prefix
if ',' in image_base64:
@@ -474,7 +633,7 @@ class RecipeRoutes:
with open(image_path, 'wb') as f:
f.write(optimized_image)
# Create the recipe JSON
# Create the recipe data structure
current_time = time.time()
# Format loras data according to the recipe.json format
@@ -514,6 +673,10 @@ class RecipeRoutes:
"clip_skip": raw_metadata.get("clip_skip", "")
}
# Calculate recipe fingerprint
from ..utils.utils import calculate_recipe_fingerprint
fingerprint = calculate_recipe_fingerprint(loras_data)
# Create the recipe data structure
recipe_data = {
"id": recipe_id,
@@ -523,13 +686,18 @@ class RecipeRoutes:
"created_date": current_time,
"base_model": metadata.get("base_model", ""),
"loras": loras_data,
"gen_params": gen_params
"gen_params": gen_params,
"fingerprint": fingerprint
}
# Add tags if provided
if tags:
recipe_data["tags"] = tags
# Add source_path if provided in metadata
if metadata.get("source_path"):
recipe_data["source_path"] = metadata.get("source_path")
# Save the recipe JSON
json_filename = f"{recipe_id}.recipe.json"
json_path = os.path.join(recipes_dir, json_filename)
@@ -539,6 +707,14 @@ class RecipeRoutes:
# Add recipe metadata to the image
ExifUtils.append_recipe_metadata(image_path, recipe_data)
# Check for duplicates
matching_recipes = []
if fingerprint:
matching_recipes = await self.recipe_scanner.find_recipes_by_fingerprint(fingerprint)
# Remove current recipe from matches
if recipe_id in matching_recipes:
matching_recipes.remove(recipe_id)
# Simplified cache update approach
# Instead of trying to update the cache directly, just set it to None
# to force a refresh on the next get_cached_data call
@@ -554,7 +730,8 @@ class RecipeRoutes:
'success': True,
'recipe_id': recipe_id,
'image_path': image_path,
'json_path': json_path
'json_path': json_path,
'matching_recipes': matching_recipes
})
except Exception as e:
@@ -1089,9 +1266,9 @@ class RecipeRoutes:
data = await request.json()
# Validate required fields
if 'title' not in data and 'tags' not in data:
if 'title' not in data and 'tags' not in data and 'source_path' not in data:
return web.json_response({
"error": "At least one field to update must be provided (title or tags)"
"error": "At least one field to update must be provided (title or tags or source_path)"
}, status=400)
# Use the recipe scanner's update method
@@ -1186,6 +1363,10 @@ class RecipeRoutes:
if not found:
return web.json_response({"error": "Could not find matching deleted LoRA in recipe"}, status=404)
# Recalculate recipe fingerprint after updating LoRA
from ..utils.utils import calculate_recipe_fingerprint
recipe_data['fingerprint'] = calculate_recipe_fingerprint(recipe_data.get('loras', []))
# Save updated recipe
with open(recipe_path, 'w', encoding='utf-8') as f:
@@ -1201,6 +1382,8 @@ class RecipeRoutes:
if cache_item.get('id') == recipe_id:
# Replace loras array with updated version
cache_item['loras'] = recipe_data['loras']
# Update fingerprint in cache
cache_item['fingerprint'] = recipe_data['fingerprint']
# Resort the cache
asyncio.create_task(scanner._cache.resort())
@@ -1211,11 +1394,20 @@ class RecipeRoutes:
if image_path and os.path.exists(image_path):
from ..utils.exif_utils import ExifUtils
ExifUtils.append_recipe_metadata(image_path, recipe_data)
# Find other recipes with the same fingerprint
matching_recipes = []
if 'fingerprint' in recipe_data:
matching_recipes = await scanner.find_recipes_by_fingerprint(recipe_data['fingerprint'])
# Remove current recipe from matches
if recipe_id in matching_recipes:
matching_recipes.remove(recipe_id)
return web.json_response({
"success": True,
"recipe_id": recipe_id,
"updated_lora": updated_lora
"updated_lora": updated_lora,
"matching_recipes": matching_recipes
})
except Exception as e:
@@ -1291,3 +1483,150 @@ class RecipeRoutes:
'success': False,
'error': str(e)
}, status=500)
async def find_duplicates(self, request: web.Request) -> web.Response:
    """Find all duplicate recipes based on fingerprints.

    Returns groups of recipes sharing a fingerprint, each group sorted
    newest-first, and the groups themselves sorted by size descending.
    """
    try:
        # Ensure services are initialized
        await self.init_services()

        # fingerprint -> list of recipe ids sharing it
        duplicate_groups = await self.recipe_scanner.find_all_duplicate_recipes()

        response_data = []
        for fingerprint, recipe_ids in duplicate_groups.items():
            # A single recipe is not a duplicate group
            if len(recipe_ids) <= 1:
                continue

            # Collect a trimmed summary for each resolvable recipe to keep
            # the response size manageable
            recipes = []
            for recipe_id in recipe_ids:
                recipe = await self.recipe_scanner.get_recipe_by_id(recipe_id)
                if not recipe:
                    continue
                file_url = recipe.get('file_url') or self._format_recipe_file_url(recipe.get('file_path', ''))
                recipes.append({
                    'id': recipe.get('id'),
                    'title': recipe.get('title'),
                    'file_url': file_url,
                    'modified': recipe.get('modified'),
                    'created_date': recipe.get('created_date'),
                    'lora_count': len(recipe.get('loras', [])),
                })

            # Require at least two recipes that actually resolved
            if len(recipes) < 2:
                continue

            # Newest first within each group
            recipes.sort(key=lambda r: r.get('modified', 0), reverse=True)
            response_data.append({
                'fingerprint': fingerprint,
                'count': len(recipes),
                'recipes': recipes
            })

        # Largest groups first
        response_data.sort(key=lambda g: g['count'], reverse=True)

        return web.json_response({
            'success': True,
            'duplicate_groups': response_data
        })
    except Exception as e:
        logger.error(f"Error finding duplicate recipes: {e}", exc_info=True)
        return web.json_response({
            'success': False,
            'error': str(e)
        }, status=500)
async def bulk_delete(self, request: web.Request) -> web.Response:
    """Delete multiple recipes by ID.

    Expects a JSON body with a ``recipe_ids`` list. For each ID, the
    ``<id>.recipe.json`` file and the image it references are removed.
    Returns lists of deleted and failed IDs with per-recipe reasons.
    """
    try:
        # Ensure services are initialized
        await self.init_services()

        # Parse request data
        data = await request.json()
        recipe_ids = data.get('recipe_ids', [])
        if not recipe_ids:
            return web.json_response({
                'success': False,
                'error': 'No recipe IDs provided'
            }, status=400)

        # Get recipes directory
        recipes_dir = self.recipe_scanner.recipes_dir
        if not recipes_dir or not os.path.exists(recipes_dir):
            return web.json_response({
                'success': False,
                'error': 'Recipes directory not found'
            }, status=404)

        # Track deleted and failed recipes
        deleted_recipes = []
        failed_recipes = []

        for recipe_id in recipe_ids:
            # Find recipe JSON file
            recipe_json_path = os.path.join(recipes_dir, f"{recipe_id}.recipe.json")
            if not os.path.exists(recipe_json_path):
                failed_recipes.append({
                    'id': recipe_id,
                    'reason': 'Recipe not found'
                })
                continue

            try:
                # Load recipe data to find the associated image path
                with open(recipe_json_path, 'r', encoding='utf-8') as f:
                    recipe_data = json.load(f)
                image_path = recipe_data.get('file_path')

                # Delete recipe JSON file, then its image if present
                os.remove(recipe_json_path)
                if image_path and os.path.exists(image_path):
                    os.remove(image_path)

                deleted_recipes.append(recipe_id)
            except Exception as e:
                failed_recipes.append({
                    'id': recipe_id,
                    'reason': str(e)
                })

        # Update cache if any recipes were deleted
        if deleted_recipes and self.recipe_scanner._cache is not None:
            # Use a set for O(1) membership instead of scanning the list
            # once per cached recipe
            deleted_set = set(deleted_recipes)
            self.recipe_scanner._cache.raw_data = [
                r for r in self.recipe_scanner._cache.raw_data
                if r.get('id') not in deleted_set
            ]
            # Resort the cache in the background
            asyncio.create_task(self.recipe_scanner._cache.resort())
            logger.info(f"Removed {len(deleted_recipes)} recipes from cache")

        return web.json_response({
            'success': True,
            'deleted': deleted_recipes,
            'failed': failed_recipes,
            'total_deleted': len(deleted_recipes),
            'total_failed': len(failed_recipes)
        })
    except Exception as e:
        logger.error(f"Error performing bulk delete: {e}", exc_info=True)
        return web.json_response({
            'success': False,
            'error': str(e)
        }, status=500)

View File

@@ -150,11 +150,16 @@ class UpdateRoutes:
"""
Compare two semantic version strings
Returns True if version2 is newer than version1
Ignores any suffixes after '-' (e.g., -bugfix, -alpha)
"""
try:
# Clean version strings - remove any suffix after '-'
v1_clean = version1.split('-')[0]
v2_clean = version2.split('-')[0]
# Split versions into components
v1_parts = [int(x) for x in version1.split('.')]
v2_parts = [int(x) for x in version2.split('.')]
v1_parts = [int(x) for x in v1_clean.split('.')]
v2_parts = [int(x) for x in v2_clean.split('.')]
# Ensure both have 3 components (major.minor.patch)
while len(v1_parts) < 3:

View File

@@ -1,26 +0,0 @@
from aiohttp import web
from server import PromptServer
from .nodes.utils import get_lora_info
@PromptServer.instance.routes.post("/loramanager/get_trigger_words")
async def get_trigger_words(request):
    # Endpoint: gather the trigger words for each requested LoRA and push the
    # combined text to the listed trigger-word-toggle nodes via websocket.
    json_data = await request.json()
    lora_names = json_data.get("lora_names", [])
    node_ids = json_data.get("node_ids", [])

    # Accumulate trigger words across all requested LoRAs, in request order
    all_trigger_words = []
    for lora_name in lora_names:
        # get_lora_info returns a (path, trigger_words) pair; only the words
        # are needed here
        _, trigger_words = await get_lora_info(lora_name)
        all_trigger_words.extend(trigger_words)

    # Format the trigger words
    # NOTE(review): ",, " (double comma) appears to be a deliberate separator
    # for downstream splitting — confirm it is not a typo for ", "
    trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""

    # Send update to all connected trigger word toggle nodes
    for node_id in node_ids:
        PromptServer.instance.send_sync("trigger_word_update", {
            "id": node_id,
            "message": trigger_words_text
        })

    return web.json_response({"success": True})

View File

@@ -267,7 +267,7 @@ class CivitaiClient:
return None, error_msg
async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
"""Fetch model metadata (description and tags) from Civitai API
"""Fetch model metadata (description, tags, and creator info) from Civitai API
Args:
model_id: The Civitai model ID
@@ -294,10 +294,14 @@ class CivitaiClient:
# Extract relevant metadata
metadata = {
"description": data.get("description") or "No model description available",
"tags": data.get("tags", [])
"tags": data.get("tags", []),
"creator": {
"username": data.get("creator", {}).get("username"),
"image": data.get("creator", {}).get("image")
}
}
if metadata["description"] or metadata["tags"]:
if metadata["description"] or metadata["tags"] or metadata["creator"]["username"]:
return metadata, status_code
else:
logger.warning(f"No metadata found for model {model_id}")
@@ -342,3 +346,34 @@ class CivitaiClient:
except Exception as e:
logger.error(f"Error getting hash from Civitai: {e}")
return None
async def get_image_info(self, image_id: str) -> Optional[Dict]:
    """Fetch image information from Civitai API

    Args:
        image_id: The Civitai image ID

    Returns:
        Optional[Dict]: The image data or None if not found
    """
    try:
        session = await self._ensure_fresh_session()
        headers = self._get_request_headers()
        url = f"{self.base_url}/images?imageId={image_id}&nsfw=X"

        logger.debug(f"Fetching image info for ID: {image_id}")
        async with session.get(url, headers=headers) as response:
            # Non-200 responses are logged and treated as "not found"
            if response.status != 200:
                logger.error(f"Failed to fetch image info for ID: {image_id} (status {response.status})")
                return None

            data = await response.json()
            # The API wraps results in an "items" list; take the first match
            if data and data.get("items"):
                logger.debug(f"Successfully fetched image info for ID: {image_id}")
                return data["items"][0]

            logger.warning(f"No image found with ID: {image_id}")
            return None
    except Exception as e:
        error_msg = f"Error fetching image info: {e}"
        logger.error(error_msg)
        return None

View File

@@ -2,8 +2,7 @@ import logging
import os
import json
import asyncio
from typing import Optional, Dict, Any
from .civitai_client import CivitaiClient
from typing import Dict
from ..utils.models import LoraMetadata, CheckpointMetadata
from ..utils.constants import CARD_PREVIEW_WIDTH
from ..utils.exif_utils import ExifUtils
@@ -136,15 +135,9 @@ class DownloadManager:
# 3. Prepare download
file_name = file_info['name']
save_path = os.path.join(save_dir, file_name)
file_size = file_info.get('sizeKB', 0) * 1024
# 4. Notify file monitor - use normalized path and file size
file_monitor = await self._get_lora_monitor() if model_type == "lora" else await self._get_checkpoint_monitor()
if file_monitor and file_monitor.handler:
file_monitor.handler.add_ignore_path(
save_path.replace(os.sep, '/'),
file_size
)
# The file monitor is deprecated, so we no longer need to notify it
# 5. Prepare metadata based on model type
if model_type == "checkpoint":
@@ -154,7 +147,7 @@ class DownloadManager:
metadata = LoraMetadata.from_civitai_info(version_info, file_info, save_path)
logger.info(f"Creating LoraMetadata for {file_name}")
# 5.1 Get and update model tags and description
# 5.1 Get and update model tags, description and creator info
model_id = version_info.get('modelId')
if model_id:
model_metadata, _ = await civitai_client.get_model_metadata(str(model_id))
@@ -163,6 +156,8 @@ class DownloadManager:
metadata.tags = model_metadata.get("tags", [])
if model_metadata.get("description"):
metadata.modelDescription = model_metadata.get("description", "")
if model_metadata.get("creator"):
metadata.civitai["creator"] = model_metadata.get("creator")
# 6. Start download process
result = await self._execute_download(
@@ -285,17 +280,11 @@ class DownloadManager:
scanner = await self._get_lora_scanner()
logger.info(f"Updating lora cache for {save_path}")
cache = await scanner.get_cached_data()
# Convert metadata to dictionary
metadata_dict = metadata.to_dict()
metadata_dict['folder'] = relative_path
cache.raw_data.append(metadata_dict)
await cache.resort()
all_folders = set(cache.folders)
all_folders.add(relative_path)
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
# Update the hash index with the new model entry
scanner._hash_index.add_entry(metadata_dict['sha256'], metadata_dict['file_path'])
# Add model to cache and save to disk in a single operation
await scanner.add_model_to_cache(metadata_dict, relative_path)
# Report 100% completion
if progress_callback:

View File

@@ -2,6 +2,7 @@ import asyncio
from typing import List, Dict
from dataclasses import dataclass
from operator import itemgetter
from natsort import natsorted
@dataclass
class LoraCache:
@@ -17,7 +18,7 @@ class LoraCache:
async def resort(self, name_only: bool = False):
"""Resort all cached data views"""
async with self._lock:
self.sorted_by_name = sorted(
self.sorted_by_name = natsorted(
self.raw_data,
key=lambda x: x['model_name'].lower() # Case-insensitive sort
)

View File

@@ -1,54 +0,0 @@
from typing import Dict, Optional
import logging
from dataclasses import dataclass
logger = logging.getLogger(__name__)
@dataclass
class LoraHashIndex:
    """Index for mapping LoRA file hashes to their file paths"""

    def __init__(self):
        # Single mapping: lowercase sha256 hex -> file path
        self._hash_to_path: Dict[str, str] = {}

    def add_entry(self, sha256: str, file_path: str) -> None:
        """Add or update a hash -> path mapping"""
        if sha256 and file_path:
            # Always store lowercase hashes for consistency
            self._hash_to_path[sha256.lower()] = file_path

    def remove_entry(self, sha256: str) -> None:
        """Remove a hash entry"""
        if sha256:
            self._hash_to_path.pop(sha256.lower(), None)

    def remove_by_path(self, file_path: str) -> None:
        """Remove the first entry whose path matches ``file_path``"""
        matched = next(
            (h for h, p in self._hash_to_path.items() if p == file_path),
            None,
        )
        if matched is not None:
            del self._hash_to_path[matched]

    def get_path(self, sha256: str) -> Optional[str]:
        """Get file path for a given hash"""
        if not sha256:
            return None
        return self._hash_to_path.get(sha256.lower())

    def get_hash(self, file_path: str) -> Optional[str]:
        """Get hash for a given file path (linear scan)"""
        return next(
            (h for h, p in self._hash_to_path.items() if p == file_path),
            None,
        )

    def has_hash(self, sha256: str) -> bool:
        """Check if hash exists in index"""
        return bool(sha256) and sha256.lower() in self._hash_to_path

    def clear(self) -> None:
        """Clear all entries"""
        self._hash_to_path.clear()

View File

@@ -4,6 +4,7 @@ import logging
import asyncio
import shutil
import time
import re
from typing import List, Dict, Optional, Set
from ..utils.models import LoraMetadata
@@ -123,7 +124,7 @@ class LoraScanner(ModelScanner):
folder: str = None, search: str = None, fuzzy_search: bool = False,
base_models: list = None, tags: list = None,
search_options: dict = None, hash_filters: dict = None,
favorites_only: bool = False) -> Dict:
favorites_only: bool = False, first_letter: str = None) -> Dict:
"""Get paginated and filtered lora data
Args:
@@ -138,6 +139,7 @@ class LoraScanner(ModelScanner):
search_options: Dictionary with search options (filename, modelname, tags, recursive)
hash_filters: Dictionary with hash filtering options (single_hash or multiple_hashes)
favorites_only: Filter for favorite models only
first_letter: Filter by first letter of model name
"""
cache = await self.get_cached_data()
@@ -202,6 +204,10 @@ class LoraScanner(ModelScanner):
lora for lora in filtered_data
if lora.get('favorite', False) is True
]
# Apply first letter filtering
if first_letter:
filtered_data = self._filter_by_first_letter(filtered_data, first_letter)
# Apply folder filtering
if folder is not None:
@@ -273,6 +279,101 @@ class LoraScanner(ModelScanner):
return result
def _filter_by_first_letter(self, data, letter):
    """Filter loras by the first letter of their model name.

    Special bucket values for ``letter``:
    - '#': names starting with a digit
    - '@': names starting with a non-alphanumeric character
    - '': names starting with a CJK character
    Any other value matches the first character case-insensitively.
    """
    selected = []
    for lora in data:
        model_name = lora.get('model_name', '')
        # Skip entries without a usable name
        if not model_name:
            continue
        first_char = model_name[0].upper()

        # Same match semantics as the original elif chain: each special
        # bucket is a guarded test, with a literal-letter fallback
        is_match = (
            (letter == '#' and first_char.isdigit())
            or (letter == '@' and not first_char.isalnum())
            or (letter == '' and self._is_cjk_character(first_char))
            or letter.upper() == first_char
        )
        if is_match:
            selected.append(lora)
    return selected
def _is_cjk_character(self, char):
"""Check if character is a CJK character"""
# Define Unicode ranges for CJK characters
cjk_ranges = [
(0x4E00, 0x9FFF), # CJK Unified Ideographs
(0x3400, 0x4DBF), # CJK Unified Ideographs Extension A
(0x20000, 0x2A6DF), # CJK Unified Ideographs Extension B
(0x2A700, 0x2B73F), # CJK Unified Ideographs Extension C
(0x2B740, 0x2B81F), # CJK Unified Ideographs Extension D
(0x2B820, 0x2CEAF), # CJK Unified Ideographs Extension E
(0x2CEB0, 0x2EBEF), # CJK Unified Ideographs Extension F
(0x30000, 0x3134F), # CJK Unified Ideographs Extension G
(0xF900, 0xFAFF), # CJK Compatibility Ideographs
(0x3300, 0x33FF), # CJK Compatibility
(0x3200, 0x32FF), # Enclosed CJK Letters and Months
(0x3100, 0x312F), # Bopomofo
(0x31A0, 0x31BF), # Bopomofo Extended
(0x3040, 0x309F), # Hiragana
(0x30A0, 0x30FF), # Katakana
(0x31F0, 0x31FF), # Katakana Phonetic Extensions
(0xAC00, 0xD7AF), # Hangul Syllables
(0x1100, 0x11FF), # Hangul Jamo
(0xA960, 0xA97F), # Hangul Jamo Extended-A
(0xD7B0, 0xD7FF), # Hangul Jamo Extended-B
]
code_point = ord(char)
return any(start <= code_point <= end for start, end in cjk_ranges)
async def get_letter_counts(self):
    """Count models per first-letter bucket.

    Buckets: '#' for digits, 'A'-'Z' for letters, '@' for other
    non-alphanumeric characters, '' for CJK characters.
    """
    cache = await self.get_cached_data()

    # Build the bucket table in display order: '#', A-Z, '@', ''
    letters = {'#': 0}
    for code in range(ord('A'), ord('Z') + 1):
        letters[chr(code)] = 0
    letters['@'] = 0
    letters[''] = 0

    # Tally each model into exactly one bucket
    for lora in cache.sorted_by_name:
        model_name = lora.get('model_name', '')
        if not model_name:
            continue
        first_char = model_name[0].upper()
        if first_char.isdigit():
            letters['#'] += 1
        elif first_char in letters:
            letters[first_char] += 1
        elif self._is_cjk_character(first_char):
            letters[''] += 1
        elif not first_char.isalnum():
            letters['@'] += 1

    return letters
async def _update_metadata_paths(self, metadata_path: str, lora_path: str) -> Dict:
"""Update file paths in metadata file"""
try:

View File

@@ -2,6 +2,7 @@ import asyncio
from typing import List, Dict
from dataclasses import dataclass
from operator import itemgetter
from natsort import natsorted
@dataclass
class ModelCache:
@@ -17,7 +18,7 @@ class ModelCache:
async def resort(self, name_only: bool = False):
"""Resort all cached data views"""
async with self._lock:
self.sorted_by_name = sorted(
self.sorted_by_name = natsorted(
self.raw_data,
key=lambda x: x['model_name'].lower() # Case-insensitive sort
)

View File

@@ -1,12 +1,15 @@
from typing import Dict, Optional, Set
from typing import Dict, Optional, Set, List
import os
class ModelHashIndex:
"""Index for looking up models by hash or path"""
"""Index for looking up models by hash or filename"""
def __init__(self):
self._hash_to_path: Dict[str, str] = {}
self._filename_to_hash: Dict[str, str] = {} # Changed from path_to_hash to filename_to_hash
self._filename_to_hash: Dict[str, str] = {}
# New data structures for tracking duplicates
self._duplicate_hashes: Dict[str, List[str]] = {} # sha256 -> list of paths
self._duplicate_filenames: Dict[str, List[str]] = {} # filename -> list of paths
def add_entry(self, sha256: str, file_path: str) -> None:
"""Add or update hash index entry"""
@@ -19,6 +22,26 @@ class ModelHashIndex:
# Extract filename without extension
filename = self._get_filename_from_path(file_path)
# Track duplicates by hash
if sha256 in self._hash_to_path:
old_path = self._hash_to_path[sha256]
if old_path != file_path: # Only record if it's actually a different path
if sha256 not in self._duplicate_hashes:
self._duplicate_hashes[sha256] = [old_path]
if file_path not in self._duplicate_hashes.get(sha256, []):
self._duplicate_hashes.setdefault(sha256, []).append(file_path)
# Track duplicates by filename
if filename in self._filename_to_hash:
old_hash = self._filename_to_hash[filename]
if old_hash != sha256: # Different models with the same name
old_path = self._hash_to_path.get(old_hash)
if old_path:
if filename not in self._duplicate_filenames:
self._duplicate_filenames[filename] = [old_path]
if file_path not in self._duplicate_filenames.get(filename, []):
self._duplicate_filenames.setdefault(filename, []).append(file_path)
# Remove old path mapping if hash exists
if sha256 in self._hash_to_path:
old_path = self._hash_to_path[sha256]
@@ -43,21 +66,123 @@ class ModelHashIndex:
def remove_by_path(self, file_path: str) -> None:
"""Remove entry by file path"""
filename = self._get_filename_from_path(file_path)
if filename in self._filename_to_hash:
hash_val = self._filename_to_hash[filename]
if hash_val in self._hash_to_path:
hash_val = None
# Find the hash for this file path
for h, p in self._hash_to_path.items():
if p == file_path:
hash_val = h
break
# If we didn't find a hash, nothing to do
if not hash_val:
return
# Update duplicates tracking for hash
if hash_val in self._duplicate_hashes:
# Remove the current path from duplicates
self._duplicate_hashes[hash_val] = [p for p in self._duplicate_hashes[hash_val] if p != file_path]
# Update or remove hash mapping based on remaining duplicates
if len(self._duplicate_hashes[hash_val]) > 0:
# Replace with one of the remaining paths
new_path = self._duplicate_hashes[hash_val][0]
new_filename = self._get_filename_from_path(new_path)
# Update hash-to-path mapping
self._hash_to_path[hash_val] = new_path
# IMPORTANT: Update filename-to-hash mapping for consistency
# Remove old filename mapping if it points to this hash
if filename in self._filename_to_hash and self._filename_to_hash[filename] == hash_val:
del self._filename_to_hash[filename]
# Add new filename mapping
self._filename_to_hash[new_filename] = hash_val
# If only one duplicate left, remove from duplicates tracking
if len(self._duplicate_hashes[hash_val]) == 1:
del self._duplicate_hashes[hash_val]
else:
# No duplicates left, remove hash entry completely
del self._duplicate_hashes[hash_val]
del self._hash_to_path[hash_val]
del self._filename_to_hash[filename]
# Remove corresponding filename entry if it points to this hash
if filename in self._filename_to_hash and self._filename_to_hash[filename] == hash_val:
del self._filename_to_hash[filename]
else:
# No duplicates, simply remove the hash entry
del self._hash_to_path[hash_val]
# Remove corresponding filename entry if it points to this hash
if filename in self._filename_to_hash and self._filename_to_hash[filename] == hash_val:
del self._filename_to_hash[filename]
# Update duplicates tracking for filename
if filename in self._duplicate_filenames:
# Remove the current path from duplicates
self._duplicate_filenames[filename] = [p for p in self._duplicate_filenames[filename] if p != file_path]
# Update or remove filename mapping based on remaining duplicates
if len(self._duplicate_filenames[filename]) > 0:
# Get the hash for the first remaining duplicate path
first_dup_path = self._duplicate_filenames[filename][0]
first_dup_hash = None
for h, p in self._hash_to_path.items():
if p == first_dup_path:
first_dup_hash = h
break
# Update the filename to hash mapping if we found a hash
if first_dup_hash:
self._filename_to_hash[filename] = first_dup_hash
# If only one duplicate left, remove from duplicates tracking
if len(self._duplicate_filenames[filename]) == 1:
del self._duplicate_filenames[filename]
else:
# No duplicates left, remove filename entry completely
del self._duplicate_filenames[filename]
if filename in self._filename_to_hash:
del self._filename_to_hash[filename]
def remove_by_hash(self, sha256: str) -> None:
"""Remove entry by hash"""
sha256 = sha256.lower()
if sha256 in self._hash_to_path:
path = self._hash_to_path[sha256]
filename = self._get_filename_from_path(path)
if filename in self._filename_to_hash:
del self._filename_to_hash[filename]
del self._hash_to_path[sha256]
if sha256 not in self._hash_to_path:
return
# Get the path and filename
path = self._hash_to_path[sha256]
filename = self._get_filename_from_path(path)
# Get all paths for this hash (including duplicates)
paths_to_remove = [path]
if sha256 in self._duplicate_hashes:
paths_to_remove.extend(self._duplicate_hashes[sha256])
del self._duplicate_hashes[sha256]
# Remove hash-to-path mapping
del self._hash_to_path[sha256]
# Update filename-to-hash and duplicate filenames for all paths
for path_to_remove in paths_to_remove:
fname = self._get_filename_from_path(path_to_remove)
# If this filename maps to the hash we're removing, remove it
if fname in self._filename_to_hash and self._filename_to_hash[fname] == sha256:
del self._filename_to_hash[fname]
# Update duplicate filenames tracking
if fname in self._duplicate_filenames:
self._duplicate_filenames[fname] = [p for p in self._duplicate_filenames[fname] if p != path_to_remove]
if not self._duplicate_filenames[fname]:
del self._duplicate_filenames[fname]
elif len(self._duplicate_filenames[fname]) == 1:
# If only one entry remains, it's no longer a duplicate
del self._duplicate_filenames[fname]
def has_hash(self, sha256: str) -> bool:
"""Check if hash exists in index"""
@@ -82,6 +207,8 @@ class ModelHashIndex:
"""Clear all entries"""
self._hash_to_path.clear()
self._filename_to_hash.clear()
self._duplicate_hashes.clear()
self._duplicate_filenames.clear()
def get_all_hashes(self) -> Set[str]:
"""Get all hashes in the index"""
@@ -91,6 +218,14 @@ class ModelHashIndex:
"""Get all filenames in the index"""
return set(self._filename_to_hash.keys())
def get_duplicate_hashes(self) -> Dict[str, List[str]]:
"""Get dictionary of duplicate hashes and their paths"""
return self._duplicate_hashes
def get_duplicate_filenames(self) -> Dict[str, List[str]]:
"""Get dictionary of duplicate filenames and their paths"""
return self._duplicate_filenames
def __len__(self) -> int:
"""Get number of entries"""
return len(self._hash_to_path)

View File

@@ -5,6 +5,7 @@ import asyncio
import time
import shutil
from typing import List, Dict, Optional, Type, Set
import msgpack # Add MessagePack import for efficient serialization
from ..utils.models import BaseModelMetadata
from ..config import config
@@ -17,6 +18,13 @@ from .websocket_manager import ws_manager
logger = logging.getLogger(__name__)
# Define cache version to handle future format changes
# Version history:
# 1 - Initial version
# 2 - Added duplicate_filenames and duplicate_hashes tracking
# 3 - Added _excluded_models list to cache
CACHE_VERSION = 3
class ModelScanner:
"""Base service for scanning and managing model files"""
@@ -38,6 +46,8 @@ class ModelScanner:
self._hash_index = hash_index or ModelHashIndex()
self._tags_count = {} # Dictionary to store tag counts
self._is_initializing = False # Flag to track initialization state
self._excluded_models = [] # List to track excluded models
self._dirs_last_modified = {} # Track directory modification times
# Register this service
asyncio.create_task(self._register_service())
@@ -47,6 +57,183 @@ class ModelScanner:
service_name = f"{self.model_type}_scanner"
await ServiceRegistry.register_service(service_name, self)
def _get_cache_file_path(self) -> Optional[str]:
"""Get the path to the cache file"""
# Get the directory where this module is located
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
# Create a cache directory within the project if it doesn't exist
cache_dir = os.path.join(current_dir, "cache")
os.makedirs(cache_dir, exist_ok=True)
# Create filename based on model type
cache_filename = f"lm_{self.model_type}_cache.msgpack"
return os.path.join(cache_dir, cache_filename)
def _prepare_for_msgpack(self, data):
"""Preprocess data to accommodate MessagePack serialization limitations
Converts integers exceeding safe range to strings
Args:
data: Any type of data structure
Returns:
Preprocessed data structure with large integers converted to strings
"""
if isinstance(data, dict):
return {k: self._prepare_for_msgpack(v) for k, v in data.items()}
elif isinstance(data, list):
return [self._prepare_for_msgpack(item) for item in data]
elif isinstance(data, int) and (data > 9007199254740991 or data < -9007199254740991):
# Convert integers exceeding JavaScript's safe integer range (2^53-1) to strings
return str(data)
else:
return data
async def _save_cache_to_disk(self) -> bool:
    """Save cache data to disk using MessagePack.

    Writes to a temporary file first and atomically swaps it in, so a
    crash mid-write never leaves a corrupt cache file.

    Returns:
        bool: True if the cache was written, False otherwise.
    """
    if self._cache is None or not self._cache.raw_data:
        logger.debug(f"No {self.model_type} cache data to save")
        return False

    cache_path = self._get_cache_file_path()
    if not cache_path:
        logger.warning(f"Cannot determine {self.model_type} cache file location")
        return False

    temp_path = f"{cache_path}.tmp"
    try:
        # Snapshot everything needed to rebuild the scanner state on load
        cache_data = {
            "version": CACHE_VERSION,
            "timestamp": time.time(),
            "model_type": self.model_type,
            "raw_data": self._cache.raw_data,
            "hash_index": {
                "hash_to_path": self._hash_index._hash_to_path,
                "filename_to_hash": self._hash_index._filename_to_hash,
                "duplicate_hashes": self._hash_index._duplicate_hashes,
                "duplicate_filenames": self._hash_index._duplicate_filenames
            },
            "tags_count": self._tags_count,
            "dirs_last_modified": self._get_dirs_last_modified(),
            "excluded_models": self._excluded_models
        }

        # Stringify integers outside the JS-safe range before packing
        processed_cache_data = self._prepare_for_msgpack(cache_data)

        # Write to temporary file first (atomic operation)
        with open(temp_path, 'wb') as f:
            msgpack.pack(processed_cache_data, f)

        # os.replace is atomic and works whether or not the target exists,
        # so the previous exists()/rename() branching was unnecessary
        os.replace(temp_path, cache_path)

        logger.info(f"Saved {self.model_type} cache with {len(self._cache.raw_data)} models to {cache_path}")
        logger.debug(f"Hash index stats - hash_to_path: {len(self._hash_index._hash_to_path)}, filename_to_hash: {len(self._hash_index._filename_to_hash)}, duplicate_hashes: {len(self._hash_index._duplicate_hashes)}, duplicate_filenames: {len(self._hash_index._duplicate_filenames)}")
        return True
    except Exception as e:
        logger.error(f"Error saving {self.model_type} cache to disk: {e}")
        # Best-effort cleanup of a partially written temp file
        if os.path.exists(temp_path):
            try:
                os.remove(temp_path)
            except OSError:
                pass
        return False
def _get_dirs_last_modified(self) -> Dict[str, float]:
"""Get last modified time for all model directories"""
dirs_info = {}
for root in self.get_model_roots():
if os.path.exists(root):
dirs_info[root] = os.path.getmtime(root)
# Also check immediate subdirectories for changes
try:
with os.scandir(root) as it:
for entry in it:
if entry.is_dir(follow_symlinks=True):
dirs_info[entry.path] = entry.stat().st_mtime
except Exception as e:
logger.error(f"Error getting directory info for {root}: {e}")
return dirs_info
def _is_cache_valid(self, cache_data: Dict) -> bool:
    """Check whether a cache payload loaded from disk can still be used.

    Only the cache format version and the model type are validated. Directory
    modification-time checks were intentionally removed so the cache stays
    valid across restarts even when files changed; users refresh explicitly
    when needed.

    Args:
        cache_data: Deserialized cache dictionary (may be None or empty).

    Returns:
        bool: True if the cache matches this scanner's version and model type.
    """
    # Guard the falsy case separately: the original combined condition logged
    # cache_data.get('version') even when cache_data was None, which raised
    # AttributeError inside the f-string.
    if not cache_data:
        logger.info("Cache invalid - empty or missing cache data")
        return False

    if cache_data.get("version") != CACHE_VERSION:
        logger.info(f"Cache invalid - version mismatch. Got: {cache_data.get('version')}, Expected: {CACHE_VERSION}")
        return False

    if cache_data.get("model_type") != self.model_type:
        logger.info(f"Cache invalid - model type mismatch. Got: {cache_data.get('model_type')}, Expected: {self.model_type}")
        return False

    return True
async def _load_cache_from_disk(self) -> bool:
    """Restore scanner state from the on-disk MessagePack cache file.

    Rehydrates the model cache, hash index, tag counts and exclusion list,
    then resorts the cache views.

    Returns:
        bool: True when a valid cache was loaded, False when the file is
        missing, fails validation, or cannot be read.
    """
    started = time.time()
    cache_path = self._get_cache_file_path()
    if not cache_path or not os.path.exists(cache_path):
        return False

    try:
        with open(cache_path, 'rb') as fh:
            payload = msgpack.unpack(fh)

        if not self._is_cache_valid(payload):
            logger.info(f"{self.model_type.capitalize()} cache file found but invalid or outdated")
            return False

        # Rebuild the in-memory cache; sorted views are regenerated by resort().
        self._cache = ModelCache(
            raw_data=payload["raw_data"],
            sorted_by_name=[],
            sorted_by_date=[],
            folders=[]
        )

        # Restore the hash index ('filename_to_hash' replaced the old
        # 'path_to_hash' key in the serialized format).
        index_state = payload.get("hash_index", {})
        self._hash_index._hash_to_path = index_state.get("hash_to_path", {})
        self._hash_index._filename_to_hash = index_state.get("filename_to_hash", {})
        self._hash_index._duplicate_hashes = index_state.get("duplicate_hashes", {})
        self._hash_index._duplicate_filenames = index_state.get("duplicate_filenames", {})

        self._tags_count = payload.get("tags_count", {})
        self._excluded_models = payload.get("excluded_models", [])

        await self._cache.resort()
        logger.info(f"Loaded {self.model_type} cache from disk with {len(self._cache.raw_data)} models in {time.time() - started:.2f} seconds")
        return True
    except Exception as e:
        logger.error(f"Error loading {self.model_type} cache from disk: {e}")
        return False
async def initialize_in_background(self) -> None:
"""Initialize cache in background using thread pool"""
try:
@@ -65,7 +252,31 @@ class ModelScanner:
# Determine the page type based on model type
page_type = 'loras' if self.model_type == 'lora' else 'checkpoints'
# First, count all model files to track progress
# First, try to load from cache
await ws_manager.broadcast_init_progress({
'stage': 'loading_cache',
'progress': 0,
'details': f"Loading {self.model_type} cache...",
'scanner_type': self.model_type,
'pageType': page_type
})
cache_loaded = await self._load_cache_from_disk()
if cache_loaded:
# Cache loaded successfully, broadcast complete message
await ws_manager.broadcast_init_progress({
'stage': 'finalizing',
'progress': 100,
'status': 'complete',
'details': f"Loaded {len(self._cache.raw_data)} {self.model_type} files from cache.",
'scanner_type': self.model_type,
'pageType': page_type
})
self._is_initializing = False
return
# If cache loading failed, proceed with full scan
await ws_manager.broadcast_init_progress({
'stage': 'scan_folders',
'progress': 0,
@@ -110,6 +321,9 @@ class ModelScanner:
logger.info(f"{self.model_type.capitalize()} cache initialized in {time.time() - start_time:.2f} seconds. Found {len(self._cache.raw_data)} models")
# Save the cache to disk after initialization
await self._save_cache_to_disk()
# Send completion message
await asyncio.sleep(0.5) # Small delay to ensure final progress message is sent
await ws_manager.broadcast_init_progress({
@@ -279,8 +493,13 @@ class ModelScanner:
# Clean up the event loop
loop.close()
async def get_cached_data(self, force_refresh: bool = False) -> ModelCache:
"""Get cached model data, refresh if needed"""
async def get_cached_data(self, force_refresh: bool = False, rebuild_cache: bool = False) -> ModelCache:
"""Get cached model data, refresh if needed
Args:
force_refresh: Whether to refresh the cache
rebuild_cache: Whether to completely rebuild the cache by reloading from disk first
"""
# If cache is not initialized, return an empty cache
# Actual initialization should be done via initialize_in_background
if self._cache is None and not force_refresh:
@@ -292,10 +511,25 @@ class ModelScanner:
)
# If force refresh is requested, initialize the cache directly
if (force_refresh):
if force_refresh:
# If rebuild_cache is True, try to reload from disk before reconciliation
if rebuild_cache:
logger.info(f"{self.model_type.capitalize()} Scanner: Attempting to rebuild cache from disk...")
cache_loaded = await self._load_cache_from_disk()
if cache_loaded:
logger.info(f"{self.model_type.capitalize()} Scanner: Successfully reloaded cache from disk")
else:
logger.info(f"{self.model_type.capitalize()} Scanner: Could not reload cache from disk, proceeding with complete rebuild")
# If loading from disk failed, do a complete rebuild and save to disk
await self._initialize_cache()
await self._save_cache_to_disk()
return self._cache
if self._cache is None:
# For initial creation, do a full initialization
await self._initialize_cache()
# Save the newly built cache
await self._save_cache_to_disk()
else:
# For subsequent refreshes, use fast reconciliation
await self._reconcile_cache()
@@ -394,6 +628,9 @@ class ModelScanner:
if file_path in cached_paths:
found_paths.add(file_path)
continue
if file_path in self._excluded_models:
continue
# Try case-insensitive match on Windows
if os.name == 'nt':
@@ -406,7 +643,7 @@ class ModelScanner:
break
if matched:
continue
# This is a new file to process
new_files.append(file_path)
@@ -480,6 +717,9 @@ class ModelScanner:
# Resort cache
await self._cache.resort()
# Save updated cache to disk
await self._save_cache_to_disk()
logger.info(f"{self.model_type.capitalize()} Scanner: Cache reconciliation completed in {time.time() - start_time:.2f} seconds. Added {total_added}, removed {total_removed} models.")
except Exception as e:
logger.error(f"{self.model_type.capitalize()} Scanner: Error reconciling cache: {e}", exc_info=True)
@@ -586,6 +826,11 @@ class ModelScanner:
model_data = metadata.to_dict()
# Skip excluded models
if model_data.get('exclude', False):
self._excluded_models.append(model_data['file_path'])
return None
await self._fetch_missing_metadata(file_path, model_data)
rel_path = os.path.relpath(file_path, root_path)
folder = os.path.dirname(rel_path)
@@ -610,7 +855,10 @@ class ModelScanner:
model_id = str(model_id)
tags_missing = not model_data.get('tags') or len(model_data.get('tags', [])) == 0
desc_missing = not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")
needs_metadata_update = tags_missing or desc_missing
# TODO: not for now, but later we should check if the creator is missing
# creator_missing = not model_data.get('civitai', {}).get('creator')
creator_missing = False
needs_metadata_update = tags_missing or desc_missing or creator_missing
if needs_metadata_update and model_id:
logger.debug(f"Fetching missing metadata for {file_path} with model ID {model_id}")
@@ -636,6 +884,8 @@ class ModelScanner:
if model_metadata.get('description') and (not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")):
model_data['modelDescription'] = model_metadata['description']
model_data['civitai']['creator'] = model_metadata['creator']
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
with open(metadata_path, 'w', encoding='utf-8') as f:
@@ -684,6 +934,44 @@ class ModelScanner:
models_list.append(result)
except Exception as e:
logger.error(f"Error processing {file_path}: {e}")
async def add_model_to_cache(self, metadata_dict: Dict, folder: str = '') -> bool:
    """Add a single model's metadata to the in-memory cache and persist it.

    Args:
        metadata_dict: The model metadata dictionary. Should contain
            'file_path' and 'sha256'; if either is missing, the hash-index
            update is skipped (with a warning) instead of aborting the add.
        folder: The relative folder path for the model.

    Returns:
        bool: True if successful, False otherwise.
    """
    try:
        # Lazily initialize the cache if this is called before a scan.
        if self._cache is None:
            await self.get_cached_data()

        # Record the folder on the entry itself so sorting/grouping see it.
        metadata_dict['folder'] = folder

        self._cache.raw_data.append(metadata_dict)
        await self._cache.resort()

        # Keep the folder list unique and case-insensitively sorted.
        all_folders = set(self._cache.folders)
        all_folders.add(folder)
        self._cache.folders = sorted(all_folders, key=lambda x: x.lower())

        # Guard against partially-populated metadata: the original indexed
        # metadata_dict['sha256'] unconditionally, so a missing key raised
        # KeyError after raw_data was already mutated, returning False with
        # the cache half-updated.
        sha256 = metadata_dict.get('sha256')
        file_path = metadata_dict.get('file_path')
        if sha256 and file_path:
            self._hash_index.add_entry(sha256, file_path)
        else:
            logger.warning(f"Model added to cache without hash index entry (missing sha256/file_path): {file_path}")

        # Persist the updated cache so a restart does not lose the new model.
        await self._save_cache_to_disk()

        return True
    except Exception as e:
        logger.error(f"Error adding model to cache: {e}")
        return False
async def move_model(self, source_path: str, target_path: str) -> bool:
"""Move a model and its associated files to a new location"""
@@ -729,25 +1017,40 @@ class ModelScanner:
shutil.move(real_source, real_target)
source_metadata = os.path.join(source_dir, f"{base_name}.metadata.json")
# Move all associated files with the same base name
source_metadata = None
moved_metadata_path = None
# Find all files with the same base name in the source directory
files_to_move = []
try:
for file in os.listdir(source_dir):
if file.startswith(base_name + ".") and file != os.path.basename(source_path):
source_file_path = os.path.join(source_dir, file)
# Store metadata file path for special handling
if file == f"{base_name}.metadata.json":
source_metadata = source_file_path
moved_metadata_path = os.path.join(target_path, file)
else:
files_to_move.append((source_file_path, os.path.join(target_path, file)))
except Exception as e:
logger.error(f"Error listing files in {source_dir}: {e}")
# Move all associated files
metadata = None
if os.path.exists(source_metadata):
target_metadata = os.path.join(target_path, f"{base_name}.metadata.json")
shutil.move(source_metadata, target_metadata)
metadata = await self._update_metadata_paths(target_metadata, target_file)
for source_file, target_file_path in files_to_move:
try:
shutil.move(source_file, target_file_path)
except Exception as e:
logger.error(f"Error moving associated file {source_file}: {e}")
# Move civitai.info file if exists
source_civitai = os.path.join(source_dir, f"{base_name}.civitai.info")
if os.path.exists(source_civitai):
target_civitai = os.path.join(target_path, f"{base_name}.civitai.info")
shutil.move(source_civitai, target_civitai)
for ext in PREVIEW_EXTENSIONS:
source_preview = os.path.join(source_dir, f"{base_name}{ext}")
if os.path.exists(source_preview):
target_preview = os.path.join(target_path, f"{base_name}{ext}")
shutil.move(source_preview, target_preview)
break
# Handle metadata file specially to update paths
if source_metadata and os.path.exists(source_metadata):
try:
shutil.move(source_metadata, moved_metadata_path)
metadata = await self._update_metadata_paths(moved_metadata_path, target_file)
except Exception as e:
logger.error(f"Error moving metadata file: {e}")
await self.update_single_model_cache(source_path, target_file, metadata)
@@ -825,6 +1128,9 @@ class ModelScanner:
await cache.resort()
# Save the updated cache
await self._save_cache_to_disk()
return True
def has_hash(self, sha256: str) -> bool:
@@ -900,6 +1206,10 @@ class ModelScanner:
logger.error(f"Error getting model info by name: {e}", exc_info=True)
return None
def get_excluded_models(self) -> List[str]:
    """Return a defensive copy of the excluded model file paths.

    Returns:
        List[str]: A new list; mutating it does not affect scanner state.
    """
    return list(self._excluded_models)
async def update_preview_in_cache(self, file_path: str, preview_url: str) -> bool:
"""Update preview URL in cache for a specific lora
@@ -913,4 +1223,171 @@ class ModelScanner:
if self._cache is None:
return False
return await self._cache.update_preview_url(file_path, preview_url)
updated = await self._cache.update_preview_url(file_path, preview_url)
if updated:
# Save updated cache to disk
await self._save_cache_to_disk()
return updated
async def bulk_delete_models(self, file_paths: List[str]) -> Dict:
    """Delete multiple models (and their associated files) in one batch.

    Args:
        file_paths: List of model file paths to delete.

    Returns:
        Dict with 'success', 'total_deleted', 'total_attempted',
        'cache_updated' and per-path 'results'. 'success' is False only when
        no paths were supplied or an unexpected error aborted the whole
        batch; individual per-file failures are reported inside 'results'.
    """
    try:
        if not file_paths:
            return {
                'success': False,
                'error': 'No file paths provided for deletion',
                'results': []
            }

        # Optional file monitor, passed through so deletions do not trigger
        # change events.
        file_monitor = getattr(self, 'file_monitor', None)

        results = []
        total_deleted = 0
        cache_updated = False

        # Ensure the cache is initialized before we mutate it below; the
        # returned object itself was bound to an unused local in the
        # original.
        await self.get_cached_data()

        # Hoisted out of the loop — the original re-ran this import for
        # every file path.
        from ..utils.routes_common import ModelRouteUtils

        # Collect deleted paths so the cache is updated once at the end.
        deleted_models = []

        for file_path in file_paths:
            try:
                target_dir = os.path.dirname(file_path)
                file_name = os.path.splitext(os.path.basename(file_path))[0]

                # Delete all associated files for the model
                deleted_files = await ModelRouteUtils.delete_model_files(
                    target_dir,
                    file_name,
                    file_monitor
                )

                if deleted_files:
                    deleted_models.append(file_path)
                    results.append({
                        'file_path': file_path,
                        'success': True,
                        'deleted_files': deleted_files
                    })
                    total_deleted += 1
                else:
                    results.append({
                        'file_path': file_path,
                        'success': False,
                        'error': 'No files deleted'
                    })
            except Exception as e:
                logger.error(f"Error deleting file {file_path}: {e}")
                results.append({
                    'file_path': file_path,
                    'success': False,
                    'error': str(e)
                })

        # Single batched cache update instead of one write per deleted model.
        if deleted_models:
            cache_updated = await self._batch_update_cache_for_deleted_models(deleted_models)

        return {
            'success': True,
            'total_deleted': total_deleted,
            'total_attempted': len(file_paths),
            'cache_updated': cache_updated,
            'results': results
        }
    except Exception as e:
        logger.error(f"Error in bulk delete: {e}", exc_info=True)
        return {
            'success': False,
            'error': str(e),
            'results': []
        }
async def _batch_update_cache_for_deleted_models(self, file_paths: List[str]) -> bool:
"""Update cache after multiple models have been deleted
Args:
file_paths: List of file paths that were deleted
Returns:
bool: True if cache was updated and saved successfully
"""
if not file_paths or self._cache is None:
return False
try:
# Get all models that need to be removed from cache
models_to_remove = [item for item in self._cache.raw_data if item['file_path'] in file_paths]
if not models_to_remove:
return False
# Update tag counts
for model in models_to_remove:
for tag in model.get('tags', []):
if tag in self._tags_count:
self._tags_count[tag] = max(0, self._tags_count[tag] - 1)
if self._tags_count[tag] == 0:
del self._tags_count[tag]
# Update hash index
for model in models_to_remove:
file_path = model['file_path']
if hasattr(self, '_hash_index') and self._hash_index:
# Get the hash and filename before removal for duplicate checking
file_name = os.path.splitext(os.path.basename(file_path))[0]
hash_val = model.get('sha256', '').lower()
# Remove from hash index
self._hash_index.remove_by_path(file_path)
# Check and clean up duplicates
self._cleanup_duplicates_after_removal(hash_val, file_name)
# Update cache data
self._cache.raw_data = [item for item in self._cache.raw_data if item['file_path'] not in file_paths]
# Resort cache
await self._cache.resort()
# Save updated cache to disk
await self._save_cache_to_disk()
return True
except Exception as e:
logger.error(f"Error updating cache after bulk delete: {e}", exc_info=True)
return False
def _cleanup_duplicates_after_removal(self, hash_val: str, file_name: str) -> None:
"""Clean up duplicate entries in hash index after removing a model
Args:
hash_val: SHA256 hash of the removed model
file_name: File name of the removed model without extension
"""
if not hash_val or not file_name or not hasattr(self, '_hash_index'):
return
# Clean up hash duplicates if only 0 or 1 entries remain
if hash_val in self._hash_index._duplicate_hashes:
if len(self._hash_index._duplicate_hashes[hash_val]) <= 1:
del self._hash_index._duplicate_hashes[hash_val]
# Clean up filename duplicates if only 0 or 1 entries remain
if file_name in self._hash_index._duplicate_filenames:
if len(self._hash_index._duplicate_filenames[file_name]) <= 1:
del self._hash_index._duplicate_filenames[file_name]

View File

@@ -2,6 +2,7 @@ import asyncio
from typing import List, Dict
from dataclasses import dataclass
from operator import itemgetter
from natsort import natsorted
@dataclass
class RecipeCache:
@@ -16,7 +17,7 @@ class RecipeCache:
async def resort(self, name_only: bool = False):
"""Resort all cached data views"""
async with self._lock:
self.sorted_by_name = sorted(
self.sorted_by_name = natsorted(
self.raw_data,
key=lambda x: x.get('title', '').lower() # Case-insensitive sort
)

View File

@@ -9,6 +9,7 @@ from .recipe_cache import RecipeCache
from .service_registry import ServiceRegistry
from .lora_scanner import LoraScanner
from ..utils.utils import fuzzy_match
from natsort import natsorted
import sys
logger = logging.getLogger(__name__)
@@ -164,7 +165,7 @@ class RecipeScanner:
if hasattr(self._cache, "resort"):
try:
# Sort by name
self._cache.sorted_by_name = sorted(
self._cache.sorted_by_name = natsorted(
self._cache.raw_data,
key=lambda x: x.get('title', '').lower()
)
@@ -321,6 +322,20 @@ class RecipeScanner:
# Update lora information with local paths and availability
await self._update_lora_information(recipe_data)
# Calculate and update fingerprint if missing
if 'loras' in recipe_data and 'fingerprint' not in recipe_data:
from ..utils.utils import calculate_recipe_fingerprint
fingerprint = calculate_recipe_fingerprint(recipe_data['loras'])
recipe_data['fingerprint'] = fingerprint
# Write updated recipe data back to file
try:
with open(recipe_path, 'w', encoding='utf-8') as f:
json.dump(recipe_data, f, indent=4, ensure_ascii=False)
logger.info(f"Added fingerprint to recipe: {recipe_path}")
except Exception as e:
logger.error(f"Error writing updated recipe with fingerprint: {e}")
return recipe_data
except Exception as e:
@@ -801,3 +816,60 @@ class RecipeScanner:
logger.info(f"Resorted recipe cache after updating {cache_updated_count} items")
return file_updated_count, cache_updated_count
async def find_recipes_by_fingerprint(self, fingerprint: str) -> list:
    """Find recipes whose stored fingerprint matches the given value.

    Args:
        fingerprint: The recipe fingerprint to search for.

    Returns:
        list: Summary dicts (id, title, file_url, modified, created_date,
        lora_count) for each matching recipe; empty for a falsy fingerprint.
    """
    if not fingerprint:
        return []

    cache = await self.get_cached_data()

    # Build the summary view for every cached recipe with a matching print.
    return [
        {
            'id': recipe.get('id'),
            'title': recipe.get('title'),
            'file_url': self._format_file_url(recipe.get('file_path')),
            'modified': recipe.get('modified'),
            'created_date': recipe.get('created_date'),
            'lora_count': len(recipe.get('loras', []))
        }
        for recipe in cache.raw_data
        if recipe.get('fingerprint') == fingerprint
    ]
async def find_all_duplicate_recipes(self) -> dict:
    """Group recipes sharing a fingerprint and report the duplicate groups.

    Returns:
        dict: Mapping of fingerprint -> list of recipe IDs, containing only
        fingerprints shared by more than one recipe. Recipes without a
        fingerprint are ignored.
    """
    cache = await self.get_cached_data()

    # Bucket recipe IDs by fingerprint.
    groups: dict = {}
    for recipe in cache.raw_data:
        fp = recipe.get('fingerprint')
        if not fp:
            continue
        groups.setdefault(fp, []).append(recipe.get('id'))

    # Only groups with two or more members count as duplicates.
    return {fp: ids for fp, ids in groups.items() if len(ids) > 1}

View File

@@ -233,6 +233,17 @@ async def load_metadata(file_path: str, model_class: Type[BaseModelMetadata] = L
data['usage_tips'] = "{}"
needs_update = True
# Update preview_nsfw_level if needed
civitai_data = data.get('civitai', {})
civitai_images = civitai_data.get('images', []) if civitai_data else []
if (data.get('preview_url') and
data.get('preview_nsfw_level', 0) == 0 and
civitai_images and
civitai_images[0].get('nsfwLevel', 0) != 0):
data['preview_nsfw_level'] = civitai_images[0]['nsfwLevel']
# TODO: write to metadata file
# needs_update = True
if needs_update:
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)

View File

@@ -1,7 +1,11 @@
from safetensors import safe_open
from typing import Dict
from typing import Dict, List, Tuple
from .model_utils import determine_base_model
import os
import logging
import json
logger = logging.getLogger(__name__)
async def extract_lora_metadata(file_path: str) -> Dict:
"""Extract essential metadata from safetensors file"""
@@ -77,4 +81,53 @@ async def extract_checkpoint_metadata(file_path: str) -> dict:
except Exception as e:
logger.error(f"Error extracting checkpoint metadata for {file_path}: {e}")
# Return default values
return {'base_model': 'Unknown', 'model_type': 'checkpoint'}
return {'base_model': 'Unknown', 'model_type': 'checkpoint'}
async def extract_trained_words(file_path: str) -> Tuple[List[Tuple[str, int]], str]:
    """Extract trained words from a safetensors file and sort by frequency.

    Args:
        file_path: Path to the safetensors file.

    Returns:
        Tuple of:
        - List of (word, frequency) tuples sorted by frequency (highest first)
        - class_tokens value (or None if not found)
    """
    class_tokens = None
    try:
        with safe_open(file_path, framework="pt", device="cpu") as f:
            metadata = f.metadata()

            # class_tokens lives in the first subset of the first dataset
            # when the training tool recorded ss_datasets.
            if metadata and "ss_datasets" in metadata:
                try:
                    datasets = json.loads(metadata["ss_datasets"])
                    if datasets and isinstance(datasets, list) and datasets[0].get("subsets"):
                        subsets = datasets[0].get("subsets", [])
                        if subsets and isinstance(subsets, list) and len(subsets) > 0:
                            class_tokens = subsets[0].get("class_tokens")
                except Exception as e:
                    logger.error(f"Error parsing ss_datasets for class_tokens: {str(e)}")

            # ss_tag_frequency maps a single outer key (usually the image
            # directory name) to the {word: count} dictionary we want.
            if metadata and "ss_tag_frequency" in metadata:
                tag_data = json.loads(metadata["ss_tag_frequency"])
                if tag_data:
                    inner = tag_data[next(iter(tag_data))]
                    ranked = sorted(inner.items(), key=lambda kv: kv[1], reverse=True)
                    return ranked, class_tokens
    except Exception as e:
        logger.error(f"Error extracting trained words from {file_path}: {str(e)}")

    # No tag frequency available (or an error occurred).
    return [], class_tokens

View File

@@ -23,6 +23,7 @@ class BaseModelMetadata:
modelDescription: str = "" # Full model description
civitai_deleted: bool = False # Whether deleted from Civitai
favorite: bool = False # Whether the model is a favorite
exclude: bool = False # Whether to exclude this model from the cache
def __post_init__(self):
# Initialize empty lists to avoid mutable default parameter issue

File diff suppressed because it is too large Load Diff

View File

@@ -40,6 +40,7 @@ class ModelRouteUtils:
civitai_metadata: Dict, client: CivitaiClient) -> None:
"""Update local metadata with CivitAI data"""
local_metadata['civitai'] = civitai_metadata
local_metadata['from_civitai'] = True
# Update model name if available
if 'model' in civitai_metadata:
@@ -50,9 +51,10 @@ class ModelRouteUtils:
model_id = civitai_metadata['modelId']
if model_id:
model_metadata, _ = await client.get_model_metadata(str(model_id))
if model_metadata:
if (model_metadata):
local_metadata['modelDescription'] = model_metadata.get('description', '')
local_metadata['tags'] = model_metadata.get('tags', [])
local_metadata['civitai']['creator'] = model_metadata['creator']
# Update base model
local_metadata['base_model'] = determine_base_model(civitai_metadata.get('baseModel'))
@@ -60,7 +62,7 @@ class ModelRouteUtils:
# Update preview if needed
if not local_metadata.get('preview_url') or not os.path.exists(local_metadata['preview_url']):
first_preview = next((img for img in civitai_metadata.get('images', [])), None)
if first_preview:
if (first_preview):
# Determine if content is video or image
is_video = first_preview['type'] == 'video'
@@ -142,6 +144,11 @@ class ModelRouteUtils:
"""
client = CivitaiClient()
try:
# Validate input parameters
if not isinstance(model_data, dict):
logger.error(f"Invalid model_data type: {type(model_data)}")
return False
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
# Check if model metadata exists
@@ -165,21 +172,25 @@ class ModelRouteUtils:
client
)
# Update cache object directly
model_data.update({
# Update cache object directly using safe .get() method
update_dict = {
'model_name': local_metadata.get('model_name'),
'preview_url': local_metadata.get('preview_url'),
'from_civitai': True,
'civitai': civitai_metadata
})
}
model_data.update(update_dict)
# Update cache using the provided function
await update_cache_func(file_path, file_path, local_metadata)
return True
except KeyError as e:
logger.error(f"Error fetching CivitAI data - Missing key: {e} in model_data={model_data}")
return False
except Exception as e:
logger.error(f"Error fetching CivitAI data: {e}")
logger.error(f"Error fetching CivitAI data: {str(e)}", exc_info=True) # Include stack trace
return False
finally:
await client.close()
@@ -193,7 +204,7 @@ class ModelRouteUtils:
fields = [
"id", "modelId", "name", "createdAt", "updatedAt",
"publishedAt", "trainedWords", "baseModel", "description",
"model", "images"
"model", "images", "creator"
]
return {k: data[k] for k in fields if k in data}
@@ -292,6 +303,8 @@ class ModelRouteUtils:
# Update hash index if available
if hasattr(scanner, '_hash_index') and scanner._hash_index:
scanner._hash_index.remove_by_path(file_path)
await scanner._save_cache_to_disk()
return web.json_response({
'success': True,
@@ -424,6 +437,67 @@ class ModelRouteUtils:
logger.error(f"Error replacing preview: {e}", exc_info=True)
return web.Response(text=str(e), status=500)
@staticmethod
async def handle_exclude_model(request: web.Request, scanner) -> web.Response:
    """Handle model exclusion request.

    Marks the model's sidecar .metadata.json with exclude=True, removes the
    model from the scanner's in-memory cache (tag counts, hash index, raw
    data), records the path in the scanner's excluded list, and persists the
    cache to disk.

    Args:
        request: The aiohttp request; JSON body must contain 'file_path'.
        scanner: The model scanner instance with cache management methods.

    Returns:
        web.Response: JSON success payload, 400 if 'file_path' is missing,
        or 500 with the error text on failure.
    """
    try:
        data = await request.json()
        file_path = data.get('file_path')

        if not file_path:
            return web.Response(text='Model path is required', status=400)

        # Update metadata to mark as excluded so future scans skip this file.
        metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
        metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
        metadata['exclude'] = True

        # Save updated metadata
        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(metadata, f, indent=2, ensure_ascii=False)

        # Update cache
        cache = await scanner.get_cached_data()

        # Find and remove model from cache
        model_to_remove = next((item for item in cache.raw_data if item['file_path'] == file_path), None)

        if model_to_remove:
            # Update tags count, dropping tags whose count reaches zero.
            for tag in model_to_remove.get('tags', []):
                if tag in scanner._tags_count:
                    scanner._tags_count[tag] = max(0, scanner._tags_count[tag] - 1)
                    if scanner._tags_count[tag] == 0:
                        del scanner._tags_count[tag]

            # Remove from hash index if available
            if hasattr(scanner, '_hash_index') and scanner._hash_index:
                scanner._hash_index.remove_by_path(file_path)

            # Remove from cache data and regenerate the sorted views.
            cache.raw_data = [item for item in cache.raw_data if item['file_path'] != file_path]
            await cache.resort()

        # Add to excluded models list and persist the cache.
        # NOTE(review): no duplicate check here — excluding the same model
        # twice would append the path twice; confirm callers guard this.
        scanner._excluded_models.append(file_path)
        await scanner._save_cache_to_disk()

        return web.json_response({
            'success': True,
            'message': f"Model {os.path.basename(file_path)} excluded"
        })
    except Exception as e:
        logger.error(f"Error excluding model: {e}", exc_info=True)
        return web.Response(text=str(e), status=500)
@staticmethod
async def handle_download_model(request: web.Request, download_manager: DownloadManager, model_type="lora") -> web.Response:
"""Handle model download request
@@ -500,4 +574,107 @@ class ModelRouteUtils:
)
logger.error(f"Error downloading {model_type}: {error_message}")
return web.Response(status=500, text=error_message)
return web.Response(status=500, text=error_message)
@staticmethod
async def handle_bulk_delete_models(request: web.Request, scanner) -> web.Response:
    """Handle bulk deletion of models.

    Args:
        request: The aiohttp request; JSON body must contain 'file_paths'.
        scanner: The model scanner instance with cache management methods.

    Returns:
        web.Response: JSON summary of the batch deletion, 400 when no paths
        were supplied, or 500 on unexpected failure.
    """
    try:
        payload = await request.json()
        file_paths = payload.get('file_paths', [])

        if not file_paths:
            return web.json_response({
                'success': False,
                'error': 'No file paths provided for deletion'
            }, status=400)

        # Delegate file removal and cache bookkeeping to the scanner.
        outcome = await scanner.bulk_delete_models(file_paths)

        return web.json_response({
            'success': outcome.get('success', False),
            'total_deleted': outcome.get('total_deleted', 0),
            'total_attempted': outcome.get('total_attempted', len(file_paths)),
            'results': outcome.get('results', [])
        })
    except Exception as e:
        logger.error(f"Error in bulk delete: {e}", exc_info=True)
        return web.json_response({
            'success': False,
            'error': str(e)
        }, status=500)
@staticmethod
async def handle_relink_civitai(request: web.Request, scanner) -> web.Response:
    """Handle CivitAI metadata re-linking request by model version ID.

    Fetches the version's metadata from CivitAI, replaces the local
    metadata's SHA256 with the hash of the version's primary model file,
    rewrites the sidecar metadata, and refreshes the scanner cache entry.

    Args:
        request: The aiohttp request; JSON body must contain 'file_path'
            and 'model_version_id'.
        scanner: The model scanner instance with cache management methods.

    Returns:
        web.Response: JSON success payload with the new hash, 400 on missing
        parameters, 404 when the version or its SHA256 cannot be found, or
        500 on unexpected failure.
    """
    try:
        data = await request.json()
        file_path = data.get('file_path')
        model_version_id = data.get('model_version_id')

        if not file_path or not model_version_id:
            return web.json_response({"success": False, "error": "Both file_path and model_version_id are required"}, status=400)

        metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'

        # Check if model metadata exists
        local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)

        # Create a client for fetching from Civitai
        client = await CivitaiClient.get_instance()

        try:
            # Fetch metadata by model version ID
            civitai_metadata, error = await client.get_model_version_info(model_version_id)

            if not civitai_metadata:
                error_msg = error or "Model version not found on CivitAI"
                return web.json_response({"success": False, "error": error_msg}, status=404)

            # Find the primary model file to get the correct SHA256 hash
            primary_model_file = None
            for file in civitai_metadata.get('files', []):
                if file.get('primary', False) and file.get('type') == 'Model':
                    primary_model_file = file
                    break

            if not primary_model_file or not primary_model_file.get('hashes', {}).get('SHA256'):
                return web.json_response({"success": False, "error": "No SHA256 hash found in model metadata"}, status=404)

            # Update the SHA256 hash in local metadata (convert to lowercase)
            local_metadata['sha256'] = primary_model_file['hashes']['SHA256'].lower()

            # Update metadata with CivitAI information
            await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, client)

            # Update the cache (same source/target path: metadata-only change)
            await scanner.update_single_model_cache(file_path, file_path, local_metadata)

            return web.json_response({
                "success": True,
                "message": f"Model successfully re-linked to Civitai version {model_version_id}",
                "hash": local_metadata['sha256']
            })
        finally:
            # NOTE(review): get_instance() suggests a shared/singleton
            # client, yet close() is called here — confirm this does not
            # tear down a client other handlers are still using.
            await client.close()
    except Exception as e:
        logger.error(f"Error re-linking to CivitAI: {e}", exc_info=True)
        return web.json_response({"success": False, "error": str(e)}, status=500)

View File

@@ -4,6 +4,8 @@ import sys
import time
import asyncio
import logging
import datetime
import shutil
from typing import Dict, Set
from ..config import config
@@ -26,6 +28,7 @@ class UsageStats:
# Default stats file name
STATS_FILENAME = "lora_manager_stats.json"
BACKUP_SUFFIX = ".backup"
def __new__(cls):
if cls._instance is None:
@@ -39,8 +42,8 @@ class UsageStats:
# Initialize stats storage
self.stats = {
"checkpoints": {}, # sha256 -> count
"loras": {}, # sha256 -> count
"checkpoints": {}, # sha256 -> { total: count, history: { date: count } }
"loras": {}, # sha256 -> { total: count, history: { date: count } }
"total_executions": 0,
"last_save_time": 0
}
@@ -70,6 +73,68 @@ class UsageStats:
# Use the first lora root
return os.path.join(config.loras_roots[0], self.STATS_FILENAME)
def _backup_old_stats(self):
    """Backup the old stats file before conversion.

    Copies the current stats file to "<path>.backup" (copy2 preserves file
    metadata) so pre-conversion data can be recovered if migration goes wrong.

    Returns:
        True if the backup was written, False if copying failed; returns
        None (implicitly, falsy) when no stats file exists yet.
    """
    if os.path.exists(self._stats_file_path):
        backup_path = f"{self._stats_file_path}{self.BACKUP_SUFFIX}"
        try:
            shutil.copy2(self._stats_file_path, backup_path)
            logger.info(f"Backed up old stats file to {backup_path}")
            return True
        except Exception as e:
            # Best-effort: a failed backup is logged but does not abort conversion
            logger.error(f"Failed to backup stats file: {e}")
            return False
def _convert_old_format(self, old_stats):
    """Convert old stats format (hash -> bare count) to the new format.

    New format stores hash -> {"total": count, "history": {date: count}}.
    Since the old format kept no dates, all historical usage is attributed
    to today's date.
    """
    # Today's date in YYYY-MM-DD form, used as the single history bucket
    today = datetime.datetime.now().strftime("%Y-%m-%d")

    converted = {
        "checkpoints": {},
        "loras": {},
        "total_executions": old_stats.get("total_executions", 0),
        "last_save_time": old_stats.get("last_save_time", time.time())
    }

    # Both model sections convert identically: wrap each raw count in the
    # new {total, history} structure.
    for section in ("checkpoints", "loras"):
        old_section = old_stats.get(section)
        if isinstance(old_section, dict):
            for hash_id, count in old_section.items():
                converted[section][hash_id] = {
                    "total": count,
                    "history": {today: count}
                }

    logger.info("Successfully converted stats from old format to new format with history")
    return converted
def _is_old_format(self, stats):
"""Check if the stats are in the old format (direct count values)"""
# Check if any lora or checkpoint entry is a direct number instead of an object
if "loras" in stats and isinstance(stats["loras"], dict):
for hash_id, data in stats["loras"].items():
if isinstance(data, (int, float)):
return True
if "checkpoints" in stats and isinstance(stats["checkpoints"], dict):
for hash_id, data in stats["checkpoints"].items():
if isinstance(data, (int, float)):
return True
return False
def _load_stats(self):
"""Load existing statistics from file"""
try:
@@ -77,18 +142,27 @@ class UsageStats:
with open(self._stats_file_path, 'r', encoding='utf-8') as f:
loaded_stats = json.load(f)
# Update our stats with loaded data
if isinstance(loaded_stats, dict):
# Update individual sections to maintain structure
if "checkpoints" in loaded_stats and isinstance(loaded_stats["checkpoints"], dict):
self.stats["checkpoints"] = loaded_stats["checkpoints"]
if "loras" in loaded_stats and isinstance(loaded_stats["loras"], dict):
self.stats["loras"] = loaded_stats["loras"]
if "total_executions" in loaded_stats:
self.stats["total_executions"] = loaded_stats["total_executions"]
# Check if old format and needs conversion
if self._is_old_format(loaded_stats):
logger.info("Detected old stats format, performing conversion")
self._backup_old_stats()
self.stats = self._convert_old_format(loaded_stats)
else:
# Update our stats with loaded data (already in new format)
if isinstance(loaded_stats, dict):
# Update individual sections to maintain structure
if "checkpoints" in loaded_stats and isinstance(loaded_stats["checkpoints"], dict):
self.stats["checkpoints"] = loaded_stats["checkpoints"]
if "loras" in loaded_stats and isinstance(loaded_stats["loras"], dict):
self.stats["loras"] = loaded_stats["loras"]
if "total_executions" in loaded_stats:
self.stats["total_executions"] = loaded_stats["total_executions"]
if "last_save_time" in loaded_stats:
self.stats["last_save_time"] = loaded_stats["last_save_time"]
logger.info(f"Loaded usage statistics from {self._stats_file_path}")
except Exception as e:
logger.error(f"Error loading usage statistics: {e}")
@@ -174,15 +248,18 @@ class UsageStats:
# Increment total executions count
self.stats["total_executions"] += 1
# Get today's date in YYYY-MM-DD format
today = datetime.datetime.now().strftime("%Y-%m-%d")
# Process checkpoints
if MODELS in metadata and isinstance(metadata[MODELS], dict):
await self._process_checkpoints(metadata[MODELS])
await self._process_checkpoints(metadata[MODELS], today)
# Process loras
if LORAS in metadata and isinstance(metadata[LORAS], dict):
await self._process_loras(metadata[LORAS])
await self._process_loras(metadata[LORAS], today)
async def _process_checkpoints(self, models_data):
async def _process_checkpoints(self, models_data, today_date):
"""Process checkpoint models from metadata"""
try:
# Get checkpoint scanner service
@@ -208,12 +285,24 @@ class UsageStats:
# Get hash for this checkpoint
model_hash = checkpoint_scanner.get_hash_by_filename(model_filename)
if model_hash:
# Update stats for this checkpoint
self.stats["checkpoints"][model_hash] = self.stats["checkpoints"].get(model_hash, 0) + 1
# Update stats for this checkpoint with date tracking
if model_hash not in self.stats["checkpoints"]:
self.stats["checkpoints"][model_hash] = {
"total": 0,
"history": {}
}
# Increment total count
self.stats["checkpoints"][model_hash]["total"] += 1
# Increment today's count
if today_date not in self.stats["checkpoints"][model_hash]["history"]:
self.stats["checkpoints"][model_hash]["history"][today_date] = 0
self.stats["checkpoints"][model_hash]["history"][today_date] += 1
except Exception as e:
logger.error(f"Error processing checkpoint usage: {e}", exc_info=True)
async def _process_loras(self, loras_data):
async def _process_loras(self, loras_data, today_date):
"""Process LoRA models from metadata"""
try:
# Get LoRA scanner service
@@ -239,8 +328,20 @@ class UsageStats:
# Get hash for this LoRA
lora_hash = lora_scanner.get_hash_by_filename(lora_name)
if lora_hash:
# Update stats for this LoRA
self.stats["loras"][lora_hash] = self.stats["loras"].get(lora_hash, 0) + 1
# Update stats for this LoRA with date tracking
if lora_hash not in self.stats["loras"]:
self.stats["loras"][lora_hash] = {
"total": 0,
"history": {}
}
# Increment total count
self.stats["loras"][lora_hash]["total"] += 1
# Increment today's count
if today_date not in self.stats["loras"][lora_hash]["history"]:
self.stats["loras"][lora_hash]["history"][today_date] = 0
self.stats["loras"][lora_hash]["history"][today_date] += 1
except Exception as e:
logger.error(f"Error processing LoRA usage: {e}", exc_info=True)
@@ -251,9 +352,11 @@ class UsageStats:
async def get_model_usage_count(self, model_type, sha256):
"""Get usage count for a specific model by hash"""
if model_type == "checkpoint":
return self.stats["checkpoints"].get(sha256, 0)
if sha256 in self.stats["checkpoints"]:
return self.stats["checkpoints"][sha256]["total"]
elif model_type == "lora":
return self.stats["loras"].get(sha256, 0)
if sha256 in self.stats["loras"]:
return self.stats["loras"][sha256]["total"]
return 0
async def process_execution(self, prompt_id):

View File

@@ -114,3 +114,49 @@ def fuzzy_match(text: str, pattern: str, threshold: float = 0.7) -> bool:
# All words found either as substrings or fuzzy matches
return True
def calculate_recipe_fingerprint(loras):
    """
    Calculate a unique fingerprint for a recipe based on its LoRAs.

    The fingerprint is created by sorting LoRA hashes, filtering invalid
    entries, normalizing strength values to 2 decimal places, and joining in
    format: hash1:strength1|hash2:strength2|...

    Args:
        loras (list): List of LoRA dictionaries with hash and strength values

    Returns:
        str: The calculated fingerprint ("" when no valid LoRAs remain)
    """
    if not loras:
        return ""

    # Filter valid entries and extract hash and strength
    valid_loras = []
    for lora in loras:
        # Skip loras the user explicitly excluded from the recipe
        if lora.get("exclude", False):
            continue

        # Get the hash; guard against an explicit None before lowercasing
        hash_value = (lora.get("hash") or "").lower()

        # Use modelVersionId as fallback for deleted models without a hash.
        # Coerce to str so sorting never compares str against int.
        if not hash_value and lora.get("isDeleted", False) and lora.get("modelVersionId"):
            hash_value = str(lora.get("modelVersionId"))

        # Skip entries without any usable identifier
        if not hash_value:
            continue

        # Normalize strength to 2 decimal places (check both strength and weight fields)
        strength = round(float(lora.get("strength", lora.get("weight", 1.0))), 2)
        valid_loras.append((hash_value, strength))

    # Sort by hash so input ordering does not affect the fingerprint
    valid_loras.sort()

    # Join in format hash1:strength1|hash2:strength2|...
    return "|".join(f"{hash_value}:{strength}" for hash_value, strength in valid_loras)

View File

@@ -1,3 +0,0 @@
"""
ComfyUI workflow parsing module to extract generation parameters
"""

View File

@@ -1,58 +0,0 @@
"""
Command-line interface for the ComfyUI workflow parser
"""
import argparse
import json
import os
import logging
import sys
from .parser import parse_workflow
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)
def main():
    """Entry point for the CLI.

    Parses command-line arguments, validates the input workflow file, runs
    parse_workflow, and prints the result to stdout when no output file is
    given. Exits with status 1 on a missing input file or a parse error.
    """
    parser = argparse.ArgumentParser(description='Parse ComfyUI workflow files')
    parser.add_argument('input', help='Input workflow JSON file path')
    parser.add_argument('-o', '--output', help='Output JSON file path')
    parser.add_argument('-p', '--pretty', action='store_true', help='Pretty print JSON output')
    parser.add_argument('--debug', action='store_true', help='Enable debug logging')

    args = parser.parse_args()

    # Set logging level
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    # Validate input file
    if not os.path.isfile(args.input):
        logger.error(f"Input file not found: {args.input}")
        sys.exit(1)

    # Parse workflow
    try:
        result = parse_workflow(args.input, args.output)

        # Print result to console if output file not specified
        if not args.output:
            if args.pretty:
                print(json.dumps(result, indent=4))
            else:
                print(json.dumps(result))
        else:
            logger.info(f"Output saved to: {args.output}")
    except Exception as e:
        logger.error(f"Error parsing workflow: {e}")
        # Full traceback only in debug mode to keep normal failures terse
        if args.debug:
            import traceback
            traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -1,3 +0,0 @@
"""
Extension directory for custom node mappers
"""

View File

@@ -1,285 +0,0 @@
"""
ComfyUI Core nodes mappers extension for workflow parsing
"""
import logging
from typing import Dict, Any, List
logger = logging.getLogger(__name__)
# =============================================================================
# Transform Functions
# =============================================================================
def transform_random_noise(inputs: Dict) -> Dict:
    """Map a RandomNoise node's noise_seed input to a seed string."""
    seed_value = inputs.get("noise_seed", "")
    return {"seed": str(seed_value)}
def transform_ksampler_select(inputs: Dict) -> Dict:
    """Expose a KSamplerSelect node's sampler_name as the sampler parameter."""
    sampler_name = inputs.get("sampler_name", "")
    return {"sampler": sampler_name}
def transform_basic_scheduler(inputs: Dict) -> Dict:
    """Extract scheduler, denoise and (optionally) steps from a BasicScheduler node."""
    out = {
        "scheduler": inputs.get("scheduler", ""),
        "denoise": str(inputs.get("denoise", "1.0")),
    }
    if "steps" in inputs:
        steps = inputs["steps"]
        if isinstance(steps, str):
            # Already a string — pass through unchanged
            out["steps"] = steps
        elif isinstance(steps, dict) and "value" in steps:
            # Resolved from an upstream node that wraps its value
            out["steps"] = str(steps["value"])
        else:
            out["steps"] = str(steps)
    return out
def transform_basic_guider(inputs: Dict) -> Dict:
    """Extract prompt/conditioning and model info from a BasicGuider node."""
    out = {}

    if "conditioning" in inputs:
        conditioning = inputs["conditioning"]
        if isinstance(conditioning, str):
            # Plain text conditioning becomes the prompt
            out["prompt"] = conditioning
        elif isinstance(conditioning, dict):
            # Structured conditioning is passed through for downstream use
            out["conditioning"] = conditioning

    model = inputs.get("model")
    if isinstance(model, dict):
        out["model"] = model

    return out
def transform_model_sampling_flux(inputs: Dict) -> Dict:
    """Transform function for ModelSamplingFlux - mostly a pass-through node.

    This node is primarily used for routing, so we pass through the resolved
    model info. Fixed: use .get() so a missing/unresolved "model" input
    returns an empty dict instead of raising KeyError.
    """
    return inputs.get("model", {})
def transform_sampler_custom_advanced(inputs: Dict) -> Dict:
    """Transform function for SamplerCustomAdvanced node.

    Flattens the resolved noise/sampler/sigmas/guider/latent_image inputs into
    a single dict of generation parameters (seed, sampler, scheduler, steps,
    denoise, prompt, guidance, checkpoint, loras, clip_skip, width/height/size).
    Each section is optional; only keys whose source input resolved to the
    expected shape are emitted.
    """
    result = {}

    # Extract seed from noise
    if "noise" in inputs and isinstance(inputs["noise"], dict):
        result["seed"] = str(inputs["noise"].get("seed", ""))

    # Extract sampler info
    if "sampler" in inputs and isinstance(inputs["sampler"], dict):
        sampler = inputs["sampler"].get("sampler", "")
        if sampler:
            result["sampler"] = sampler

    # Extract scheduler, steps, denoise from sigmas
    if "sigmas" in inputs and isinstance(inputs["sigmas"], dict):
        sigmas = inputs["sigmas"]
        result["scheduler"] = sigmas.get("scheduler", "")
        result["steps"] = str(sigmas.get("steps", ""))
        result["denoise"] = str(sigmas.get("denoise", "1.0"))

    # Extract prompt and guidance from guider
    if "guider" in inputs and isinstance(inputs["guider"], dict):
        guider = inputs["guider"]

        # Get prompt from conditioning: a plain string is the prompt itself;
        # a dict carries both guidance and prompt fields
        if "conditioning" in guider and isinstance(guider["conditioning"], str):
            result["prompt"] = guider["conditioning"]
        elif "conditioning" in guider and isinstance(guider["conditioning"], dict):
            result["guidance"] = guider["conditioning"].get("guidance", "")
            result["prompt"] = guider["conditioning"].get("prompt", "")

        if "model" in guider and isinstance(guider["model"], dict):
            result["checkpoint"] = guider["model"].get("checkpoint", "")
            result["loras"] = guider["model"].get("loras", "")
            # clip_skip is stored as a negative layer index; report it positive
            result["clip_skip"] = str(int(guider["model"].get("clip_skip", "-1")) * -1)

    # Extract dimensions from latent_image
    if "latent_image" in inputs and isinstance(inputs["latent_image"], dict):
        latent = inputs["latent_image"]
        width = latent.get("width", 0)
        height = latent.get("height", 0)
        if width and height:
            result["width"] = width
            result["height"] = height
            result["size"] = f"{width}x{height}"

    return result
def transform_ksampler(inputs: Dict) -> Dict:
    """Transform function for KSampler nodes.

    Collects sampling parameters (seed, steps, cfg, sampler, scheduler),
    prompts, latent dimensions and resolved model info into a flat
    parameter dict. Keys whose source inputs are absent are omitted.
    """
    result = {
        "seed": str(inputs.get("seed", "")),
        "steps": str(inputs.get("steps", "")),
        "cfg": str(inputs.get("cfg", "")),
        "sampler": inputs.get("sampler_name", ""),
        "scheduler": inputs.get("scheduler", ""),
    }

    # Process positive prompt
    if "positive" in inputs:
        result["prompt"] = inputs["positive"]

    # Process negative prompt
    if "negative" in inputs:
        result["negative_prompt"] = inputs["negative"]

    # Get dimensions from latent image
    if "latent_image" in inputs and isinstance(inputs["latent_image"], dict):
        width = inputs["latent_image"].get("width", 0)
        height = inputs["latent_image"].get("height", 0)
        if width and height:
            result["size"] = f"{width}x{height}"

    # Add clip_skip if present
    if "clip_skip" in inputs:
        result["clip_skip"] = str(inputs.get("clip_skip", ""))

    # Add guidance if present
    if "guidance" in inputs:
        result["guidance"] = str(inputs.get("guidance", ""))

    # Add model if present (its clip_skip overrides the raw input above)
    if "model" in inputs:
        result["checkpoint"] = inputs.get("model", {}).get("checkpoint", "")
        result["loras"] = inputs.get("model", {}).get("loras", "")
        # Fixed: coerce clip_skip to int before negating. The resolved model
        # dict may carry it as a string (e.g. "-1"), and "-1" * -1 in Python
        # silently evaluates to "" instead of "1". Matches the int() wrap used
        # by transform_sampler_custom_advanced.
        result["clip_skip"] = str(int(inputs.get("model", {}).get("clip_skip", -1)) * -1)

    return result
def transform_empty_latent(inputs: Dict) -> Dict:
    """Report latent canvas dimensions plus a combined WxH size string."""
    w = inputs.get("width", 0)
    h = inputs.get("height", 0)
    return {"width": w, "height": h, "size": f"{w}x{h}"}
def transform_clip_text(inputs: Dict) -> Any:
    """Return the raw prompt text of a CLIPTextEncode node ('' when absent)."""
    if "text" in inputs:
        return inputs["text"]
    return ""
def transform_flux_guidance(inputs: Dict) -> Dict:
    """Extract guidance strength and prompt text from a FluxGuidance node."""
    out = {}
    if "guidance" in inputs:
        out["guidance"] = inputs["guidance"]
    if "conditioning" in inputs:
        cond = inputs["conditioning"]
        # Only plain-text conditioning carries a usable prompt
        out["prompt"] = cond if isinstance(cond, str) else "Unknown prompt"
    return out
def transform_unet_loader(inputs: Dict) -> Dict:
    """Report the UNET model file as the checkpoint, or nothing when unset."""
    name = inputs.get("unet_name", "")
    if not name:
        return {}
    return {"checkpoint": name}
def transform_checkpoint_loader(inputs: Dict) -> Dict:
    """Report the checkpoint file of a CheckpointLoaderSimple node, or nothing when unset."""
    name = inputs.get("ckpt_name", "")
    if not name:
        return {}
    return {"checkpoint": name}
def transform_latent_upscale_by(inputs: Dict) -> Dict:
    """Compute upscaled latent dimensions for a LatentUpscaleBy node.

    Fixed: guard against missing "samples"/"scale_by" inputs (an unresolved
    upstream node previously raised KeyError), and emit integer dimensions so
    a fractional scale does not produce a size string like "768.0x1152.0",
    inconsistent with the "WxH" format used by the other transforms.
    """
    samples = inputs.get("samples") or {}
    scale = inputs.get("scale_by", 1.0)
    width = int(round(samples.get("width", 0) * scale))
    height = int(round(samples.get("height", 0) * scale))
    return {"width": width, "height": height, "size": f"{width}x{height}"}
def transform_clip_set_last_layer(inputs: Dict) -> Dict:
    """Expose stop_at_clip_layer as clip_skip when the input is present."""
    if "stop_at_clip_layer" not in inputs:
        return {}
    return {"clip_skip": inputs["stop_at_clip_layer"]}
# =============================================================================
# Node Mapper Definitions
# =============================================================================
# Define the mappers for ComfyUI core nodes not in main mapper
NODE_MAPPERS_EXT = {
    # Maps each supported ComfyUI core node class_type to the inputs worth
    # tracing and the transform that flattens them into generation params.
    # Picked up by mappers.load_extensions() and merged into NODE_MAPPERS.
    # KSamplers
    "SamplerCustomAdvanced": {
        "inputs_to_track": ["noise", "guider", "sampler", "sigmas", "latent_image"],
        "transform_func": transform_sampler_custom_advanced
    },
    "KSampler": {
        "inputs_to_track": [
            "seed", "steps", "cfg", "sampler_name", "scheduler",
            "denoise", "positive", "negative", "latent_image",
            "model", "clip_skip"
        ],
        "transform_func": transform_ksampler
    },
    # ComfyUI core nodes
    "EmptyLatentImage": {
        "inputs_to_track": ["width", "height", "batch_size"],
        "transform_func": transform_empty_latent
    },
    "EmptySD3LatentImage": {
        "inputs_to_track": ["width", "height", "batch_size"],
        "transform_func": transform_empty_latent
    },
    "CLIPTextEncode": {
        "inputs_to_track": ["text", "clip"],
        "transform_func": transform_clip_text
    },
    "FluxGuidance": {
        "inputs_to_track": ["guidance", "conditioning"],
        "transform_func": transform_flux_guidance
    },
    "RandomNoise": {
        "inputs_to_track": ["noise_seed"],
        "transform_func": transform_random_noise
    },
    "KSamplerSelect": {
        "inputs_to_track": ["sampler_name"],
        "transform_func": transform_ksampler_select
    },
    "BasicScheduler": {
        "inputs_to_track": ["scheduler", "steps", "denoise", "model"],
        "transform_func": transform_basic_scheduler
    },
    "BasicGuider": {
        "inputs_to_track": ["model", "conditioning"],
        "transform_func": transform_basic_guider
    },
    "ModelSamplingFlux": {
        "inputs_to_track": ["max_shift", "base_shift", "width", "height", "model"],
        "transform_func": transform_model_sampling_flux
    },
    "UNETLoader": {
        "inputs_to_track": ["unet_name"],
        "transform_func": transform_unet_loader
    },
    "CheckpointLoaderSimple": {
        "inputs_to_track": ["ckpt_name"],
        "transform_func": transform_checkpoint_loader
    },
    # LatentUpscale reuses the empty-latent transform: both emit width/height/size
    "LatentUpscale": {
        "inputs_to_track": ["width", "height"],
        "transform_func": transform_empty_latent
    },
    "LatentUpscaleBy": {
        "inputs_to_track": ["samples", "scale_by"],
        "transform_func": transform_latent_upscale_by
    },
    "CLIPSetLastLayer": {
        "inputs_to_track": ["clip", "stop_at_clip_layer"],
        "transform_func": transform_clip_set_last_layer
    }
}

View File

@@ -1,74 +0,0 @@
"""
KJNodes mappers extension for ComfyUI workflow parsing
"""
import logging
import re
from typing import Dict, Any
logger = logging.getLogger(__name__)
# =============================================================================
# Transform Functions
# =============================================================================
def transform_join_strings(inputs: Dict) -> str:
    """Concatenate string1 and string2 with the configured delimiter."""
    first = inputs.get("string1", "")
    second = inputs.get("string2", "")
    sep = inputs.get("delimiter", "")
    return f"{first}{sep}{second}"
def transform_string_constant(inputs: Dict) -> str:
    """Return the constant string value of a StringConstant node ('' when absent)."""
    if "string" in inputs:
        return inputs["string"]
    return ""
def transform_empty_latent_presets(inputs: Dict) -> Dict:
    """Parse a preset dimension label like '512 x 768 (2:3)' into width/height.

    When invert is truthy the two dimensions are swapped (portrait vs
    landscape). Unparseable labels yield zero dimensions.
    """
    width = 0
    height = 0
    dimensions = inputs.get("dimensions", "")

    if dimensions:
        # Pull the first "<digits> x <digits>" pair out of the preset label
        found = re.search(r'(\d+)\s*x\s*(\d+)', dimensions)
        if found:
            width = int(found.group(1))
            height = int(found.group(2))

    if inputs.get("invert", False) and width and height:
        width, height = height, width

    return {"width": width, "height": height, "size": f"{width}x{height}"}
def transform_int_constant(inputs: Dict) -> int:
    """Return the integer value of an INTConstant node (0 when absent)."""
    if "value" in inputs:
        return inputs["value"]
    return 0
# =============================================================================
# Node Mapper Definitions
# =============================================================================
# Define the mappers for KJNodes
NODE_MAPPERS_EXT = {
    # KJNodes mappers; merged into the core NODE_MAPPERS by load_extensions().
    "JoinStrings": {
        "inputs_to_track": ["string1", "string2", "delimiter"],
        "transform_func": transform_join_strings
    },
    "StringConstantMultiline": {
        "inputs_to_track": ["string"],
        "transform_func": transform_string_constant
    },
    "EmptyLatentImagePresets": {
        "inputs_to_track": ["dimensions", "invert", "batch_size"],
        "transform_func": transform_empty_latent_presets
    },
    "INTConstant": {
        "inputs_to_track": ["value"],
        "transform_func": transform_int_constant
    }
}

View File

@@ -1,37 +0,0 @@
"""
Main entry point for the workflow parser module
"""
import os
import sys
import logging
from typing import Dict, Optional, Union
# Add the parent directory to sys.path to enable imports
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..'))
sys.path.insert(0, os.path.dirname(SCRIPT_DIR))
from .parser import parse_workflow
logger = logging.getLogger(__name__)
def parse_comfyui_workflow(
    workflow_path: str,
    output_path: Optional[str] = None
) -> Dict:
    """
    Parse a ComfyUI workflow file and extract generation parameters.

    Thin public wrapper around parser.parse_workflow.

    Args:
        workflow_path: Path to the workflow JSON file
        output_path: Optional path to save the output JSON

    Returns:
        Dictionary containing extracted parameters
    """
    return parse_workflow(workflow_path, output_path)


if __name__ == "__main__":
    # If run directly, use the CLI
    from .cli import main
    main()

View File

@@ -1,282 +0,0 @@
"""
Node mappers for ComfyUI workflow parsing
"""
import logging
import os
import importlib.util
import inspect
from typing import Dict, List, Any, Optional, Union, Type, Callable, Tuple
logger = logging.getLogger(__name__)
# Global mapper registry
_MAPPER_REGISTRY: Dict[str, Dict] = {}
# =============================================================================
# Mapper Definition Functions
# =============================================================================
def create_mapper(
    node_type: str,
    inputs_to_track: List[str],
    transform_func: Callable[[Dict], Any] = None
) -> Dict:
    """Create a mapper definition for a node type.

    When no transform function is given, an identity transform is installed
    so the tracked inputs are returned unchanged.
    """
    if transform_func is None:
        transform_func = lambda inputs: inputs
    return {
        "node_type": node_type,
        "inputs_to_track": inputs_to_track,
        "transform": transform_func,
    }
def register_mapper(mapper: Dict) -> None:
    """Register a node mapper in the global registry.

    Keyed by the mapper's "node_type"; re-registering the same type
    overwrites the previous entry.
    """
    _MAPPER_REGISTRY[mapper["node_type"]] = mapper
    logger.debug(f"Registered mapper for node type: {mapper['node_type']}")
def get_mapper(node_type: str) -> Optional[Dict]:
    """Get a mapper for the specified node type, or None when unregistered."""
    return _MAPPER_REGISTRY.get(node_type)
def get_all_mappers() -> Dict[str, Dict]:
    """Get all registered mappers as a shallow copy (safe for callers to mutate)."""
    return _MAPPER_REGISTRY.copy()
# =============================================================================
# Node Processing Function
# =============================================================================
def process_node(node_id: str, node_data: Dict, workflow: Dict, parser: 'WorkflowParser') -> Any: # type: ignore
    """Process a node using its mapper and extract relevant information.

    Resolves each tracked input: [node_id, output_slot] pairs are followed
    recursively through parser.process_node (which handles result caching and
    cycle detection); direct values are copied as-is. The mapper's transform
    is then applied to the collected inputs.

    Returns the transform's output, the raw collected inputs dict if the
    transform raises, or None when no mapper is registered for the node type.
    """
    node_type = node_data.get("class_type")
    mapper = get_mapper(node_type)

    if not mapper:
        logger.warning(f"No mapper found for node type: {node_type}")
        return None

    result = {}

    # Extract inputs based on the mapper's tracked inputs
    for input_name in mapper["inputs_to_track"]:
        if input_name in node_data.get("inputs", {}):
            input_value = node_data["inputs"][input_name]

            # Check if input is a reference to another node's output
            if isinstance(input_value, list) and len(input_value) == 2:
                try:
                    # Format is [node_id, output_slot]
                    ref_node_id, output_slot = input_value

                    # Convert node_id to string if it's an integer
                    if isinstance(ref_node_id, int):
                        ref_node_id = str(ref_node_id)

                    # Recursively process the referenced node
                    ref_value = parser.process_node(ref_node_id, workflow)
                    if ref_value is not None:
                        result[input_name] = ref_value
                    else:
                        # If we couldn't get a value from the reference, store the raw value
                        result[input_name] = input_value
                except Exception as e:
                    logger.error(f"Error processing reference in node {node_id}, input {input_name}: {e}")
                    result[input_name] = input_value
            else:
                # Direct value
                result[input_name] = input_value

    # Apply the transform function
    try:
        return mapper["transform"](result)
    except Exception as e:
        # Fall back to the untransformed inputs rather than losing the node entirely
        logger.error(f"Error in transform function for node {node_id} of type {node_type}: {e}")
        return result
# =============================================================================
# Transform Functions
# =============================================================================
def transform_lora_loader(inputs: Dict) -> Dict:
    """Transform function for LoraLoader nodes.

    Builds the "<lora:name:strength>" syntax string from the node's active
    loras plus any upstream lora_stack, and passes through checkpoint and
    clip_skip info from the resolved model/clip inputs.
    """
    loras_data = inputs.get("loras", [])
    # NOTE(review): assumes a resolved lora_stack input is a dict wrapper —
    # a raw (unresolved) list here would raise AttributeError; confirm upstream
    lora_stack = inputs.get("lora_stack", {}).get("lora_stack", [])
    lora_texts = []

    # Process loras array: widget values may arrive wrapped in {"__value__": [...]}
    if isinstance(loras_data, dict) and "__value__" in loras_data:
        loras_list = loras_data["__value__"]
    elif isinstance(loras_data, list):
        loras_list = loras_data
    else:
        loras_list = []

    # Process each active lora entry
    for lora in loras_list:
        if isinstance(lora, dict) and lora.get("active", False):
            lora_name = lora.get("name", "")
            strength = lora.get("strength", 1.0)
            lora_texts.append(f"<lora:{lora_name}:{strength}>")

    # Process lora_stack if valid
    if lora_stack and isinstance(lora_stack, list):
        # Skip the degenerate case where the "stack" is actually a single
        # [node_id, slot] reference pair rather than a list of entries
        if not (len(lora_stack) == 2 and isinstance(lora_stack[0], (str, int)) and isinstance(lora_stack[1], int)):
            for stack_entry in lora_stack:
                lora_name = stack_entry[0]
                strength = stack_entry[1]
                lora_texts.append(f"<lora:{lora_name}:{strength}>")

    result = {
        "checkpoint": inputs.get("model", {}).get("checkpoint", ""),
        "loras": " ".join(lora_texts)
    }

    if "clip" in inputs and isinstance(inputs["clip"], dict):
        result["clip_skip"] = inputs["clip"].get("clip_skip", "-1")

    return result
def transform_lora_stacker(inputs: Dict) -> Dict:
    """Transform function for LoraStacker nodes.

    Appends this node's active loras (as (name, strength) tuples) to any
    stack received from an upstream LoraStacker, and returns the combined
    stack under the "lora_stack" key for downstream consumers.
    """
    loras_data = inputs.get("loras", [])
    result_stack = []

    # Handle existing stack entries: either wrapped in a dict by an upstream
    # stacker, or a plain list
    existing_stack = []
    lora_stack_input = inputs.get("lora_stack", [])
    if isinstance(lora_stack_input, dict) and "lora_stack" in lora_stack_input:
        existing_stack = lora_stack_input["lora_stack"]
    elif isinstance(lora_stack_input, list):
        # Guard against a bare [node_id, slot] reference pair being mistaken
        # for a list of stack entries
        if not (len(lora_stack_input) == 2 and isinstance(lora_stack_input[0], (str, int)) and
                isinstance(lora_stack_input[1], int)):
            existing_stack = lora_stack_input

    # Add existing entries
    if existing_stack:
        result_stack.extend(existing_stack)

    # Process new loras: widget values may arrive wrapped in {"__value__": [...]}
    if isinstance(loras_data, dict) and "__value__" in loras_data:
        loras_list = loras_data["__value__"]
    elif isinstance(loras_data, list):
        loras_list = loras_data
    else:
        loras_list = []

    for lora in loras_list:
        if isinstance(lora, dict) and lora.get("active", False):
            lora_name = lora.get("name", "")
            strength = float(lora.get("strength", 1.0))
            result_stack.append((lora_name, strength))

    return {"lora_stack": result_stack}
def transform_trigger_word_toggle(inputs: Dict) -> str:
    """Join the active, non-placeholder trigger words into a comma-separated string."""
    raw = inputs.get("toggle_trigger_words", [])

    # Widget values may arrive wrapped in {"__value__": [...]}
    if isinstance(raw, dict) and "__value__" in raw:
        entries = raw["__value__"]
    elif isinstance(raw, list):
        entries = raw
    else:
        entries = []

    selected = []
    for entry in entries:
        if not isinstance(entry, dict) or not entry.get("active", False):
            continue
        text = entry.get("text", "")
        # "__dummy" entries are UI placeholders, not real trigger words
        if text and not text.startswith("__dummy"):
            selected.append(text)

    return ", ".join(selected)
# =============================================================================
# Node Mapper Definitions
# =============================================================================
# Central definition of all supported node types and their configurations
NODE_MAPPERS = {
    # LoraManager nodes — each entry pairs the inputs worth resolving with
    # the transform that flattens them. Extension modules append their own
    # entries to this dict via load_extensions().
    "Lora Loader (LoraManager)": {
        "inputs_to_track": ["model", "clip", "loras", "lora_stack"],
        "transform_func": transform_lora_loader
    },
    "Lora Stacker (LoraManager)": {
        "inputs_to_track": ["loras", "lora_stack"],
        "transform_func": transform_lora_stacker
    },
    "TriggerWord Toggle (LoraManager)": {
        "inputs_to_track": ["toggle_trigger_words"],
        "transform_func": transform_trigger_word_toggle
    }
}
def register_all_mappers() -> None:
    """Register all mappers from the NODE_MAPPERS dictionary.

    Builds a mapper for every NODE_MAPPERS entry and installs it in the
    global registry; safe to call repeatedly (re-registration overwrites).
    """
    for node_type, config in NODE_MAPPERS.items():
        mapper = create_mapper(
            node_type=node_type,
            inputs_to_track=config["inputs_to_track"],
            transform_func=config["transform_func"]
        )
        register_mapper(mapper)

    logger.info(f"Registered {len(NODE_MAPPERS)} node mappers")
# =============================================================================
# Extension Loading
# =============================================================================
def load_extensions(ext_dir: str = None) -> None:
    """
    Load mapper extensions from the specified directory.

    Extension files should define a NODE_MAPPERS_EXT dictionary containing
    mapper configurations. These will be added to the global NODE_MAPPERS
    dictionary and registered automatically.

    Args:
        ext_dir: Directory to scan for extension modules; defaults to the
            'ext' directory next to this file (created if missing).
    """
    # Use default path if none provided
    if ext_dir is None:
        # Get the directory of this file
        current_dir = os.path.dirname(os.path.abspath(__file__))
        ext_dir = os.path.join(current_dir, 'ext')

    # Ensure the extension directory exists; nothing to load on first run
    if not os.path.exists(ext_dir):
        os.makedirs(ext_dir, exist_ok=True)
        logger.info(f"Created extension directory: {ext_dir}")
        return

    # Load each Python file in the extension directory
    for filename in os.listdir(ext_dir):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_path = os.path.join(ext_dir, filename)
            module_name = f"workflow.ext.{filename[:-3]}"  # Remove .py

            try:
                # Load the module
                spec = importlib.util.spec_from_file_location(module_name, module_path)
                if spec and spec.loader:
                    module = importlib.util.module_from_spec(spec)
                    spec.loader.exec_module(module)

                    # Check if the module defines NODE_MAPPERS_EXT
                    if hasattr(module, 'NODE_MAPPERS_EXT'):
                        # Add the extension mappers to the global NODE_MAPPERS dictionary
                        NODE_MAPPERS.update(module.NODE_MAPPERS_EXT)
                        # Fixed: log the actual extension filename instead of a
                        # "(unknown)" placeholder, so failures are traceable
                        logger.info(f"Added {len(module.NODE_MAPPERS_EXT)} mappers from extension: {filename}")
                    else:
                        logger.warning(f"Extension {filename} does not define NODE_MAPPERS_EXT dictionary")
            except Exception as e:
                # A broken extension must not prevent the others from loading
                logger.warning(f"Error loading extension {filename}: {e}")

    # Re-register all mappers after loading extensions
    register_all_mappers()
# Initialize the registry with default mappers
# register_default_mappers()

View File

@@ -1,181 +0,0 @@
"""
Main workflow parser implementation for ComfyUI
"""
import json
import logging
from typing import Dict, List, Any, Optional, Union, Set
from .mappers import get_mapper, get_all_mappers, load_extensions, process_node
from .utils import (
load_workflow, save_output, find_node_by_type,
trace_model_path
)
logger = logging.getLogger(__name__)
class WorkflowParser:
    """Parser for ComfyUI workflows.

    Traverses a workflow graph (a dict mapping node-id strings to node data)
    starting from the primary sampler node and extracts generation parameters
    using the registered node mappers.
    """

    def __init__(self):
        """Initialize the parser with mappers"""
        # Nodes currently on the processing stack; used to detect cycles.
        self.processed_nodes: Set[str] = set()
        # Cache of node-id -> extracted result so shared nodes are processed once.
        self.node_results_cache: Dict[str, Any] = {}
        # Load extension mappers so custom node types are recognized.
        load_extensions()

    def process_node(self, node_id: str, workflow: Dict) -> Any:
        """Process a single node and extract relevant information.

        Returns the mapper result for the node, a cached result if the node was
        already processed, or None for unknown node types, cycles, or missing nodes.
        """
        # Return cached result if available
        if node_id in self.node_results_cache:
            return self.node_results_cache[node_id]

        # Check if we're in a cycle
        if node_id in self.processed_nodes:
            return None

        # Mark this node as being processed (to detect cycles)
        self.processed_nodes.add(node_id)

        if node_id not in workflow:
            self.processed_nodes.remove(node_id)
            return None

        node_data = workflow[node_id]
        node_type = node_data.get("class_type")

        result = None
        if get_mapper(node_type):
            try:
                result = process_node(node_id, node_data, workflow, self)
                # Cache the result
                self.node_results_cache[node_id] = result
            except Exception as e:
                logger.error(f"Error processing node {node_id} of type {node_type}: {e}", exc_info=True)
                # Return a partial result rather than failing the whole parse.
                result = {}

        # Remove node from processed set to allow it to be processed again
        # in a different context
        self.processed_nodes.remove(node_id)
        return result

    def find_primary_sampler_node(self, workflow: Dict) -> Optional[str]:
        """
        Find the primary sampler node in the workflow.

        Priority:
        1. First try to find a SamplerCustomAdvanced node
        2. If not found, look for KSampler nodes with denoise=1.0
        3. If still not found, use the first KSampler node

        Args:
            workflow: The workflow data as a dictionary

        Returns:
            The node ID of the primary sampler node, or None if not found
        """
        # First check for SamplerCustomAdvanced nodes
        sampler_advanced_nodes = []
        ksampler_nodes = []

        # Scan workflow for sampler nodes
        for node_id, node_data in workflow.items():
            node_type = node_data.get("class_type")
            if node_type == "SamplerCustomAdvanced":
                sampler_advanced_nodes.append(node_id)
            elif node_type == "KSampler":
                ksampler_nodes.append(node_id)

        # If we found SamplerCustomAdvanced nodes, return the first one
        if sampler_advanced_nodes:
            logger.debug(f"Found SamplerCustomAdvanced node: {sampler_advanced_nodes[0]}")
            return sampler_advanced_nodes[0]

        # If we have KSampler nodes, look for one with denoise=1.0
        if ksampler_nodes:
            for node_id in ksampler_nodes:
                node_data = workflow[node_id]
                inputs = node_data.get("inputs", {})
                denoise = inputs.get("denoise", 0)
                # FIX: denoise may be a link reference ([node_id, slot]) rather
                # than a literal number; previously float() raised TypeError and
                # aborted the parse. Skip values that cannot be converted.
                try:
                    denoise_value = float(denoise)
                except (TypeError, ValueError):
                    continue
                # Check if denoise is 1.0 (allowing for small floating point differences)
                if abs(denoise_value - 1.0) < 0.001:
                    logger.debug(f"Found KSampler node with denoise=1.0: {node_id}")
                    return node_id

            # If no KSampler with denoise=1.0 found, use the first one
            logger.debug(f"No KSampler with denoise=1.0 found, using first KSampler: {ksampler_nodes[0]}")
            return ksampler_nodes[0]

        # No sampler nodes found
        logger.warning("No sampler nodes found in workflow")
        return None

    def parse_workflow(self, workflow_data: Union[str, Dict], output_path: Optional[str] = None) -> Dict:
        """
        Parse the workflow and extract generation parameters

        Args:
            workflow_data: The workflow data as a dictionary or a file path
            output_path: Optional path to save the output JSON

        Returns:
            Dictionary containing extracted parameters
        """
        # Load workflow from file if needed
        if isinstance(workflow_data, str):
            workflow = load_workflow(workflow_data)
        else:
            workflow = workflow_data

        # Reset the processed nodes tracker and cache
        self.processed_nodes = set()
        self.node_results_cache = {}

        # Find the primary sampler node
        sampler_node_id = self.find_primary_sampler_node(workflow)
        if not sampler_node_id:
            logger.warning("No suitable sampler node found in workflow")
            return {}

        # Process sampler node to extract parameters
        sampler_result = self.process_node(sampler_node_id, workflow)
        if not sampler_result:
            return {}

        # Return the sampler result directly - it's already in the format we need
        # This simplifies the structure and makes it easier to use in recipe_routes.py

        # Handle standard ComfyUI names vs our output format
        if "cfg" in sampler_result:
            sampler_result["cfg_scale"] = sampler_result.pop("cfg")

        # Add clip_skip = 1 to match reference output if not already present
        if "clip_skip" not in sampler_result:
            sampler_result["clip_skip"] = "1"

        # Ensure the prompt is a string and not a nested dictionary
        if "prompt" in sampler_result and isinstance(sampler_result["prompt"], dict):
            if "prompt" in sampler_result["prompt"]:
                sampler_result["prompt"] = sampler_result["prompt"]["prompt"]

        # Save the result if requested
        if output_path:
            save_output(sampler_result, output_path)

        return sampler_result
def parse_workflow(workflow_path: str, output_path: Optional[str] = None) -> Dict:
    """
    Parse a ComfyUI workflow file and extract generation parameters

    Convenience wrapper that builds a fresh WorkflowParser for a single run.

    Args:
        workflow_path: Path to the workflow JSON file
        output_path: Optional path to save the output JSON

    Returns:
        Dictionary containing extracted parameters
    """
    return WorkflowParser().parse_workflow(workflow_path, output_path)

View File

@@ -1,63 +0,0 @@
"""
Test script for the ComfyUI workflow parser
"""
import os
import json
import logging
from .parser import parse_workflow
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)
# Configure paths
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..'))
REFS_DIR = os.path.join(ROOT_DIR, 'refs')
OUTPUT_DIR = os.path.join(ROOT_DIR, 'output')
def test_parse_flux_workflow():
    """Parse the flux example workflow and validate it against the reference output."""
    # Make sure there is somewhere to write the parsed result.
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # Input workflow and destination for the parsed output.
    workflow_file = os.path.join(REFS_DIR, 'flux_prompt.json')
    result_file = os.path.join(OUTPUT_DIR, 'parsed_flux_output.json')

    # Run the parser.
    logger.info(f"Parsing workflow: {workflow_file}")
    parsed = parse_workflow(workflow_file, result_file)

    # Log a short summary of what was extracted.
    logger.info(f"Output saved to: {result_file}")
    logger.info(f"Parsing completed. Result summary:")
    logger.info(f" LoRAs: {parsed.get('loras', '')}")
    params = parsed.get('gen_params', {})
    logger.info(f" Prompt: {params.get('prompt', '')[:50]}...")
    logger.info(f" Steps: {params.get('steps', '')}")
    logger.info(f" Sampler: {params.get('sampler', '')}")
    logger.info(f" Size: {params.get('size', '')}")

    # Compare with the checked-in reference output, if available.
    reference_file = os.path.join(REFS_DIR, 'flux_output.json')
    try:
        with open(reference_file, 'r') as handle:
            reference = json.load(handle)
        # Simple validation of the two most important fields.
        loras_match = parsed.get('loras', '') == reference.get('loras', '')
        prompt_match = params.get('prompt', '') == reference.get('gen_params', {}).get('prompt', '')
        logger.info(f"Validation against reference:")
        logger.info(f" LoRAs match: {loras_match}")
        logger.info(f" Prompt match: {prompt_match}")
    except Exception as e:
        logger.warning(f"Failed to compare with reference output: {e}")
if __name__ == "__main__":
test_parse_flux_workflow()

View File

@@ -1,120 +0,0 @@
"""
Utility functions for ComfyUI workflow parsing
"""
import json
import os
import logging
from typing import Dict, List, Any, Optional, Union, Set, Tuple
logger = logging.getLogger(__name__)
def load_workflow(workflow_path: str) -> Dict:
    """Load a workflow definition from a JSON file.

    Args:
        workflow_path: Path to the workflow JSON file.

    Returns:
        The parsed workflow as a dictionary.

    Raises:
        Exception: Re-raises any error from opening or parsing the file,
            after logging it.
    """
    try:
        with open(workflow_path, 'r', encoding='utf-8') as handle:
            data = json.load(handle)
    except Exception as err:
        logger.error(f"Error loading workflow from {workflow_path}: {err}")
        raise
    return data
def save_output(output: Dict, output_path: str) -> None:
    """Persist the parsed output as pretty-printed JSON.

    Creates the destination directory if it does not exist.

    Args:
        output: The parsed result dictionary to write.
        output_path: Destination file path.

    Raises:
        Exception: Re-raises any error from writing the file, after logging it.
    """
    # Ensure the parent directory exists before writing.
    target_dir = os.path.dirname(os.path.abspath(output_path))
    os.makedirs(target_dir, exist_ok=True)
    try:
        with open(output_path, 'w', encoding='utf-8') as handle:
            json.dump(output, handle, indent=4)
    except Exception as err:
        logger.error(f"Error saving output to {output_path}: {err}")
        raise
def find_node_by_type(workflow: Dict, node_type: str) -> Optional[str]:
    """Return the ID of the first node whose class_type matches, or None."""
    return next(
        (nid for nid, data in workflow.items() if data.get("class_type") == node_type),
        None,
    )
def find_nodes_by_type(workflow: Dict, node_type: str) -> List[str]:
    """Return the IDs of every node whose class_type matches node_type."""
    matches: List[str] = []
    for node_id, node_data in workflow.items():
        if node_data.get("class_type") == node_type:
            matches.append(node_id)
    return matches
def get_input_node_ids(workflow: Dict, node_id: str) -> Dict[str, Tuple[str, int]]:
    """
    Get the node IDs for all inputs of the given node

    An input is considered connected when its value is a two-element list of
    the form [node_id, output_slot]; literal values are skipped.

    Returns a dictionary mapping input names to (node_id, output_slot) tuples
    """
    links: Dict[str, Tuple[str, int]] = {}
    if node_id not in workflow:
        return links
    node = workflow[node_id]
    for name, value in node.get("inputs", {}).items():
        # A [node_id, output_slot] pair marks a connection to another node.
        if isinstance(value, list) and len(value) == 2:
            source_id, slot = value
            # Node IDs may appear as ints in the JSON; normalize to str.
            links[name] = (str(source_id), slot)
    return links
def trace_model_path(workflow: Dict, start_node_id: str) -> List[str]:
    """
    Trace the model path backward from KSampler to find all LoRA nodes

    Starting from the "model" input of start_node_id, follows "model" and
    "lora_stack" links depth-first and records every node whose class_type
    contains "Lora".

    Args:
        workflow: The workflow data
        start_node_id: The starting node ID (usually KSampler)

    Returns:
        List of node IDs in the model path
    """
    lora_nodes: List[str] = []
    if start_node_id not in workflow:
        return lora_nodes

    # Visited set guards against cycles; pending is the DFS stack.
    visited = set()
    pending: List[str] = []

    # Seed the search with the node feeding this node's "model" input, if linked.
    start_node = workflow[start_node_id]
    model_link = start_node.get("inputs", {}).get("model")
    if isinstance(model_link, list):
        pending.append(str(model_link[0]))

    while pending:
        current = pending.pop()
        if current in visited:
            continue
        visited.add(current)
        if current not in workflow:
            continue

        node = workflow[current]
        # Collect any node whose type name marks it as a LoRA node.
        if "Lora" in node.get("class_type", ""):
            lora_nodes.append(current)

        # Follow only the model-carrying links further upstream.
        for name, value in node.get("inputs", {}).items():
            if name in ("model", "lora_stack") and isinstance(value, list) and len(value) == 2:
                pending.append(str(value[0]))

    return lora_nodes

View File

@@ -1,7 +1,7 @@
[project]
name = "comfyui-lora-manager"
description = "LoRA Manager for ComfyUI - Access it at http://localhost:8188/loras for managing LoRA models with previews and metadata integration."
version = "0.8.11"
version = "0.8.17"
license = {file = "LICENSE"}
dependencies = [
"aiohttp",
@@ -13,7 +13,9 @@ dependencies = [
"Pillow",
"olefile", # for getting rid of warning message
"requests",
"toml"
"toml",
"natsort",
"msgpack"
]
[project.urls]
@@ -23,4 +25,4 @@ Repository = "https://github.com/willmiao/ComfyUI-Lora-Manager"
[tool.comfy]
PublisherId = "willmiao"
DisplayName = "ComfyUI-Lora-Manager"
Icon = ""
Icon = "https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/static/images/android-chrome-512x512.png?raw=true"

View File

@@ -9,4 +9,6 @@ olefile
requests
toml
numpy
torch
torch
natsort
msgpack

View File

@@ -295,6 +295,7 @@ class StandaloneLoraManager(LoraManager):
from py.routes.checkpoints_routes import CheckpointsRoutes
from py.routes.update_routes import UpdateRoutes
from py.routes.misc_routes import MiscRoutes
from py.routes.example_images_routes import ExampleImagesRoutes
lora_routes = LoraRoutes()
checkpoints_routes = CheckpointsRoutes()
@@ -306,6 +307,7 @@ class StandaloneLoraManager(LoraManager):
RecipeRoutes.setup_routes(app)
UpdateRoutes.setup_routes(app)
MiscRoutes.setup_routes(app)
ExampleImagesRoutes.setup_routes(app)
# Schedule service initialization
app.on_startup.append(lambda app: cls._initialize_services())

View File

@@ -32,13 +32,21 @@ html, body {
--card-bg: #ffffff;
--border-color: #e0e0e0;
/* Color System */
--lora-accent: oklch(68% 0.28 256);
/* Color Components */
--lora-accent-l: 68%;
--lora-accent-c: 0.28;
--lora-accent-h: 256;
--lora-warning-l: 75%;
--lora-warning-c: 0.25;
--lora-warning-h: 80;
/* Composed Colors */
--lora-accent: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
--lora-surface: oklch(100% 0 0 / 0.98);
--lora-border: oklch(90% 0.02 256 / 0.15);
--lora-text: oklch(95% 0.02 256);
--lora-error: oklch(75% 0.32 29);
--lora-warning: oklch(75% 0.25 80); /* Add warning color for deleted LoRAs */
--lora-warning: oklch(var(--lora-warning-l) var(--lora-warning-c) var(--lora-warning-h)); /* Modified to be used with oklch() */
/* Spacing Scale */
--space-1: calc(8px * 1);
@@ -79,7 +87,7 @@ html[data-theme="light"] {
--lora-surface: oklch(25% 0.02 256 / 0.98);
--lora-border: oklch(90% 0.02 256 / 0.15);
--lora-text: oklch(98% 0.02 256);
--lora-warning: oklch(75% 0.25 80); /* Add warning color for dark theme too */
--lora-warning: oklch(75% 0.25 80); /* Modified to be used with oklch() */
}
body {

View File

@@ -0,0 +1,165 @@
/* Alphabet Bar Component */
.alphabet-bar-container {
position: fixed;
left: 0;
top: 50%;
transform: translateY(-50%);
z-index: 100;
display: flex;
transition: transform 0.3s ease;
}
.alphabet-bar-container.collapsed {
transform: translateY(-50%) translateX(-90%);
}
/* New visual indicator for when a letter is active and bar is collapsed */
.alphabet-bar-container.collapsed .toggle-alphabet-bar.has-active-letter {
    border-color: var(--lora-accent);
    /* --lora-accent is itself a complete oklch() color, so it cannot be nested
       inside oklch(); compose from the L/C/H components to apply alpha. */
    background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.15);
}
.alphabet-bar-container.collapsed .toggle-alphabet-bar.has-active-letter::after {
content: '';
position: absolute;
top: 7px;
right: 7px;
width: 8px;
height: 8px;
background-color: var(--lora-accent);
border-radius: 50%;
animation: pulse-active 2s infinite;
}
@keyframes pulse-active {
0% { transform: scale(0.8); opacity: 0.7; }
50% { transform: scale(1.1); opacity: 1; }
100% { transform: scale(0.8); opacity: 0.7; }
}
.alphabet-bar {
background: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: 0 var(--border-radius-xs) var(--border-radius-xs) 0;
padding: 8px 4px;
display: flex;
flex-direction: column;
gap: 6px;
align-items: center;
box-shadow: 2px 0 8px rgba(0, 0, 0, 0.1);
max-height: 80vh;
overflow-y: auto;
scrollbar-width: thin;
}
.alphabet-bar::-webkit-scrollbar {
width: 4px;
}
.alphabet-bar::-webkit-scrollbar-thumb {
background: var(--border-color);
border-radius: 4px;
}
.toggle-alphabet-bar {
background: var(--card-bg);
border: 1px solid var(--border-color);
border-left: none;
border-radius: 0 var(--border-radius-xs) var(--border-radius-xs) 0;
padding: 8px 4px;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
color: var(--text-color);
width: 20px;
height: 40px;
align-self: center;
box-shadow: 2px 0 8px rgba(0, 0, 0, 0.1);
}
.toggle-alphabet-bar:hover {
background: var(--bg-hover);
}
.toggle-alphabet-bar i {
transition: transform 0.3s ease;
}
.alphabet-bar-container.collapsed .toggle-alphabet-bar i {
transform: rotate(180deg);
}
.letter-chip {
padding: 4px 2px;
border-radius: var(--border-radius-xs);
background: var(--bg-color);
color: var(--text-color);
cursor: pointer;
min-width: 24px;
text-align: center;
font-size: 0.85em;
transition: all 0.2s ease;
border: 1px solid var(--border-color);
}
.letter-chip:hover {
background: var(--lora-accent);
color: white;
transform: scale(1.1);
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.letter-chip.active {
background: var(--lora-accent);
color: white;
border-color: var(--lora-accent);
}
.letter-chip.disabled {
opacity: 0.5;
pointer-events: none;
cursor: default;
}
/* Hide the count by default, only show in tooltip */
.letter-chip .count {
display: none;
}
.alphabet-bar-title {
font-size: 0.75em;
color: var(--text-color);
opacity: 0.7;
margin-bottom: 6px;
writing-mode: vertical-lr;
transform: rotate(180deg);
white-space: nowrap;
}
@media (max-width: 768px) {
.alphabet-bar-container {
transform: translateY(-50%) translateX(-90%);
}
.alphabet-bar-container.active {
transform: translateY(-50%) translateX(0);
}
.letter-chip {
padding: 3px 1px;
min-width: 20px;
font-size: 0.75em;
}
}
/* Keyframe animations for the active letter */
@keyframes pulse {
0% { transform: scale(1); }
50% { transform: scale(1.1); }
100% { transform: scale(1); }
}
.letter-chip.active {
animation: pulse 1s ease-in-out 1;
}

View File

@@ -60,6 +60,18 @@
border-color: var(--lora-accent);
}
/* Danger button style - updated to use proper theme variables */
.bulk-operations-actions button.danger-btn {
background: oklch(70% 0.2 29); /* Light red background that works in both themes */
color: oklch(98% 0.01 0); /* Almost white text for good contrast */
border-color: var(--lora-error);
}
.bulk-operations-actions button.danger-btn:hover {
background: var(--lora-error);
color: oklch(100% 0 0); /* Pure white text on hover for maximum contrast */
}
/* Style for selected cards */
.lora-card.selected {
box-shadow: 0 0 0 2px var(--lora-accent);
@@ -262,83 +274,6 @@
background: var(--lora-accent);
}
/* NSFW Level Selector */
.nsfw-level-selector {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
background: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-base);
padding: 16px;
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.2);
z-index: var(--z-modal);
width: 300px;
display: none;
}
.nsfw-level-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 16px;
}
.nsfw-level-header h3 {
margin: 0;
font-size: 16px;
font-weight: 500;
}
.close-nsfw-selector {
background: transparent;
border: none;
color: var(--text-color);
cursor: pointer;
padding: 4px;
border-radius: var(--border-radius-xs);
}
.close-nsfw-selector:hover {
background: var(--border-color);
}
.current-level {
margin-bottom: 12px;
padding: 8px;
background: var(--bg-color);
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
}
.nsfw-level-options {
display: flex;
flex-wrap: wrap;
gap: 8px;
}
.nsfw-level-btn {
flex: 1 0 calc(33% - 8px);
padding: 8px;
border-radius: var(--border-radius-xs);
background: var(--bg-color);
border: 1px solid var(--border-color);
color: var(--text-color);
cursor: pointer;
transition: all 0.2s ease;
}
.nsfw-level-btn:hover {
background: var(--lora-border);
}
.nsfw-level-btn.active {
background: var(--lora-accent);
color: white;
border-color: var(--lora-accent);
}
/* Mobile optimizations */
@media (max-width: 768px) {
.selected-thumbnails-strip {

View File

@@ -1,14 +1,17 @@
/* 卡片网格布局 */
.card-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(260px, 1fr)); /* Adjusted from 320px */
gap: 12px; /* Reduced from var(--space-2) for tighter horizontal spacing */
grid-template-columns: repeat(auto-fill, minmax(260px, 1fr)); /* Base size */
gap: 12px; /* Consistent gap for both row and column spacing */
row-gap: 20px; /* Increase vertical spacing between rows */
margin-top: var(--space-2);
padding-top: 4px; /* 添加顶部内边距,为悬停动画提供空间 */
padding-bottom: 4px; /* 添加底部内边距,为悬停动画提供空间 */
max-width: 1400px; /* Container width control */
width: 100%; /* Ensure it takes full width of container */
max-width: 1400px; /* Base container width */
margin-left: auto;
margin-right: auto;
box-sizing: border-box; /* Include padding in width calculation */
}
.lora-card {
@@ -17,13 +20,14 @@
border-radius: var(--border-radius-base);
backdrop-filter: blur(16px);
transition: transform 160ms ease-out;
aspect-ratio: 896/1152;
max-width: 260px; /* Adjusted from 320px to fit 5 cards */
aspect-ratio: 896/1152; /* Preserve aspect ratio */
max-width: 260px; /* Base size */
width: 100%;
margin: 0 auto;
cursor: pointer; /* Added from recipe-card */
display: flex; /* Added from recipe-card */
flex-direction: column; /* Added from recipe-card */
overflow: hidden; /* Add overflow hidden to contain children */
cursor: pointer;
display: flex;
flex-direction: column;
overflow: hidden;
}
.lora-card:hover {
@@ -36,6 +40,30 @@
outline-offset: 2px;
}
/* Responsive adjustments for 1440p screens (2K) */
@media (min-width: 2000px) {
.card-grid {
max-width: 1800px; /* Increased for 2K screens */
grid-template-columns: repeat(auto-fill, minmax(270px, 1fr));
}
.lora-card {
max-width: 270px;
}
}
/* Responsive adjustments for 4K screens */
@media (min-width: 3000px) {
.card-grid {
max-width: 2400px; /* Increased for 4K screens */
grid-template-columns: repeat(auto-fill, minmax(280px, 1fr));
}
.lora-card {
max-width: 280px;
}
}
/* Responsive adjustments */
@media (max-width: 1400px) {
.card-grid {
@@ -58,6 +86,42 @@
min-height: 0; /* Fix for potential flexbox sizing issue in Firefox */
}
/* Smaller text for medium density */
.medium-density .model-name {
font-size: 0.95em;
max-height: 2.6em;
}
.medium-density .base-model-label {
font-size: 0.85em;
max-width: 120px;
}
.medium-density .card-actions i {
font-size: 0.98em;
padding: 4px;
}
/* Smaller text for compact mode */
.compact-density .model-name {
font-size: 0.9em;
max-height: 2.4em;
}
.compact-density .base-model-label {
font-size: 0.8em;
max-width: 110px;
}
.compact-density .card-actions i {
font-size: 0.95em;
padding: 3px;
}
.compact-density .model-info {
padding-bottom: 2px;
}
.card-preview img,
.card-preview video {
width: 100%;
@@ -313,6 +377,25 @@
font-size: 0.85em;
}
/* Prevent text selection on cards and interactive elements */
.lora-card,
.lora-card *,
.card-actions,
.card-actions i,
.toggle-blur-btn,
.show-content-btn,
.card-preview img,
.card-preview video,
.card-footer,
.card-header,
.model-name,
.base-model-label {
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
/* Recipe specific elements - migrated from recipe-card.css */
.recipe-indicator {
position: absolute;
@@ -362,4 +445,44 @@
padding: 2rem;
background: var(--lora-surface-alt);
border-radius: var(--border-radius-base);
}
/* Virtual scrolling specific styles - updated */
.virtual-scroll-item {
position: absolute;
box-sizing: border-box;
transition: transform 160ms ease-out;
margin: 0; /* Remove margins, positioning is handled by VirtualScroller */
width: 100%; /* Allow width to be set by the VirtualScroller */
}
.virtual-scroll-item:hover {
transform: translateY(-2px); /* Keep hover effect */
z-index: 1; /* Ensure hovered items appear above others */
}
/* When using virtual scroll, adjust container */
.card-grid.virtual-scroll {
display: block;
position: relative;
margin: 0 auto;
padding: 4px 0; /* Add top/bottom padding equivalent to card padding */
height: auto;
width: 100%;
max-width: 1400px; /* Keep the max-width from original grid */
box-sizing: border-box; /* Include padding in width calculation */
overflow-x: hidden; /* Prevent horizontal overflow */
}
/* For larger screens, allow more space for the cards */
@media (min-width: 2000px) {
.card-grid.virtual-scroll {
max-width: 1800px;
}
}
@media (min-width: 3000px) {
.card-grid.virtual-scroll {
max-width: 2400px;
}
}

View File

@@ -0,0 +1,485 @@
/* Duplicates Management Styles */
/* Duplicates banner */
.duplicates-banner {
position: sticky; /* Keep the sticky position */
top: var(--space-1);
width: 100%;
background-color: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1); /* Use accent color with low opacity */
color: var(--text-color);
border-top: 1px solid oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.3); /* Add top border with accent color */
border-bottom: 1px solid oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.4); /* Make bottom border stronger */
z-index: var(--z-overlay);
padding: 12px 0;
box-shadow: 0 3px 10px rgba(0, 0, 0, 0.2); /* Stronger shadow */
transition: all 0.3s ease;
margin-bottom: 20px;
}
.duplicates-banner .banner-content {
position: relative;
max-width: 1400px;
margin: 0 auto;
display: flex;
align-items: center;
gap: 12px;
padding: 0 16px;
}
/* Responsive container for larger screens - match container in layout.css */
@media (min-width: 2000px) {
.duplicates-banner .banner-content {
max-width: 1800px;
}
}
@media (min-width: 3000px) {
.duplicates-banner .banner-content {
max-width: 2400px;
}
}
.duplicates-banner i.fa-exclamation-triangle {
font-size: 18px;
color: oklch(var(--lora-warning-l) var(--lora-warning-c) var(--lora-warning-h));
}
.duplicates-banner .banner-actions {
margin-left: auto;
display: flex;
gap: 8px;
align-items: center;
}
/* Improved exit button in banner */
.duplicates-banner button.btn-exit-mode {
min-width: 120px;
background-color: var(--card-bg);
color: var(--text-color);
border: 1px solid var(--border-color);
padding: 6px 12px;
border-radius: var(--border-radius-xs);
font-size: 0.85em;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
gap: 6px;
transition: all 0.2s ease;
}
.duplicates-banner button.btn-exit-mode:hover {
    background-color: var(--bg-color);
    /* border-color takes colors; the bare L/C/H components must be wrapped
       in oklch() to form a valid color (was an invalid three-token value). */
    border-color: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
    transform: translateY(-1px);
}
.duplicates-banner button {
min-width: 100px;
display: flex;
align-items: center;
justify-content: center;
gap: 4px;
border-radius: var(--border-radius-xs);
padding: 4px 10px;
border: 1px solid var(--border-color);
background: var(--card-bg);
color: var(--text-color);
font-size: 0.85em;
transition: all 0.2s ease;
cursor: pointer;
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
.duplicates-banner button:hover {
    /* Wrap the L/C/H components in oklch() — bare components are not a color
       and made this declaration invalid. */
    border-color: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
    background: var(--bg-color);
    transform: translateY(-1px);
    box-shadow: 0 3px 5px rgba(0, 0, 0, 0.08);
}
.duplicates-banner button.btn-exit {
min-width: unset;
width: 28px;
height: 28px;
padding: 0;
display: flex;
align-items: center;
justify-content: center;
border-radius: 50%;
}
.duplicates-banner button.disabled {
opacity: 0.5;
cursor: not-allowed;
}
/* Duplicate groups */
.duplicate-group {
position: relative;
border: 2px solid oklch(var(--lora-warning-l) var(--lora-warning-c) var(--lora-warning-h));
border-radius: var(--border-radius-base);
padding: 16px;
margin-bottom: 24px;
background: var(--card-bg);
box-shadow: 0 2px 6px rgba(0, 0, 0, 0.12); /* Add subtle shadow to groups */
/* Add responsive width settings to match banner */
max-width: 1400px;
margin-left: auto;
margin-right: auto;
}
/* Add responsive container adjustments for duplicate groups - match container in banner */
@media (min-width: 2000px) {
.duplicate-group {
max-width: 1800px;
}
}
@media (min-width: 3000px) {
.duplicate-group {
max-width: 2400px;
}
}
.duplicate-group-header {
background-color: var(--bg-color);
color: var(--text-color);
border: 1px solid var(--border-color);
padding: 10px 16px; /* Slightly increased padding */
border-radius: var(--border-radius-xs);
margin-bottom: 16px;
display: flex;
justify-content: space-between;
align-items: center;
border-left: 4px solid oklch(var(--lora-warning-l) var(--lora-warning-c) var(--lora-warning-h)); /* Add accent border on the left */
}
.duplicate-group-header span:last-child {
display: flex;
gap: 8px;
align-items: center;
}
.duplicate-group-header button {
min-width: 80px;
display: flex;
align-items: center;
justify-content: center;
gap: 4px;
border-radius: var(--border-radius-xs);
padding: 4px 8px;
border: 1px solid var(--border-color);
background: var(--card-bg);
color: var(--text-color);
font-size: 0.85em;
transition: all 0.2s ease;
cursor: pointer;
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
margin-left: 8px;
}
.duplicate-group-header button:hover {
    /* Wrap the L/C/H components in oklch() — bare components are not a color
       and made this declaration invalid. */
    border-color: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
    background: var(--bg-color);
    transform: translateY(-1px);
    box-shadow: 0 3px 5px rgba(0, 0, 0, 0.08);
}
.card-group-container {
display: flex;
flex-wrap: wrap;
gap: 16px;
justify-content: flex-start;
align-items: flex-start;
}
/* Make cards in duplicate groups have consistent width */
.card-group-container .lora-card {
flex: 0 0 auto;
width: 240px;
margin: 0;
cursor: pointer; /* Indicate the card is clickable */
}
/* Ensure the grid layout is only applied to the main recipe grid, not duplicate groups */
.duplicate-mode .card-grid {
display: block;
}
/* Scrollable container for large duplicate groups */
.card-group-container.scrollable {
max-height: 450px;
overflow-y: auto;
padding-right: 8px;
}
/* Add a toggle button to expand/collapse large duplicate groups */
.group-toggle-btn {
position: absolute;
right: 16px;
bottom: -12px;
background: var(--card-bg);
color: var(--text-color);
border: 1px solid var(--border-color);
border-radius: 50%;
width: 24px;
height: 24px;
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
z-index: 1;
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
transition: all 0.2s ease;
}
.group-toggle-btn:hover {
    /* Fixed: "var (--lora-accent-h)" had a stray space (syntax error) and the
       bare L/C/H components are not a color — compose them with oklch(). */
    border-color: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
    transform: translateY(-1px);
    box-shadow: 0 3px 5px rgba(0, 0, 0, 0.08);
}
/* Duplicate card styling */
.lora-card.duplicate {
position: relative;
transition: all 0.2s ease;
}
.lora-card.duplicate:hover {
    /* Wrap the L/C/H components in oklch() — bare components are not a color
       and made this declaration invalid. */
    border-color: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
}
.lora-card.duplicate.latest {
border-style: solid;
border-color: oklch(var(--lora-warning-l) var(--lora-warning-c) var(--lora-warning-h));
}
.lora-card.duplicate-selected {
border: 2px solid oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
box-shadow: 0 0 8px rgba(0, 0, 0, 0.2);
}
.lora-card .selector-checkbox {
position: absolute;
top: 10px;
right: 10px;
z-index: 10;
width: 20px;
height: 20px;
cursor: pointer;
}
/* Latest indicator */
.lora-card.duplicate.latest::after {
content: "Latest";
position: absolute;
top: 10px;
left: 10px;
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
color: white;
font-size: 12px;
padding: 2px 6px;
border-radius: var(--border-radius-xs);
z-index: 5;
}
/* Model tooltip for duplicates mode */
.model-tooltip {
position: absolute;
background-color: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-sm);
box-shadow: 0 2px 10px rgba(0,0,0,0.2);
padding: 10px;
z-index: 1000;
max-width: 350px;
min-width: 250px;
color: var(--text-color);
font-size: 0.9em;
pointer-events: none; /* Don't block mouse events */
}
.model-tooltip .tooltip-header {
font-weight: bold;
font-size: 1.1em;
margin-bottom: 8px;
padding-bottom: 5px;
border-bottom: 1px solid var(--border-color);
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.model-tooltip .tooltip-info div {
margin-bottom: 4px;
display: flex;
flex-wrap: wrap;
}
.model-tooltip .tooltip-info div strong {
margin-right: 5px;
min-width: 70px;
}
/* Badge Styles */
.badge {
display: inline-flex;
align-items: center;
justify-content: center;
min-width: 16px; /* Reduced from 20px */
height: 16px; /* Reduced from 20px */
border-radius: 8px; /* Adjusted for smaller size */
background-color: var(--lora-error);
color: white;
font-size: 10px; /* Smaller font size */
font-weight: bold;
padding: 0 4px; /* Reduced padding */
position: absolute;
top: -8px; /* Moved closer to button */
right: -8px; /* Moved closer to button */
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.15); /* Softer shadow */
transition: transform 0.2s ease, opacity 0.2s ease;
}
.badge:empty {
display: none;
}
/* Make the pulse animation more subtle */
.badge.pulse {
animation: badge-pulse 2s infinite; /* Slower animation */
}
@keyframes badge-pulse {
0% {
transform: scale(1);
}
50% {
transform: scale(1.1); /* Less expansion */
}
100% {
transform: scale(1);
}
}
/* Help icon styling */
.help-icon {
color: var(--text-color);
opacity: 0.7;
cursor: help;
font-size: 16px;
margin-left: 8px;
transition: all 0.2s ease;
}
.help-icon:hover {
opacity: 1;
color: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h));
}
/* Help tooltip */
.help-tooltip {
display: none;
position: absolute;
max-width: 400px;
background: var(--card-bg);
color: var(--text-color);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-sm);
padding: 12px 16px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
z-index: var(--z-overlay);
font-size: 0.9em;
margin-top: 10px;
text-align: left;
pointer-events: none;
}
.help-tooltip:after {
content: "";
position: absolute;
top: -8px;
left: 10px; /* Position the arrow near the left instead of center */
border-width: 0 8px 8px 8px;
border-style: solid;
border-color: transparent transparent var(--card-bg) transparent;
}
/* Responsive adjustments */
@media (max-width: 768px) {
.duplicates-banner .banner-content {
flex-direction: column;
align-items: flex-start;
gap: 8px;
}
.duplicates-banner .banner-actions {
width: 100%;
margin-left: 0;
justify-content: space-between;
}
.duplicate-group-header {
flex-direction: column;
gap: 8px;
align-items: flex-start;
}
.duplicate-group-header span:last-child {
display: flex;
gap: 8px;
width: 100%;
}
.duplicate-group-header button {
margin-left: 0;
flex: 1;
}
.help-tooltip {
max-width: calc(100% - 40px);
}
/* Remove the fixed positioning adjustments for mobile since we're now using dynamic positioning */
.help-tooltip:after {
left: 10px;
}
}
/* In dark mode, add additional distinction */
html[data-theme="dark"] .duplicates-banner {
box-shadow: 0 3px 12px rgba(0, 0, 0, 0.4); /* Stronger shadow in dark mode */
background-color: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.15); /* Slightly stronger background in dark mode */
}
html[data-theme="dark"] .duplicate-group {
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.25); /* Stronger shadow in dark mode */
}
html[data-theme="dark"] .help-tooltip {
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3);
}
/* Styles for disabled controls during duplicates mode */
.disabled-during-duplicates {
opacity: 0.5 !important;
pointer-events: none !important;
cursor: not-allowed !important;
user-select: none !important;
filter: grayscale(50%) !important;
}
/* Make the active duplicates button more prominent */
#findDuplicatesBtn.active {
background: var(--lora-accent);
color: white;
border-color: var(--lora-accent);
box-shadow: 0 0 0 2px oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.25);
position: relative;
z-index: 5;
}
#findDuplicatesBtn.active:hover {
background: oklch(calc(var(--lora-accent-l) - 5%) var(--lora-accent-c) var(--lora-accent-h));
}

View File

@@ -136,6 +136,30 @@
opacity: 0;
}
/* Badge styling */
/* Small red dot pinned to the top-right corner of its (positioned) parent,
   used to signal that an update is available. Hidden by default; JS toggles
   the 'visible' class to show it. */
.update-badge {
position: absolute;
top: -3px;
right: -3px;
width: 8px;
height: 8px;
background-color: var(--lora-error);
border-radius: 50%;
/* Ring matching the card background so the dot reads as "cut out". */
border: 2px solid var(--card-bg);
transition: all 0.2s ease;
/* The dot is informational only — never intercept clicks on the button. */
pointer-events: none;
opacity: 0;
}
.update-badge.visible {
opacity: 1;
}
/* Explicit hidden states; :not(.visible) also covers the default case,
   so the .hidden class is a belt-and-braces alias. */
.update-badge.hidden,
.update-badge:not(.visible) {
opacity: 0;
}
/* Mobile adjustments */
@media (max-width: 768px) {
.app-title {

View File

@@ -291,7 +291,7 @@
gap: 8px;
padding: var(--space-1);
border: 1px solid var(--border-color);
/* Note: `var (--x)` with a space after `var` is invalid CSS — the var()
   function token must be immediately followed by '('. The broken duplicate
   declaration has been removed, keeping the single valid one. */
border-radius: var(--border-radius-sm);
background: var(--lora-surface);
}
@@ -733,3 +733,150 @@
font-size: 0.9em;
line-height: 1.4;
}
/* Duplicate Recipes Styles */
.duplicate-recipes-container {
margin-bottom: var(--space-3);
border-radius: var(--border-radius-sm);
overflow: hidden;
animation: fadeIn 0.3s ease-in-out;
}
@keyframes fadeIn {
from { opacity: 0; transform: translateY(-10px); }
to { opacity: 1; transform: translateY(0); }
}
/* Warning banner shown above the duplicate-recipes list. */
.duplicate-warning {
display: flex;
align-items: flex-start;
gap: 12px;
/* `oklch(var(--color) / a)` is invalid — oklch() cannot wrap a whole color
   value directly. Use relative color syntax (`from`), as done elsewhere in
   this stylesheet (e.g. the .exclude-btn:hover rule). */
background: oklch(from var(--lora-warning) l c h / 0.1);
border: 1px solid var(--lora-warning);
/* Rounded on top only; the list below supplies the bottom corners. */
border-radius: var(--border-radius-sm) var(--border-radius-sm) 0 0;
color: var(--text-color);
}
.duplicate-warning .warning-icon {
color: var(--lora-warning);
font-size: 1.2em;
padding-top: 2px;
}
.duplicate-warning .warning-content {
flex: 1;
}
.duplicate-warning .warning-title {
font-weight: 600;
margin-bottom: 4px;
}
.duplicate-warning .warning-text {
font-size: 0.9em;
line-height: 1.4;
display: flex;
justify-content: space-between;
align-items: center;
flex-wrap: wrap;
gap: 8px;
}
.toggle-duplicates-btn {
background: none;
border: none;
color: var(--lora-warning);
cursor: pointer;
font-size: 0.9em;
display: flex;
align-items: center;
gap: 6px;
padding: 4px 8px;
border-radius: var(--border-radius-xs);
}
.toggle-duplicates-btn:hover {
/* Fixed invalid `oklch(var(--lora-warning) / 0.1)`: oklch() needs the
   relative-color `from` keyword to derive from an existing color value. */
background: oklch(from var(--lora-warning) l c h / 0.1);
}
.duplicate-recipes-list {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(150px, 1fr));
gap: 12px;
padding: 16px;
border: 1px solid var(--border-color);
border-top: none;
border-radius: 0 0 var(--border-radius-sm) var(--border-radius-sm);
background: var(--bg-color);
max-height: 300px;
overflow-y: auto;
transition: max-height 0.3s ease, padding 0.3s ease;
}
.duplicate-recipes-list.collapsed {
max-height: 0;
padding: 0 16px;
overflow: hidden;
}
.duplicate-recipe-card {
position: relative;
border-radius: var(--border-radius-sm);
overflow: hidden;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
transition: transform 0.2s ease;
}
.duplicate-recipe-card:hover {
transform: translateY(-2px);
}
.duplicate-recipe-preview {
width: 100%;
position: relative;
aspect-ratio: 2/3;
background: var(--bg-color);
}
.duplicate-recipe-preview img {
width: 100%;
height: 100%;
object-fit: cover;
}
.duplicate-recipe-title {
position: absolute;
bottom: 0;
left: 0;
right: 0;
padding: 8px;
background: rgba(0, 0, 0, 0.7);
color: white;
font-size: 0.85em;
line-height: 1.3;
max-height: 50%;
overflow: hidden;
text-overflow: ellipsis;
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-box-orient: vertical;
}
.duplicate-recipe-details {
padding: 8px;
background: var(--bg-color);
font-size: 0.75em;
display: flex;
justify-content: space-between;
align-items: center;
color: var(--text-color);
opacity: 0.8;
}
.duplicate-recipe-date,
.duplicate-recipe-lora-count {
display: flex;
align-items: center;
gap: 4px;
}

View File

@@ -0,0 +1,96 @@
/* Keyboard navigation indicator and help */
.keyboard-nav-hint {
display: inline-flex;
align-items: center;
justify-content: center;
position: relative;
width: 32px;
height: 32px;
border-radius: 50%;
background: var(--card-bg);
border: 1px solid var(--border-color);
color: var(--text-color);
cursor: help;
transition: all 0.2s ease;
margin-left: 8px;
}
.keyboard-nav-hint:hover {
background: var(--lora-accent);
color: white;
transform: translateY(-2px);
box-shadow: 0 3px 5px rgba(0, 0, 0, 0.08);
}
.keyboard-nav-hint i {
font-size: 14px;
}
/* Tooltip styling */
.tooltip {
position: relative;
}
.tooltip .tooltiptext {
visibility: hidden;
width: 240px;
background-color: var(--lora-surface);
color: var(--text-color);
text-align: center;
border-radius: var(--border-radius-xs);
padding: 8px;
position: absolute;
z-index: 9999; /* Ensure the tooltip renders above the cards */
left: 120%; /* Show the tooltip to the right of the icon */
top: 50%; /* Vertically centered */
transform: translateY(-50%); /* Vertically centered */
opacity: 0;
transition: opacity 0.3s;
box-shadow: 0 3px 8px rgba(0, 0, 0, 0.15);
border: 1px solid var(--lora-border);
font-size: 0.85em;
line-height: 1.4;
}
.tooltip .tooltiptext::after {
content: "";
position: absolute;
top: 50%; /* Vertically center the arrow */
right: 100%; /* Place the arrow on the left edge */
margin-top: -5px;
border-width: 5px;
border-style: solid;
border-color: transparent var(--lora-border) transparent transparent; /* Arrow points to the left */
}
.tooltip:hover .tooltiptext {
visibility: visible;
opacity: 1;
}
/* Keyboard shortcuts table */
.keyboard-shortcuts {
width: 100%;
border-collapse: collapse;
margin-top: 5px;
}
.keyboard-shortcuts td {
padding: 4px;
text-align: left;
}
.keyboard-shortcuts td:first-child {
font-weight: bold;
width: 40%;
}
.key {
display: inline-block;
background: var(--bg-color);
border: 1px solid var(--border-color);
border-radius: 3px;
padding: 1px 5px;
font-size: 0.8em;
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.08);
}

View File

@@ -132,7 +132,7 @@
}
.scroll-indicator:hover {
background: oklch(var(--lora-accent) / 0.1);
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
transform: translateY(-1px);
}
@@ -241,7 +241,7 @@
/* Keep the hover effect using accent color */
.trigger-word-tag:hover {
background: oklch(var(--lora-accent) / 0.1);
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
border-color: var(--lora-accent);
}
@@ -301,7 +301,7 @@
}
.trigger-words-edit-controls button:hover {
background: oklch(var(--lora-accent) / 0.1);
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
border-color: var(--lora-accent);
}
@@ -324,6 +324,7 @@
margin-top: var(--space-2);
display: flex;
gap: var(--space-1);
position: relative; /* Added for dropdown positioning */
}
.new-trigger-word-input {
@@ -346,7 +347,7 @@
padding: 4px 8px;
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
/* Removed duplicate `background: var (--bg-color);` — the space after `var`
   makes it invalid CSS; the valid declaration below is kept. */
background: var(--bg-color);
color: var(--text-color);
font-size: 0.85em;
cursor: pointer;
@@ -371,6 +372,146 @@
background: rgba(255, 255, 255, 0.05);
}
/* Trained Words Loading Indicator */
.trained-words-loading {
display: flex;
align-items: center;
justify-content: center;
margin: var(--space-1) 0;
color: var(--text-color);
opacity: 0.7;
font-size: 0.9em;
gap: 8px;
}
.trained-words-loading i {
color: var(--lora-accent);
}
/* Trained Words Dropdown Styles */
.trained-words-dropdown {
position: absolute;
top: 100%;
left: 0;
right: 0;
background: var(--bg-color);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-sm);
margin-top: 4px;
z-index: 100;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
overflow: hidden;
display: flex;
flex-direction: column;
}
.trained-words-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 8px 12px;
background: var(--card-bg);
border-bottom: 1px solid var(--border-color);
}
.trained-words-header span {
font-size: 0.9em;
font-weight: 500;
color: var(--text-color);
}
.trained-words-header small {
font-size: 0.8em;
opacity: 0.7;
}
.trained-words-container {
max-height: 200px;
overflow-y: auto;
padding: 10px;
display: flex;
flex-wrap: wrap;
gap: 8px;
align-content: flex-start;
}
.trained-word-item {
display: inline-flex;
align-items: center;
justify-content: space-between;
padding: 5px 10px;
cursor: pointer;
transition: all 0.2s ease;
border-radius: var(--border-radius-xs);
background: var(--lora-surface);
border: 1px solid var(--lora-border);
max-width: 150px;
}
.trained-word-item:hover {
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
border-color: var(--lora-accent);
}
.trained-word-item.already-added {
opacity: 0.7;
cursor: default;
}
.trained-word-item.already-added:hover {
background: var(--lora-surface);
border-color: var(--lora-border);
}
.trained-word-text {
color: var(--lora-accent);
font-size: 0.9em;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
margin-right: 4px;
max-width: 100px;
}
/* Right-hand cluster of a trained-word chip: frequency badge + added tick. */
.trained-word-meta {
display: flex;
align-items: center;
gap: 4px;
flex-shrink: 0;
}
/* Pill showing how often the word appears in the training data. */
.trained-word-freq {
/* Fixed invalid `var (--text-color)` — no space allowed after `var`. */
color: var(--text-color);
font-size: 0.75em;
background: rgba(0, 0, 0, 0.05);
border-radius: 10px;
min-width: 20px;
padding: 1px 5px;
text-align: center;
line-height: 1.2;
}
[data-theme="dark"] .trained-word-freq {
background: rgba(255, 255, 255, 0.05);
}
.added-indicator {
color: var(--lora-accent);
display: flex;
align-items: center;
justify-content: center;
font-size: 0.75em;
}
.no-trained-words {
padding: 16px 12px;
text-align: center;
color: var(--text-color);
opacity: 0.7;
font-style: italic;
font-size: 0.9em;
}
/* Editable Fields */
.editable-field {
position: relative;
@@ -515,7 +656,7 @@
}
.preset-tag:hover {
background: oklch(var(--lora-accent) / 0.1);
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
border-color: var(--lora-accent);
}
@@ -549,10 +690,6 @@
position: relative;
}
.file-name-wrapper:hover {
background: oklch(var(--lora-accent) / 0.1);
}
.file-name-content {
padding: 2px 4px;
border-radius: var(--border-radius-xs);
@@ -673,11 +810,6 @@
opacity: 0.9;
}
/* Model name field styles - complete replacement */
.model-name-field {
display: none;
}
/* New Model Name Header Styles */
.model-name-header {
display: flex;
@@ -754,7 +886,7 @@
.tab-btn:hover {
opacity: 1;
background: oklch(var(--lora-accent) / 0.05);
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.05);
}
.tab-btn.active {
@@ -936,7 +1068,7 @@
/* Code blocks inside the rendered model description. */
.model-description-content pre {
background: rgba(0, 0, 0, 0.05);
border-radius: var(--border-radius-xs);
/* Removed duplicate `padding: var (--space-1);` — invalid because of the
   space after `var`; the valid declaration is kept. */
padding: var(--space-1);
white-space: pre-wrap;
margin: 1em 0;
overflow-x: auto;
@@ -1133,8 +1265,8 @@
pointer-events: none;
}
/* Show metadata panel only on hover */
.media-wrapper:hover .image-metadata-panel {
/* Show metadata panel only when the 'visible' class is added */
.media-wrapper .image-metadata-panel.visible {
transform: translateY(0);
opacity: 0.98;
pointer-events: auto;
@@ -1323,4 +1455,89 @@
.recipes-error i {
color: var(--lora-error);
}
/* Creator Information Styles */
.creator-info {
display: flex;
align-items: center;
gap: 10px;
margin-bottom: var(--space-1);
padding: 6px 10px;
background: rgba(0, 0, 0, 0.03);
border: 1px solid rgba(0, 0, 0, 0.1);
border-radius: var(--border-radius-sm);
max-width: fit-content;
}
[data-theme="dark"] .creator-info {
background: rgba(255, 255, 255, 0.03);
border: 1px solid var(--lora-border);
}
.creator-avatar {
width: 28px;
height: 28px;
border-radius: 50%;
overflow: hidden;
flex-shrink: 0;
display: flex;
align-items: center;
justify-content: center;
background: var(--lora-surface);
border: 1px solid var(--lora-border);
}
.creator-avatar img {
width: 100%;
height: 100%;
object-fit: cover;
}
.creator-placeholder {
background: var(--lora-accent);
color: white;
display: flex;
align-items: center;
justify-content: center;
}
.creator-username {
font-size: 0.9em;
font-weight: 500;
color: var(--text-color);
}
/* Optional: add hover effect for creator info */
.creator-info:hover {
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
border-color: var(--lora-accent);
}
/* Class tokens styling */
.class-tokens-container {
padding: 10px;
display: flex;
flex-wrap: wrap;
gap: 8px;
}
.class-token-item {
background: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1) !important;
border: 1px solid var(--lora-accent) !important;
}
.token-badge {
background: var(--lora-accent);
color: white;
font-size: 0.7em;
padding: 2px 5px;
border-radius: 8px;
white-space: nowrap;
}
.dropdown-separator {
height: 1px;
background: var(--lora-border);
margin: 5px 10px;
}

View File

@@ -39,4 +39,81 @@
.context-menu-item i {
width: 16px;
text-align: center;
}
/* NSFW Level Selector */
.nsfw-level-selector {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
background: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-base);
padding: 16px;
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.2);
z-index: var(--z-modal);
width: 300px;
display: none;
}
.nsfw-level-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 16px;
}
.nsfw-level-header h3 {
margin: 0;
font-size: 16px;
font-weight: 500;
}
.close-nsfw-selector {
background: transparent;
border: none;
color: var(--text-color);
cursor: pointer;
padding: 4px;
border-radius: var(--border-radius-xs);
}
.close-nsfw-selector:hover {
background: var(--border-color);
}
.current-level {
margin-bottom: 12px;
padding: 8px;
background: var(--bg-color);
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
}
.nsfw-level-options {
display: flex;
flex-wrap: wrap;
gap: 8px;
}
.nsfw-level-btn {
flex: 1 0 calc(33% - 8px);
padding: 8px;
border-radius: var(--border-radius-xs);
background: var(--bg-color);
border: 1px solid var(--border-color);
color: var(--text-color);
cursor: pointer;
transition: all 0.2s ease;
}
.nsfw-level-btn:hover {
background: var(--lora-border);
}
.nsfw-level-btn.active {
background: var(--lora-accent);
color: white;
border-color: var(--lora-accent);
}

View File

@@ -44,26 +44,12 @@ body.modal-open {
}
/* Delete Modal specific styles */
.delete-modal-content {
max-width: 500px;
text-align: center;
}
.delete-message {
color: var(--text-color);
margin: var(--space-2) 0;
}
.delete-model-info {
background: var(--lora-surface);
border: 1px solid var(--lora-border);
border-radius: var(--border-radius-sm);
padding: var(--space-2);
margin: var(--space-2) 0;
color: var(--text-color);
word-break: break-all;
}
/* Update delete modal styles */
.delete-modal {
display: none; /* Set initial display to none */
@@ -92,7 +78,8 @@ body.modal-open {
animation: modalFadeIn 0.2s ease-out;
}
.delete-model-info {
.delete-model-info,
.exclude-model-info {
/* Update info display styling */
background: var(--lora-surface);
border: 1px solid var(--lora-border);
@@ -123,7 +110,7 @@ body.modal-open {
margin-top: var(--space-3);
}
.cancel-btn, .delete-btn {
.cancel-btn, .delete-btn, .exclude-btn, .confirm-btn {
padding: 8px var(--space-2);
border-radius: 6px;
border: none;
@@ -143,6 +130,12 @@ body.modal-open {
color: white;
}
/* Style for exclude button - different from delete button */
.exclude-btn, .confirm-btn {
background: var(--lora-accent, #4f46e5);
color: white;
}
.cancel-btn:hover {
background: var(--lora-border);
}
@@ -151,9 +144,14 @@ body.modal-open {
opacity: 0.9;
}
.exclude-btn:hover, .confirm-btn:hover {
opacity: 0.9;
background: oklch(from var(--lora-accent, #4f46e5) l c h / 85%);
}
.modal-content h2 {
color: var(--text-color);
margin-bottom: var(--space-2);
margin-bottom: var(--space-1);
font-size: 1.5em;
}
@@ -308,6 +306,18 @@ body.modal-open {
width: 100%; /* Full width */
}
/* Migrate control styling */
.migrate-control {
display: flex;
align-items: center;
gap: 8px;
}
.migrate-control input {
flex: 1;
min-width: 0;
}
/* Unified styling shared by the sections below */
.support-section,
.changelog-section,
@@ -365,6 +375,12 @@ body.modal-open {
background: rgba(255, 255, 255, 0.05);
}
/* Add disabled style for setting items */
.setting-item[data-requires-centralized="true"].disabled {
opacity: 0.6;
pointer-events: none;
}
/* Control row with label and input together */
.setting-row {
display: flex;
@@ -528,7 +544,7 @@ input:checked + .toggle-slider:before {
gap: 8px;
padding: 8px 16px;
background-color: var(--card-bg);
/* Removed duplicate `color: var (--text-color);` — invalid CSS (space after
   `var`); the valid declaration is kept. */
color: var(--text-color);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-sm);
cursor: pointer;
@@ -556,6 +572,13 @@ input:checked + .toggle-slider:before {
pointer-events: none;
}
.restart-required-icon {
color: var(--lora-warning);
margin-left: 5px;
font-size: 0.85em;
vertical-align: text-bottom;
}
/* Dark theme specific button adjustments */
[data-theme="dark"] .primary-btn:hover {
background-color: oklch(from var(--lora-accent) l c h / 75%);
@@ -587,7 +610,7 @@ input:checked + .toggle-slider:before {
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
background-color: var(--lora-surface);
/* Removed duplicate `color: var (--text-color);` — invalid CSS (space after
   `var`); the valid declaration is kept. */
color: var(--text-color);
font-size: 0.95em;
height: 32px;
}
@@ -674,4 +697,279 @@ input:checked + .toggle-slider:before {
.changelog-item a:hover {
text-decoration: underline;
}
/* Add warning text style for settings */
.warning-text {
color: var(--lora-warning, #e67e22);
font-weight: 500;
}
[data-theme="dark"] .warning-text {
color: var(--lora-warning, #f39c12);
}
/* Add styles for density description list */
.density-description {
margin: 8px 0;
padding-left: 20px;
font-size: 0.9em;
}
.density-description li {
margin-bottom: 4px;
}
/* Help Modal styles */
.help-modal {
max-width: 850px;
}
.help-header {
display: flex;
align-items: center;
margin-bottom: var(--space-2);
}
.modal-help-icon {
font-size: 24px;
color: var(--lora-accent);
margin-right: var(--space-2);
vertical-align: text-bottom;
}
/* Tab navigation styles */
.help-tabs {
display: flex;
border-bottom: 1px solid var(--lora-border);
margin-bottom: var(--space-2);
gap: 8px;
}
.tab-btn {
padding: 8px 16px;
background: transparent;
border: none;
border-bottom: 2px solid transparent;
color: var(--text-color);
cursor: pointer;
font-weight: 500;
transition: all 0.2s;
opacity: 0.7;
}
.tab-btn:hover {
background-color: rgba(0, 0, 0, 0.05);
opacity: 0.9;
}
.tab-btn.active {
color: var(--lora-accent);
border-bottom: 2px solid var(--lora-accent);
opacity: 1;
}
/* Tab content styles */
.help-content {
padding: var(--space-1) 0;
overflow-y: auto;
}
.tab-pane {
display: none;
}
.tab-pane.active {
display: block;
}
/* Video embed styles */
.video-embed {
position: relative;
padding-bottom: 56.25%; /* 16:9 aspect ratio */
height: 0;
overflow: hidden;
max-width: 100%;
margin-bottom: var(--space-2);
border-radius: var(--border-radius-sm);
}
.video-embed iframe {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
}
.video-embed.small {
max-width: 100%;
margin-bottom: var(--space-1);
}
.help-text {
margin: var(--space-2) 0;
}
.help-text ul {
padding-left: 20px;
margin-top: 8px;
}
.help-text li {
margin-bottom: 8px;
}
/* Documentation link styles */
.docs-section {
margin-bottom: var(--space-3);
}
.docs-section h4 {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: var(--space-1);
}
.docs-links {
list-style-type: none;
padding-left: var(--space-3);
}
.docs-links li {
margin-bottom: var(--space-1);
position: relative;
}
.docs-links li:before {
content: "•";
position: absolute;
left: -15px;
color: var(--lora-accent);
}
.docs-links a {
color: var(--lora-accent);
text-decoration: none;
transition: color 0.2s;
}
.docs-links a:hover {
text-decoration: underline;
}
/* Update video list styles */
.video-list {
display: flex;
flex-direction: column;
gap: var(--space-3);
}
.video-item {
display: flex;
flex-direction: column;
}
.video-info {
padding: var(--space-1);
}
.video-info h4 {
margin-bottom: var(--space-1);
}
.video-info p {
font-size: 0.9em;
opacity: 0.8;
}
/* Dark theme adjustments */
[data-theme="dark"] .tab-btn:hover {
background-color: rgba(255, 255, 255, 0.05);
}
/* Update date badge styles */
.update-date-badge {
display: inline-flex;
align-items: center;
font-size: 0.75em;
font-weight: 500;
background-color: var(--lora-accent);
color: var(--lora-text);
padding: 4px 8px;
border-radius: 12px;
margin-left: 10px;
vertical-align: middle;
animation: fadeIn 0.5s ease-in-out;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.update-date-badge i {
margin-right: 5px;
font-size: 0.9em;
}
@keyframes fadeIn {
from { opacity: 0; transform: translateY(-5px); }
to { opacity: 1; transform: translateY(0); }
}
/* Dark theme adjustments */
[data-theme="dark"] .update-date-badge {
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3);
}
/* Re-link to Civitai Modal styles */
.warning-box {
background-color: rgba(255, 193, 7, 0.1);
border: 1px solid rgba(255, 193, 7, 0.5);
border-radius: var(--border-radius-sm);
padding: var(--space-2);
margin-bottom: var(--space-3);
}
.warning-box i {
color: var(--lora-warning);
margin-right: var(--space-1);
}
.warning-box ul {
padding-left: 20px;
margin: var(--space-1) 0;
}
.warning-box li {
margin-bottom: 4px;
}
.input-group {
display: flex;
flex-direction: column;
margin-bottom: var(--space-2);
}
.input-group label {
margin-bottom: var(--space-1);
font-weight: 500;
}
.input-group input {
padding: 8px 12px;
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
background-color: var(--lora-surface);
color: var(--text-color);
}
.input-error {
color: var(--lora-error);
font-size: 0.9em;
min-height: 20px;
margin-top: 4px;
}
[data-theme="dark"] .warning-box {
background-color: rgba(255, 193, 7, 0.05);
border-color: rgba(255, 193, 7, 0.3);
}

View File

@@ -12,11 +12,13 @@
transition: transform 0.3s ease, opacity 0.3s ease;
opacity: 0;
transform: translateY(20px);
pointer-events: none; /* Ignore mouse events when invisible */
}
.progress-panel.visible {
opacity: 1;
transform: translateY(0);
pointer-events: auto; /* Capture mouse events when visible */
}
.progress-panel.collapsed .progress-panel-content {

View File

@@ -229,8 +229,10 @@
background: var(--lora-surface);
border: 1px solid var(--border-color);
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
position: relative;
}
.recipe-preview-container img,
@@ -246,6 +248,133 @@
object-fit: contain;
}
/* Source URL container */
.source-url-container {
position: absolute;
bottom: 0;
left: 0;
right: 0;
background: rgba(0, 0, 0, 0.5);
padding: 8px 12px;
display: flex;
justify-content: space-between;
align-items: center;
transition: transform 0.3s ease;
transform: translateY(100%);
}
.recipe-preview-container:hover .source-url-container {
transform: translateY(0);
}
.source-url-container.active {
transform: translateY(0);
}
.source-url-content {
display: flex;
align-items: center;
color: #fff;
flex: 1;
overflow: hidden;
font-size: 0.85em;
}
.source-url-icon {
margin-right: 8px;
flex-shrink: 0;
}
.source-url-text {
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
cursor: pointer;
flex: 1;
}
.source-url-edit-btn {
background: none;
border: none;
color: #fff;
cursor: pointer;
padding: 4px;
margin-left: 8px;
border-radius: var(--border-radius-xs);
opacity: 0.7;
transition: opacity 0.2s ease;
flex-shrink: 0;
}
.source-url-edit-btn:hover {
opacity: 1;
background: rgba(255, 255, 255, 0.1);
}
/* Source URL editor */
.source-url-editor {
display: none;
position: absolute;
bottom: 0;
left: 0;
right: 0;
background: var(--bg-color);
border-top: 1px solid var(--border-color);
padding: 12px;
flex-direction: column;
gap: 10px;
z-index: 5;
}
.source-url-editor.active {
display: flex;
}
.source-url-input {
width: 100%;
padding: 8px 10px;
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
background: var(--bg-color);
color: var(--text-color);
font-size: 0.9em;
}
.source-url-actions {
display: flex;
justify-content: flex-end;
gap: 8px;
}
.source-url-cancel-btn,
.source-url-save-btn {
padding: 6px 12px;
border-radius: var(--border-radius-xs);
font-size: 0.85em;
cursor: pointer;
border: none;
transition: all 0.2s;
}
.source-url-cancel-btn {
background: var(--bg-color);
color: var(--text-color);
border: 1px solid var(--border-color);
}
.source-url-save-btn {
background: var(--lora-accent);
color: white;
}
.source-url-cancel-btn:hover {
background: var(--lora-surface);
}
.source-url-save-btn:hover {
background: color-mix(in oklch, var(--lora-accent), black 10%);
}
/* Generation Parameters */
.recipe-gen-params {
height: 360px;

View File

@@ -117,9 +117,50 @@
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
/* QR Code section styles */
.qrcode-toggle {
width: 100%;
margin-top: var(--space-2);
justify-content: center;
position: relative;
}
.qrcode-toggle .toggle-icon {
margin-left: 8px;
transition: transform 0.3s ease;
}
.qrcode-toggle.active .toggle-icon {
transform: rotate(180deg);
}
.qrcode-container {
max-height: 0;
overflow: hidden;
transition: max-height 0.4s ease, opacity 0.3s ease;
opacity: 0;
display: flex;
flex-direction: column;
align-items: center;
}
.qrcode-container.show {
max-height: 500px;
opacity: 1;
margin-top: var(--space-3);
}
.qrcode-image {
max-width: 80%;
height: auto;
border-radius: var(--border-radius-sm);
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
border: 1px solid var(--lora-border);
aspect-ratio: 1/1; /* Ensure proper aspect ratio for the square QR code */
}
.support-footer {
text-align: center;
margin-top: var(--space-1);
font-style: italic;
color: var(--text-color);
}

View File

@@ -1,10 +1,10 @@
.page-content {
height: calc(100vh - 48px); /* Full height minus header */
margin-top: 48px; /* Push down below header */
overflow-y: auto; /* Enable scrolling here */
width: 100%;
position: relative;
overflow-y: scroll;
overflow-x: hidden; /* Prevent horizontal scrolling */
overflow-y: auto; /* Enable vertical scrolling */
}
.container {
@@ -15,6 +15,19 @@
z-index: var(--z-base);
}
/* Responsive container for larger screens */
@media (min-width: 2000px) {
.container {
max-width: 1800px;
}
}
@media (min-width: 3000px) {
.container {
max-width: 2400px;
}
}
.controls {
display: flex;
flex-direction: column;
@@ -22,6 +35,13 @@
margin-bottom: var(--space-2);
}
.controls-right {
display: flex;
align-items: center;
gap: 8px;
margin-left: auto; /* Push to the right */
}
.actions {
display: flex;
align-items: center;
@@ -293,6 +313,86 @@
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15);
}
/* Prevent text selection in control and header areas */
.tag,
.control-group button,
.control-group select,
.toggle-folders-btn,
.bulk-operations-panel,
.app-header,
.header-branding,
.app-title,
.main-nav,
.nav-item,
.header-actions button,
.header-controls,
.toggle-folders-container button {
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
/* Dropdown Button Styling */
.dropdown-group {
position: relative;
display: flex;
}
.dropdown-main {
border-top-right-radius: 0;
border-bottom-right-radius: 0;
border-right: 1px solid rgba(0, 0, 0, 0.1);
}
.dropdown-toggle {
width: 24px !important;
min-width: unset !important;
border-top-left-radius: 0;
border-bottom-left-radius: 0;
padding: 0 !important;
}
.dropdown-menu {
position: absolute;
top: 100%;
left: 0;
z-index: 1000;
display: none;
min-width: 230px;
padding: 5px 0;
margin: 2px 0 0;
font-size: 0.85em;
background-color: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);
}
.dropdown-group.active .dropdown-menu {
display: block;
}
/* Individual entries inside the split-button dropdown menu. */
.dropdown-item {
display: block;
padding: 6px 15px;
clear: both;
font-weight: 400;
color: var(--text-color);
cursor: pointer;
transition: background-color 0.2s ease;
}
.dropdown-item:hover {
/* Fixed invalid `oklch(var(--lora-accent) / 0.1)`: oklch() takes channel
   values, not a whole color var. Use the per-channel custom properties,
   matching every other accent-tint hover rule in this stylesheet. */
background-color: oklch(var(--lora-accent-l) var(--lora-accent-c) var(--lora-accent-h) / 0.1);
}
.dropdown-item i {
margin-right: 8px;
width: 16px;
text-align: center;
}
@media (max-width: 768px) {
.actions {
flex-wrap: wrap;
@@ -305,11 +405,14 @@
width: 100%;
}
.controls-right {
width: 100%;
justify-content: flex-end;
margin-top: 8px;
}
.toggle-folders-container {
margin-left: 0;
width: 100%;
display: flex;
justify-content: flex-end;
}
.folder-tags-container {
@@ -335,4 +438,9 @@
.back-to-top {
bottom: 60px; /* Give some extra space from bottom on mobile */
}
.dropdown-menu {
left: auto;
right: 0; /* Align to right on mobile */
}
}

View File

@@ -21,6 +21,9 @@
@import 'components/filter-indicator.css';
@import 'components/initialization.css';
@import 'components/progress-panel.css';
@import 'components/alphabet-bar.css'; /* Add alphabet bar component */
@import 'components/duplicates.css'; /* Add duplicates component */
@import 'components/keyboard-nav.css'; /* Add keyboard navigation component */
.initialization-notice {
display: flex;

Binary file not shown.

After

Width:  |  Height:  |  Size: 181 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.6 MiB

After

Width:  |  Height:  |  Size: 1.9 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 98 KiB

View File

@@ -1,7 +1,6 @@
// filepath: d:\Workspace\ComfyUI\custom_nodes\ComfyUI-Lora-Manager\static\js\api\baseModelApi.js
import { state, getCurrentPageState } from '../state/index.js';
import { showToast } from '../utils/uiHelpers.js';
import { showDeleteModal, confirmDelete } from '../utils/modalUtils.js';
import { getSessionItem, saveMapToStorage } from '../utils/storageHelpers.js';
/**
@@ -49,6 +48,11 @@ export async function loadMoreModels(options = {}) {
if (pageState.showFavoritesOnly) {
params.append('favorites_only', 'true');
}
// Add active letter filter if set
if (pageState.activeLetterFilter) {
params.append('first_letter', pageState.activeLetterFilter);
}
// Add search parameters if there's a search term
if (pageState.filters?.search) {
@@ -155,6 +159,231 @@ export async function loadMoreModels(options = {}) {
}
}
/**
 * Fetch one page of models for virtual scrolling.
 *
 * Builds the query string from the current page state (folder, favorites,
 * letter filter, search, tag/base-model filters) and requests `endpoint`.
 *
 * @param {Object} [options] - Fetch options
 * @param {string} [options.modelType='lora'] - 'lora' or 'checkpoint'; selects
 *   the filter-parameter dialect and is used in error messages
 * @param {number} [options.page=1] - 1-based page number to request
 * @param {number} [options.pageSize=100] - Items per page
 * @param {string} [options.endpoint='/api/loras'] - API endpoint to query
 * @returns {Promise<Object>} `{ items, totalItems, totalPages, currentPage, hasMore, folders }`
 * @throws {Error} Re-thrown fetch/HTTP errors after showing a toast
 */
export async function fetchModelsPage(options = {}) {
    const {
        modelType = 'lora',
        page = 1,
        pageSize = 100,
        endpoint = '/api/loras'
    } = options;

    const pageState = getCurrentPageState();

    try {
        // Base pagination/sort parameters; fall back through explicit arg,
        // page-state size, then 20.
        const params = new URLSearchParams({
            page: page,
            page_size: pageSize || pageState.pageSize || 20,
            sort_by: pageState.sortBy
        });

        // Restrict to the active folder when one is selected
        if (pageState.activeFolder !== null) {
            params.append('folder', pageState.activeFolder);
        }

        // Add favorites filter parameter if enabled
        if (pageState.showFavoritesOnly) {
            params.append('favorites_only', 'true');
        }

        // Add active letter filter (alphabet bar) if set
        if (pageState.activeLetterFilter) {
            params.append('first_letter', pageState.activeLetterFilter);
        }

        // Add search parameters if there's a search term
        if (pageState.filters?.search) {
            params.append('search', pageState.filters.search);
            params.append('fuzzy', 'true');

            // Add search option parameters if available
            if (pageState.searchOptions) {
                params.append('search_filename', pageState.searchOptions.filename.toString());
                params.append('search_modelname', pageState.searchOptions.modelname.toString());
                if (pageState.searchOptions.tags !== undefined) {
                    params.append('search_tags', pageState.searchOptions.tags.toString());
                }
                params.append('recursive', (pageState.searchOptions?.recursive ?? false).toString());
            }
        }

        // Add filter parameters if active
        if (pageState.filters) {
            // Handle tags filters.
            // Checkpoints API expects individual 'tag' parameters, Loras API
            // expects a comma-separated 'tags' value.
            if (pageState.filters.tags && pageState.filters.tags.length > 0) {
                if (modelType === 'checkpoint') {
                    pageState.filters.tags.forEach(tag => {
                        params.append('tag', tag);
                    });
                } else {
                    params.append('tags', pageState.filters.tags.join(','));
                }
            }

            // Handle base model filters — same dialect split as tags above
            if (pageState.filters.baseModel && pageState.filters.baseModel.length > 0) {
                if (modelType === 'checkpoint') {
                    pageState.filters.baseModel.forEach(model => {
                        params.append('base_model', model);
                    });
                } else {
                    params.append('base_models', pageState.filters.baseModel.join(','));
                }
            }
        }

        // Add model-specific parameters (lora only): recipe-driven hash filters
        // stashed in session storage by the recipes page.
        if (modelType === 'lora') {
            const filterLoraHash = getSessionItem('recipe_to_lora_filterLoraHash');
            const filterLoraHashes = getSessionItem('recipe_to_lora_filterLoraHashes');

            // Single-hash filter takes precedence over the multi-hash list
            if (filterLoraHash) {
                params.append('lora_hash', filterLoraHash);
            }
            // Add multiple hashes filter if present
            else if (filterLoraHashes) {
                try {
                    if (Array.isArray(filterLoraHashes) && filterLoraHashes.length > 0) {
                        params.append('lora_hashes', filterLoraHashes.join(','));
                    }
                } catch (error) {
                    console.error('Error parsing lora hashes from session storage:', error);
                }
            }
        }

        const response = await fetch(`${endpoint}?${params}`);
        if (!response.ok) {
            throw new Error(`Failed to fetch models: ${response.statusText}`);
        }

        const data = await response.json();

        // Normalize the server payload into the shape the virtual scroller expects
        return {
            items: data.items,
            totalItems: data.total,
            totalPages: data.total_pages,
            currentPage: page,
            hasMore: page < data.total_pages,
            folders: data.folders
        };
    } catch (error) {
        console.error(`Error fetching ${modelType}s:`, error);
        showToast(`Failed to fetch ${modelType}s: ${error.message}`, 'error');
        throw error;
    }
}
/**
 * Reset pagination and reload models from page 1 through the virtual scroller.
 * @param {Object} [options] - Operation options
 * @param {string} [options.modelType='lora'] - Model kind, used in error messages
 * @param {boolean} [options.updateFolders=false] - Refresh folder tags from the result
 * @param {Function} options.fetchPageFunction - (page, pageSize) => Promise of a page result
 * @returns {Promise<Object>} The first-page fetch result
 * @throws {Error} Re-thrown fetch errors after showing a toast
 */
export async function resetAndReloadWithVirtualScroll({
    modelType = 'lora',
    updateFolders = false,
    fetchPageFunction
} = {}) {
    const pageState = getCurrentPageState();

    try {
        // Enter loading state (flag + body class for the spinner CSS)
        pageState.isLoading = true;
        document.body.classList.add('loading');

        // Start over from the first page
        pageState.currentPage = 1;
        const firstPage = await fetchPageFunction(1, pageState.pageSize || 50);

        // Hand the fresh data set to the virtual scroller
        state.virtualScroller.refreshWithData(
            firstPage.items,
            firstPage.totalItems,
            firstPage.hasMore
        );

        // Record pagination state; the next page to request is 2
        pageState.hasMore = firstPage.hasMore;
        pageState.currentPage = 2;

        // Optionally rebuild the folder tag list from this response
        if (updateFolders && firstPage.folders) {
            updateFolderTags(firstPage.folders);
        }

        return firstPage;
    } catch (error) {
        console.error(`Error reloading ${modelType}s:`, error);
        showToast(`Failed to reload ${modelType}s: ${error.message}`, 'error');
        throw error;
    } finally {
        // Always leave the loading state, success or failure
        pageState.isLoading = false;
        document.body.classList.remove('loading');
    }
}
/**
 * Load a page of models into the virtual scroller.
 * @param {Object} [options] - Operation options
 * @param {string} [options.modelType='lora'] - Model kind, used in error messages
 * @param {boolean} [options.resetPage=false] - Jump back to page 1 before fetching
 * @param {boolean} [options.updateFolders=false] - Refresh folder tags from the result
 * @param {Function} options.fetchPageFunction - (page, pageSize) => Promise of a page result
 * @returns {Promise<Object>} The fetch result
 * @throws {Error} Re-thrown fetch errors after showing a toast
 */
export async function loadMoreWithVirtualScroll({
    modelType = 'lora',
    resetPage = false,
    updateFolders = false,
    fetchPageFunction
} = {}) {
    const pageState = getCurrentPageState();

    try {
        // Flag the loading state for the UI
        pageState.isLoading = true;
        document.body.classList.add('loading');

        // Optionally rewind to the first page before fetching
        if (resetPage) {
            pageState.currentPage = 1;
        }

        const pageResult = await fetchPageFunction(pageState.currentPage, pageState.pageSize || 50);

        // Replace the scroller's data with this page's contents
        state.virtualScroller.refreshWithData(pageResult.items, pageResult.totalItems, pageResult.hasMore);

        // Track pagination; the next page to request is 2
        pageState.hasMore = pageResult.hasMore;
        pageState.currentPage = 2;

        // Optionally rebuild the folder tag list
        if (updateFolders && pageResult.folders) {
            updateFolderTags(pageResult.folders);
        }

        return pageResult;
    } catch (error) {
        console.error(`Error loading ${modelType}s:`, error);
        showToast(`Failed to load ${modelType}s: ${error.message}`, 'error');
        throw error;
    } finally {
        // Always clear the loading state
        pageState.isLoading = false;
        document.body.classList.remove('loading');
    }
}
// Update folder tags in the UI
export function updateFolderTags(folders) {
const folderTagsContainer = document.querySelector('.folder-tags');
@@ -203,13 +432,53 @@ export function replaceModelPreview(filePath, modelType = 'lora') {
}
// Delete a model (generic)
export function deleteModel(filePath, modelType = 'lora') {
if (modelType === 'checkpoint') {
confirmDelete('Are you sure you want to delete this checkpoint?', () => {
performDelete(filePath, modelType);
export async function deleteModel(filePath, modelType = 'lora') {
try {
state.loadingManager.showSimpleLoading(`Deleting ${modelType}...`);
const endpoint = modelType === 'checkpoint'
? '/api/checkpoints/delete'
: '/api/delete_model';
const response = await fetch(endpoint, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
file_path: filePath
})
});
} else {
showDeleteModal(filePath);
if (!response.ok) {
throw new Error(`Failed to delete ${modelType}: ${response.statusText}`);
}
const data = await response.json();
if (data.success) {
// If virtual scroller exists, update its data
if (state.virtualScroller) {
state.virtualScroller.removeItemByFilePath(filePath);
} else {
// Legacy approach: remove the card from UI directly
const card = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
if (card) {
card.remove();
}
}
showToast(`${modelType} deleted successfully`, 'success');
return true;
} else {
throw new Error(data.error || `Failed to delete ${modelType}`);
}
} catch (error) {
console.error(`Error deleting ${modelType}:`, error);
showToast(`Failed to delete ${modelType}: ${error.message}`, 'error');
return false;
} finally {
state.loadingManager.hide();
}
}
@@ -234,26 +503,31 @@ export async function refreshModels(options = {}) {
const {
modelType = 'lora',
scanEndpoint = '/api/loras/scan',
resetAndReloadFunction
resetAndReloadFunction,
fullRebuild = false // New parameter with default value false
} = options;
try {
state.loadingManager.showSimpleLoading(`Refreshing ${modelType}s...`);
state.loadingManager.showSimpleLoading(`${fullRebuild ? 'Full rebuild' : 'Refreshing'} ${modelType}s...`);
const response = await fetch(scanEndpoint);
// Add fullRebuild parameter to the request
const url = new URL(scanEndpoint, window.location.origin);
url.searchParams.append('full_rebuild', fullRebuild);
const response = await fetch(url);
if (!response.ok) {
throw new Error(`Failed to refresh ${modelType}s: ${response.status} ${response.statusText}`);
}
if (typeof resetAndReloadFunction === 'function') {
await resetAndReloadFunction();
await resetAndReloadFunction(true); // update folders
}
showToast(`Refresh complete`, 'success');
showToast(`${fullRebuild ? 'Full rebuild' : 'Refresh'} complete`, 'success');
} catch (error) {
console.error(`Refresh failed:`, error);
showToast(`Failed to refresh ${modelType}s`, 'error');
showToast(`Failed to ${fullRebuild ? 'rebuild' : 'refresh'} ${modelType}s`, 'error');
} finally {
state.loadingManager.hide();
state.loadingManager.restoreProgressBar();
@@ -389,6 +663,57 @@ export async function refreshSingleModelMetadata(filePath, modelType = 'lora') {
}
}
/**
 * Exclude a model from the library (generic over lora/checkpoint).
 * Posts the file path to the type-specific exclude endpoint, then removes the
 * corresponding card from the UI.
 * @param {string} filePath - File path of the model to exclude
 * @param {string} [modelType='lora'] - 'lora' or 'checkpoint'
 * @returns {Promise<boolean>} true on success, false on any failure
 */
export async function excludeModel(filePath, modelType = 'lora') {
    try {
        state.loadingManager.showSimpleLoading(`Excluding ${modelType}...`);

        const endpoint = modelType === 'checkpoint'
            ? '/api/checkpoints/exclude'
            : '/api/loras/exclude';

        const response = await fetch(endpoint, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ file_path: filePath })
        });

        if (!response.ok) {
            throw new Error(`Failed to exclude ${modelType}: ${response.statusText}`);
        }

        const result = await response.json();
        if (!result.success) {
            throw new Error(result.error || `Failed to exclude ${modelType}`);
        }

        if (state.virtualScroller) {
            // Virtual scroller owns the cards — drop the item from its data set
            state.virtualScroller.removeItemByFilePath(filePath);
        } else {
            // Legacy path: remove the card element directly.
            // NOTE(review): selector targets .lora-card even for checkpoints —
            // confirm checkpoint cards share this class.
            document.querySelector(`.lora-card[data-filepath="${filePath}"]`)?.remove();
        }

        showToast(`${modelType} excluded successfully`, 'success');
        return true;
    } catch (error) {
        console.error(`Error excluding ${modelType}:`, error);
        showToast(`Failed to exclude ${modelType}: ${error.message}`, 'error');
        return false;
    } finally {
        // Always dismiss the loading overlay
        state.loadingManager.hide();
    }
}
// Private methods
// Upload a preview image

View File

@@ -1,40 +1,87 @@
import { createCheckpointCard } from '../components/CheckpointCard.js';
import {
loadMoreModels,
fetchModelsPage,
resetAndReload as baseResetAndReload,
resetAndReloadWithVirtualScroll,
loadMoreWithVirtualScroll,
refreshModels as baseRefreshModels,
deleteModel as baseDeleteModel,
replaceModelPreview,
fetchCivitaiMetadata,
refreshSingleModelMetadata
refreshSingleModelMetadata,
excludeModel as baseExcludeModel
} from './baseModelApi.js';
import { state } from '../state/index.js';
// Load more checkpoints with pagination
export async function loadMoreCheckpoints(resetPagination = true) {
return loadMoreModels({
resetPage: resetPagination,
updateFolders: true,
/**
 * Fetch checkpoints with pagination for virtual scrolling
 * @param {number} page - Page number to fetch
 * @param {number} pageSize - Number of items per page
 * @returns {Promise<Object>} Object containing items, total count, and pagination info
 */
export async function fetchCheckpointsPage(page = 1, pageSize = 100) {
    // fetchModelsPage only consumes modelType/page/pageSize/endpoint, so the
    // previously-passed createCardFunction option was dead weight and is omitted.
    return fetchModelsPage({
        modelType: 'checkpoint',
        page,
        pageSize,
        endpoint: '/api/checkpoints'
    });
}
/**
 * Load more checkpoints with pagination - updated to work with VirtualScroller
 * @param {boolean} resetPage - Whether to reset to the first page
 * @param {boolean} updateFolders - Whether to update folder tags
 * @returns {Promise<void>}
 */
export async function loadMoreCheckpoints(resetPage = false, updateFolders = false) {
    // Preferred path: let the virtual scroller manage the page data
    if (state.virtualScroller) {
        return loadMoreWithVirtualScroll({
            modelType: 'checkpoint',
            resetPage,
            updateFolders,
            fetchPageFunction: fetchCheckpointsPage
        });
    }

    // Legacy path when no virtual scroller has been initialized:
    // append cards to the grid via the classic loader.
    return loadMoreModels({
        resetPage,
        updateFolders,
        modelType: 'checkpoint',
        createCardFunction: createCheckpointCard,
        endpoint: '/api/checkpoints'
    });
}
// Reset and reload checkpoints
export async function resetAndReload() {
return baseResetAndReload({
updateFolders: true,
modelType: 'checkpoint',
loadMoreFunction: loadMoreCheckpoints
});
// Reset and reload checkpoints, preferring the virtual scroller when present.
// @param {boolean} updateFolders - Whether to refresh the folder tag UI
export async function resetAndReload(updateFolders = false) {
    if (state.virtualScroller) {
        return resetAndReloadWithVirtualScroll({
            modelType: 'checkpoint',
            updateFolders,
            fetchPageFunction: fetchCheckpointsPage
        });
    }

    // Legacy path: rebuild the grid through the classic loader
    return baseResetAndReload({
        updateFolders,
        modelType: 'checkpoint',
        loadMoreFunction: loadMoreCheckpoints
    });
}
// Refresh checkpoints
export async function refreshCheckpoints() {
export async function refreshCheckpoints(fullRebuild = false) {
return baseRefreshModels({
modelType: 'checkpoint',
scanEndpoint: '/api/checkpoints/scan',
resetAndReloadFunction: resetAndReload
resetAndReloadFunction: resetAndReload,
fullRebuild: fullRebuild
});
}
@@ -59,7 +106,11 @@ export async function fetchCivitai() {
// Refresh single checkpoint metadata
export async function refreshSingleCheckpointMetadata(filePath) {
return refreshSingleModelMetadata(filePath, 'checkpoint');
const success = await refreshSingleModelMetadata(filePath, 'checkpoint');
if (success) {
// Reload the current view to show updated data
await resetAndReload();
}
}
/**
@@ -69,20 +120,73 @@ export async function refreshSingleCheckpointMetadata(filePath) {
* @returns {Promise} - Promise that resolves with the server response
*/
export async function saveModelMetadata(filePath, data) {
const response = await fetch('/api/checkpoints/save-metadata', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
file_path: filePath,
...data
})
});
try {
// Show loading indicator
state.loadingManager.showSimpleLoading('Saving metadata...');
const response = await fetch('/api/checkpoints/save-metadata', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
file_path: filePath,
...data
})
});
if (!response.ok) {
throw new Error('Failed to save metadata');
if (!response.ok) {
throw new Error('Failed to save metadata');
}
return response.json();
} finally {
// Always hide the loading indicator when done
state.loadingManager.hide();
}
}
/**
 * Exclude a checkpoint model from being shown in the UI
 * @param {string} filePath - File path of the checkpoint to exclude
 * @returns {Promise<boolean>} Promise resolving to success status
 */
export function excludeCheckpoint(filePath) {
    // Delegate to the shared base implementation with the checkpoint type tag
    return baseExcludeModel(filePath, 'checkpoint');
}
/**
 * Rename a checkpoint file
 * @param {string} filePath - Current file path
 * @param {string} newFileName - New file name (without path)
 * @returns {Promise<Object>} - Promise that resolves with the server response
 * @throws {Error} When the request fails or the server returns a non-OK status
 */
export async function renameCheckpointFile(filePath, newFileName) {
    try {
        // Show loading indicator while the server performs the rename
        state.loadingManager.showSimpleLoading('Renaming checkpoint file...');

        const response = await fetch('/api/rename_checkpoint', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({
                file_path: filePath,
                new_file_name: newFileName
            })
        });

        if (!response.ok) {
            throw new Error(`Server returned ${response.status}: ${response.statusText}`);
        }

        // Single exit point: the unreachable duplicate `return response.json();`
        // (diff residue after the function body) has been removed.
        return await response.json();
    } catch (error) {
        console.error('Error renaming checkpoint file:', error);
        throw error;
    } finally {
        // Hide loading indicator
        state.loadingManager.hide();
    }
}

View File

@@ -1,13 +1,18 @@
import { createLoraCard } from '../components/LoraCard.js';
import {
loadMoreModels,
fetchModelsPage,
resetAndReload as baseResetAndReload,
resetAndReloadWithVirtualScroll,
loadMoreWithVirtualScroll,
refreshModels as baseRefreshModels,
deleteModel as baseDeleteModel,
replaceModelPreview,
fetchCivitaiMetadata,
refreshSingleModelMetadata
refreshSingleModelMetadata,
excludeModel as baseExcludeModel
} from './baseModelApi.js';
import { state, getCurrentPageState } from '../state/index.js';
/**
* Save model metadata to the server
@@ -16,30 +21,81 @@ import {
* @returns {Promise} Promise of the save operation
*/
export async function saveModelMetadata(filePath, data) {
const response = await fetch('/api/loras/save-metadata', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
file_path: filePath,
...data
})
});
try {
// Show loading indicator
state.loadingManager.showSimpleLoading('Saving metadata...');
const response = await fetch('/api/loras/save-metadata', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
file_path: filePath,
...data
})
});
if (!response.ok) {
throw new Error('Failed to save metadata');
if (!response.ok) {
throw new Error('Failed to save metadata');
}
return response.json();
} finally {
// Always hide the loading indicator when done
state.loadingManager.hide();
}
return response.json();
}
/**
 * Exclude a lora model from being shown in the UI
 * @param {string} filePath - File path of the model to exclude
 * @returns {Promise<boolean>} Promise resolving to success status
 */
export async function excludeLora(filePath) {
    // Delegate to the shared base implementation with the lora type tag
    return baseExcludeModel(filePath, 'lora');
}
/**
* Load more loras with pagination - updated to work with VirtualScroller
* @param {boolean} resetPage - Whether to reset to the first page
* @param {boolean} updateFolders - Whether to update folder tags
* @returns {Promise<void>}
*/
export async function loadMoreLoras(resetPage = false, updateFolders = false) {
return loadMoreModels({
resetPage,
updateFolders,
const pageState = getCurrentPageState();
// Check if virtual scroller is available
if (state.virtualScroller) {
return loadMoreWithVirtualScroll({
modelType: 'lora',
resetPage,
updateFolders,
fetchPageFunction: fetchLorasPage
});
} else {
// Fall back to the original implementation if virtual scroller isn't available
return loadMoreModels({
resetPage,
updateFolders,
modelType: 'lora',
createCardFunction: createLoraCard,
endpoint: '/api/loras'
});
}
}
/**
 * Fetch loras with pagination for virtual scrolling
 * @param {number} page - Page number to fetch
 * @param {number} pageSize - Number of items per page
 * @returns {Promise<Object>} Object containing items, total count, and pagination info
 */
export async function fetchLorasPage(page = 1, pageSize = 100) {
    // fetchModelsPage only consumes modelType/page/pageSize/endpoint, so the
    // previously-passed createCardFunction option was dead weight and is omitted.
    return fetchModelsPage({
        modelType: 'lora',
        page,
        pageSize,
        endpoint: '/api/loras'
    });
}
@@ -61,28 +117,46 @@ export async function replacePreview(filePath) {
}
export function appendLoraCards(loras) {
const grid = document.getElementById('loraGrid');
const sentinel = document.getElementById('scroll-sentinel');
loras.forEach(lora => {
const card = createLoraCard(lora);
grid.appendChild(card);
});
// This function is no longer needed with virtual scrolling
// but kept for compatibility
if (state.virtualScroller) {
console.warn('appendLoraCards is deprecated when using virtual scrolling');
} else {
const grid = document.getElementById('loraGrid');
loras.forEach(lora => {
const card = createLoraCard(lora);
grid.appendChild(card);
});
}
}
export async function resetAndReload(updateFolders = false) {
return baseResetAndReload({
updateFolders,
modelType: 'lora',
loadMoreFunction: loadMoreLoras
});
const pageState = getCurrentPageState();
// Check if virtual scroller is available
if (state.virtualScroller) {
return resetAndReloadWithVirtualScroll({
modelType: 'lora',
updateFolders,
fetchPageFunction: fetchLorasPage
});
} else {
// Fall back to original implementation
return baseResetAndReload({
updateFolders,
modelType: 'lora',
loadMoreFunction: loadMoreLoras
});
}
}
export async function refreshLoras() {
export async function refreshLoras(fullRebuild = false) {
return baseRefreshModels({
modelType: 'lora',
scanEndpoint: '/api/loras/scan',
resetAndReloadFunction: resetAndReload
resetAndReloadFunction: resetAndReload,
fullRebuild: fullRebuild
});
}
@@ -107,4 +181,40 @@ export async function fetchModelDescription(modelId, filePath) {
console.error('Error fetching model description:', error);
throw error;
}
}
/**
 * Rename a LoRA file
 * @param {string} filePath - Current file path
 * @param {string} newFileName - New file name (without path)
 * @returns {Promise<Object>} - Promise that resolves with the server response
 * @throws {Error} When the request fails or the server returns a non-OK status
 */
export async function renameLoraFile(filePath, newFileName) {
    try {
        // Block the UI while the server performs the rename
        state.loadingManager.showSimpleLoading('Renaming LoRA file...');

        const payload = { file_path: filePath, new_file_name: newFileName };
        const response = await fetch('/api/rename_lora', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(payload)
        });

        if (!response.ok) {
            throw new Error(`Server returned ${response.status}: ${response.statusText}`);
        }

        return await response.json();
    } catch (error) {
        // Surface the failure to the caller after logging it
        console.error('Error renaming LoRA file:', error);
        throw error;
    } finally {
        // Always dismiss the loading indicator
        state.loadingManager.hide();
    }
}

174
static/js/api/recipeApi.js Normal file
View File

@@ -0,0 +1,174 @@
import { RecipeCard } from '../components/RecipeCard.js';
import {
fetchModelsPage,
resetAndReloadWithVirtualScroll,
loadMoreWithVirtualScroll
} from './baseModelApi.js';
import { state, getCurrentPageState } from '../state/index.js';
import { showToast } from '../utils/uiHelpers.js';
/**
 * Fetch recipes with pagination for virtual scrolling.
 *
 * Three modes, in priority order:
 *  1. A custom filter with a recipeId loads that single recipe.
 *  2. A custom filter with a loraHash queries by hash, bypassing normal filters.
 *  3. Otherwise, normal search/base-model/tag filters from page state apply.
 *
 * @param {number} page - Page number to fetch
 * @param {number} pageSize - Number of items per page
 * @returns {Promise<Object>} `{ items, totalItems, totalPages, currentPage, hasMore }`
 * @throws {Error} Re-thrown fetch/HTTP errors after showing a toast
 */
export async function fetchRecipesPage(page = 1, pageSize = 100) {
    const pageState = getCurrentPageState();

    try {
        // Base pagination/sort parameters
        const params = new URLSearchParams({
            page: page,
            page_size: pageSize || pageState.pageSize || 20,
            sort_by: pageState.sortBy
        });

        // If we have a specific recipe ID to load
        if (pageState.customFilter?.active && pageState.customFilter?.recipeId) {
            // Special case: load a single specific recipe
            const response = await fetch(`/api/recipe/${pageState.customFilter.recipeId}`);
            if (!response.ok) {
                throw new Error(`Failed to load recipe: ${response.statusText}`);
            }

            const recipe = await response.json();

            // Wrap the single recipe in the standard page-result shape
            return {
                items: [recipe],
                totalItems: 1,
                totalPages: 1,
                currentPage: 1,
                hasMore: false
            };
        }

        // Add custom filter for a specific Lora hash if present;
        // bypass_filters tells the server to ignore other filter params
        if (pageState.customFilter?.active && pageState.customFilter?.loraHash) {
            params.append('lora_hash', pageState.customFilter.loraHash);
            params.append('bypass_filters', 'true');
        } else {
            // Normal filtering logic

            // Add search filter if present
            if (pageState.filters?.search) {
                params.append('search', pageState.filters.search);

                // Add search option parameters
                // NOTE(review): assumes title/tags/loraName/loraModel are all
                // defined whenever searchOptions exists — confirm upstream.
                if (pageState.searchOptions) {
                    params.append('search_title', pageState.searchOptions.title.toString());
                    params.append('search_tags', pageState.searchOptions.tags.toString());
                    params.append('search_lora_name', pageState.searchOptions.loraName.toString());
                    params.append('search_lora_model', pageState.searchOptions.loraModel.toString());
                    params.append('fuzzy', 'true');
                }
            }

            // Add base model filters (comma-separated)
            if (pageState.filters?.baseModel && pageState.filters.baseModel.length) {
                params.append('base_models', pageState.filters.baseModel.join(','));
            }

            // Add tag filters (comma-separated)
            if (pageState.filters?.tags && pageState.filters.tags.length) {
                params.append('tags', pageState.filters.tags.join(','));
            }
        }

        // Fetch the recipe page
        const response = await fetch(`/api/recipes?${params.toString()}`);
        if (!response.ok) {
            throw new Error(`Failed to load recipes: ${response.statusText}`);
        }

        const data = await response.json();

        // Normalize server payload into the shape the virtual scroller expects
        return {
            items: data.items,
            totalItems: data.total,
            totalPages: data.total_pages,
            currentPage: page,
            hasMore: page < data.total_pages
        };
    } catch (error) {
        console.error('Error fetching recipes:', error);
        showToast(`Failed to fetch recipes: ${error.message}`, 'error');
        throw error;
    }
}
/**
 * Reset and reload recipes using virtual scrolling
 * @param {boolean} updateFolders - Whether to update folder tags
 * @returns {Promise<Object>} The fetch result
 */
export async function resetAndReload(updateFolders = false) {
    // Thin wrapper binding the shared reset/reload helper to recipes
    const options = {
        modelType: 'recipe',
        updateFolders,
        fetchPageFunction: fetchRecipesPage
    };
    return resetAndReloadWithVirtualScroll(options);
}
/**
 * Refresh the recipe list: rebuild the server-side cache, then reload.
 * Errors are reported via toast rather than re-thrown.
 */
export async function refreshRecipes() {
    try {
        state.loadingManager.showSimpleLoading('Refreshing recipes...');

        // Ask the backend to rebuild its recipe cache
        const response = await fetch('/api/recipes/scan');
        if (!response.ok) {
            const payload = await response.json();
            throw new Error(payload.error || 'Failed to refresh recipe cache');
        }

        // Cache rebuilt — pull the fresh list into the UI
        await resetAndReload();
        showToast('Refresh complete', 'success');
    } catch (error) {
        console.error('Error refreshing recipes:', error);
        showToast(error.message || 'Failed to refresh recipes', 'error');
    } finally {
        // Dismiss the spinner and restore the normal progress bar
        state.loadingManager.hide();
        state.loadingManager.restoreProgressBar();
    }
}
/**
 * Load more recipes with pagination - updated to work with VirtualScroller
 * @param {boolean} resetPage - Whether to reset to the first page
 * @returns {Promise<Object|undefined>} The fetch result, or undefined when no
 *   virtual scroller is active (there is no legacy incremental path here)
 */
export async function loadMoreRecipes(resetPage = false) {
    // Without a virtual scroller this is deliberately a no-op.
    // (The unused `pageState` local from the original has been removed.)
    if (!state.virtualScroller) {
        return undefined;
    }

    return loadMoreWithVirtualScroll({
        modelType: 'recipe',
        resetPage,
        updateFolders: false, // the recipes page has no folder tags to refresh
        fetchPageFunction: fetchRecipesPage
    });
}
/**
 * Create a recipe card instance from recipe data
 * @param {Object} recipe - Recipe data
 * @returns {HTMLElement} Recipe card DOM element
 */
export function createRecipeCard(recipe) {
    // Clicking a card opens the detail view through the global recipe
    // manager, when one has been installed on window.
    const handleClick = (clickedRecipe) => {
        window.recipeManager?.showRecipeDetails(clickedRecipe);
    };
    return new RecipeCard(recipe, handleClick).element;
}

View File

@@ -1,10 +1,10 @@
import { appCore } from './core.js';
import { initializeInfiniteScroll } from './utils/infiniteScroll.js';
import { confirmDelete, closeDeleteModal } from './utils/modalUtils.js';
import { confirmDelete, closeDeleteModal, confirmExclude, closeExcludeModal } from './utils/modalUtils.js';
import { createPageControls } from './components/controls/index.js';
import { loadMoreCheckpoints } from './api/checkpointApi.js';
import { CheckpointDownloadManager } from './managers/CheckpointDownloadManager.js';
import { CheckpointContextMenu } from './components/ContextMenu/index.js';
import { ModelDuplicatesManager } from './components/ModelDuplicatesManager.js';
// Initialize the Checkpoints page
class CheckpointsPageManager {
@@ -15,6 +15,9 @@ class CheckpointsPageManager {
// Initialize checkpoint download manager
window.checkpointDownloadManager = new CheckpointDownloadManager();
// Initialize the ModelDuplicatesManager
this.duplicatesManager = new ModelDuplicatesManager(this, 'checkpoints');
// Expose only necessary functions to global scope
this._exposeRequiredGlobalFunctions();
}
@@ -23,11 +26,16 @@ class CheckpointsPageManager {
// Minimal set of functions that need to remain global
window.confirmDelete = confirmDelete;
window.closeDeleteModal = closeDeleteModal;
window.confirmExclude = confirmExclude;
window.closeExcludeModal = closeExcludeModal;
// Add loadCheckpoints function to window for FilterManager compatibility
window.checkpointManager = {
loadCheckpoints: (reset) => loadMoreCheckpoints(reset)
};
// Expose duplicates manager
window.modelDuplicatesManager = this.duplicatesManager;
}
async initialize() {
@@ -38,9 +46,6 @@ class CheckpointsPageManager {
// Initialize context menu
new CheckpointContextMenu();
// Initialize infinite scroll
initializeInfiniteScroll('checkpoints');
// Initialize common page features
appCore.initializePageFeatures();

View File

@@ -1,8 +1,9 @@
import { showToast, copyToClipboard } from '../utils/uiHelpers.js';
import { showToast, copyToClipboard, openExampleImagesFolder } from '../utils/uiHelpers.js';
import { state } from '../state/index.js';
import { showCheckpointModal } from './checkpointModal/index.js';
import { NSFW_LEVELS } from '../utils/constants.js';
import { replaceCheckpointPreview as apiReplaceCheckpointPreview, saveModelMetadata } from '../api/checkpointApi.js';
import { showDeleteModal } from '../utils/modalUtils.js';
export function createCheckpointCard(checkpoint) {
const card = document.createElement('div');
@@ -114,8 +115,8 @@ export function createCheckpointCard(checkpoint) {
<span class="model-name">${checkpoint.model_name}</span>
</div>
<div class="card-actions">
<i class="fas fa-image"
title="Replace Preview Image">
<i class="fas fa-folder-open"
title="Open Example Images Folder">
</i>
</div>
</div>
@@ -262,7 +263,7 @@ export function createCheckpointCard(checkpoint) {
// Delete button click event
card.querySelector('.fa-trash')?.addEventListener('click', e => {
e.stopPropagation();
deleteCheckpoint(checkpoint.file_path);
showDeleteModal(checkpoint.file_path);
});
// Replace preview button click event
@@ -271,6 +272,12 @@ export function createCheckpointCard(checkpoint) {
replaceCheckpointPreview(checkpoint.file_path);
});
// Open example images folder button click event
card.querySelector('.fa-folder-open')?.addEventListener('click', e => {
e.stopPropagation();
openExampleImagesFolder(checkpoint.sha256);
});
// Add autoplayOnHover handlers for video elements if needed
const videoElement = card.querySelector('video');
if (videoElement && autoplayOnHover) {
@@ -322,17 +329,6 @@ function openCivitai(modelName) {
}
}
function deleteCheckpoint(filePath) {
if (window.deleteCheckpoint) {
window.deleteCheckpoint(filePath);
} else {
// Use the modal delete functionality
import('../utils/modalUtils.js').then(({ showDeleteModal }) => {
showDeleteModal(filePath, 'checkpoint');
});
}
}
function replaceCheckpointPreview(filePath) {
if (window.replaceCheckpointPreview) {
window.replaceCheckpointPreview(filePath);

View File

@@ -1,372 +0,0 @@
import { refreshSingleLoraMetadata } from '../api/loraApi.js';
import { showToast, getNSFWLevelName } from '../utils/uiHelpers.js';
import { NSFW_LEVELS } from '../utils/constants.js';
import { getStorageItem } from '../utils/storageHelpers.js';
export class LoraContextMenu {
constructor() {
this.menu = document.getElementById('loraContextMenu');
this.currentCard = null;
this.nsfwSelector = document.getElementById('nsfwLevelSelector');
this.init();
}
init() {
document.addEventListener('click', () => this.hideMenu());
document.addEventListener('contextmenu', (e) => {
const card = e.target.closest('.lora-card');
if (!card) {
this.hideMenu();
return;
}
e.preventDefault();
this.showMenu(e.clientX, e.clientY, card);
});
this.menu.addEventListener('click', (e) => {
const menuItem = e.target.closest('.context-menu-item');
if (!menuItem || !this.currentCard) return;
const action = menuItem.dataset.action;
if (!action) return;
switch(action) {
case 'detail':
// Trigger the main card click which shows the modal
this.currentCard.click();
break;
case 'civitai':
// Only trigger if the card is from civitai
if (this.currentCard.dataset.from_civitai === 'true') {
if (this.currentCard.dataset.meta === '{}') {
showToast('Please fetch metadata from CivitAI first', 'info');
} else {
this.currentCard.querySelector('.fa-globe')?.click();
}
} else {
showToast('No CivitAI information available', 'info');
}
break;
case 'copyname':
this.currentCard.querySelector('.fa-copy')?.click();
break;
case 'preview':
this.currentCard.querySelector('.fa-image')?.click();
break;
case 'delete':
this.currentCard.querySelector('.fa-trash')?.click();
break;
case 'move':
moveManager.showMoveModal(this.currentCard.dataset.filepath);
break;
case 'refresh-metadata':
refreshSingleLoraMetadata(this.currentCard.dataset.filepath);
break;
case 'set-nsfw':
this.showNSFWLevelSelector(null, null, this.currentCard);
break;
}
this.hideMenu();
});
// Initialize NSFW Level Selector events
this.initNSFWSelector();
}
initNSFWSelector() {
// Close button
const closeBtn = this.nsfwSelector.querySelector('.close-nsfw-selector');
closeBtn.addEventListener('click', () => {
this.nsfwSelector.style.display = 'none';
});
// Level buttons
const levelButtons = this.nsfwSelector.querySelectorAll('.nsfw-level-btn');
levelButtons.forEach(btn => {
btn.addEventListener('click', async () => {
const level = parseInt(btn.dataset.level);
const filePath = this.nsfwSelector.dataset.cardPath;
if (!filePath) return;
try {
await this.saveModelMetadata(filePath, { preview_nsfw_level: level });
// Update card data
const card = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
if (card) {
let metaData = {};
try {
metaData = JSON.parse(card.dataset.meta || '{}');
} catch (err) {
console.error('Error parsing metadata:', err);
}
metaData.preview_nsfw_level = level;
card.dataset.meta = JSON.stringify(metaData);
card.dataset.nsfwLevel = level.toString();
// Apply blur effect immediately
this.updateCardBlurEffect(card, level);
}
showToast(`Content rating set to ${getNSFWLevelName(level)}`, 'success');
this.nsfwSelector.style.display = 'none';
} catch (error) {
showToast(`Failed to set content rating: ${error.message}`, 'error');
}
});
});
// Close when clicking outside
document.addEventListener('click', (e) => {
if (this.nsfwSelector.style.display === 'block' &&
!this.nsfwSelector.contains(e.target) &&
!e.target.closest('.context-menu-item[data-action="set-nsfw"]')) {
this.nsfwSelector.style.display = 'none';
}
});
}
async saveModelMetadata(filePath, data) {
const response = await fetch('/api/loras/save-metadata', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
file_path: filePath,
...data
})
});
if (!response.ok) {
throw new Error('Failed to save metadata');
}
return await response.json();
}
updateCardBlurEffect(card, level) {
// Get user settings for blur threshold
const blurThreshold = parseInt(getStorageItem('nsfwBlurLevel') || '4');
// Get card preview container
const previewContainer = card.querySelector('.card-preview');
if (!previewContainer) return;
// Get preview media element
const previewMedia = previewContainer.querySelector('img') || previewContainer.querySelector('video');
if (!previewMedia) return;
// Check if blur should be applied
if (level >= blurThreshold) {
// Add blur class to the preview container
previewContainer.classList.add('blurred');
// Get or create the NSFW overlay
let nsfwOverlay = previewContainer.querySelector('.nsfw-overlay');
if (!nsfwOverlay) {
// Create new overlay
nsfwOverlay = document.createElement('div');
nsfwOverlay.className = 'nsfw-overlay';
// Create and configure the warning content
const warningContent = document.createElement('div');
warningContent.className = 'nsfw-warning';
// Determine NSFW warning text based on level
let nsfwText = "Mature Content";
if (level >= NSFW_LEVELS.XXX) {
nsfwText = "XXX-rated Content";
} else if (level >= NSFW_LEVELS.X) {
nsfwText = "X-rated Content";
} else if (level >= NSFW_LEVELS.R) {
nsfwText = "R-rated Content";
}
// Add warning text and show button
warningContent.innerHTML = `
<p>${nsfwText}</p>
<button class="show-content-btn">Show</button>
`;
// Add click event to the show button
const showBtn = warningContent.querySelector('.show-content-btn');
showBtn.addEventListener('click', (e) => {
e.stopPropagation();
previewContainer.classList.remove('blurred');
nsfwOverlay.style.display = 'none';
// Update toggle button icon if it exists
const toggleBtn = card.querySelector('.toggle-blur-btn');
if (toggleBtn) {
toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
}
});
nsfwOverlay.appendChild(warningContent);
previewContainer.appendChild(nsfwOverlay);
} else {
// Update existing overlay
const warningText = nsfwOverlay.querySelector('p');
if (warningText) {
let nsfwText = "Mature Content";
if (level >= NSFW_LEVELS.XXX) {
nsfwText = "XXX-rated Content";
} else if (level >= NSFW_LEVELS.X) {
nsfwText = "X-rated Content";
} else if (level >= NSFW_LEVELS.R) {
nsfwText = "R-rated Content";
}
warningText.textContent = nsfwText;
}
nsfwOverlay.style.display = 'flex';
}
// Get or create the toggle button in the header
const cardHeader = previewContainer.querySelector('.card-header');
if (cardHeader) {
let toggleBtn = cardHeader.querySelector('.toggle-blur-btn');
if (!toggleBtn) {
toggleBtn = document.createElement('button');
toggleBtn.className = 'toggle-blur-btn';
toggleBtn.title = 'Toggle blur';
toggleBtn.innerHTML = '<i class="fas fa-eye"></i>';
// Add click event to toggle button
toggleBtn.addEventListener('click', (e) => {
e.stopPropagation();
const isBlurred = previewContainer.classList.toggle('blurred');
const icon = toggleBtn.querySelector('i');
// Update icon and overlay visibility
if (isBlurred) {
icon.className = 'fas fa-eye';
nsfwOverlay.style.display = 'flex';
} else {
icon.className = 'fas fa-eye-slash';
nsfwOverlay.style.display = 'none';
}
});
// Add to the beginning of header
cardHeader.insertBefore(toggleBtn, cardHeader.firstChild);
// Update base model label class
const baseModelLabel = cardHeader.querySelector('.base-model-label');
if (baseModelLabel && !baseModelLabel.classList.contains('with-toggle')) {
baseModelLabel.classList.add('with-toggle');
}
} else {
// Update existing toggle button
toggleBtn.querySelector('i').className = 'fas fa-eye';
}
}
} else {
// Remove blur
previewContainer.classList.remove('blurred');
// Hide overlay if it exists
const overlay = previewContainer.querySelector('.nsfw-overlay');
if (overlay) overlay.style.display = 'none';
// Update or remove toggle button
const toggleBtn = card.querySelector('.toggle-blur-btn');
if (toggleBtn) {
// We'll leave the button but update the icon
toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
}
}
}
showNSFWLevelSelector(x, y, card) {
const selector = document.getElementById('nsfwLevelSelector');
const currentLevelEl = document.getElementById('currentNSFWLevel');
// Get current NSFW level
let currentLevel = 0;
try {
const metaData = JSON.parse(card.dataset.meta || '{}');
currentLevel = metaData.preview_nsfw_level || 0;
// Update if we have no recorded level but have a dataset attribute
if (!currentLevel && card.dataset.nsfwLevel) {
currentLevel = parseInt(card.dataset.nsfwLevel) || 0;
}
} catch (err) {
console.error('Error parsing metadata:', err);
}
currentLevelEl.textContent = getNSFWLevelName(currentLevel);
// Position the selector
if (x && y) {
const viewportWidth = document.documentElement.clientWidth;
const viewportHeight = document.documentElement.clientHeight;
const selectorRect = selector.getBoundingClientRect();
// Center the selector if no coordinates provided
let finalX = (viewportWidth - selectorRect.width) / 2;
let finalY = (viewportHeight - selectorRect.height) / 2;
selector.style.left = `${finalX}px`;
selector.style.top = `${finalY}px`;
}
// Highlight current level button
document.querySelectorAll('.nsfw-level-btn').forEach(btn => {
if (parseInt(btn.dataset.level) === currentLevel) {
btn.classList.add('active');
} else {
btn.classList.remove('active');
}
});
// Store reference to current card
selector.dataset.cardPath = card.dataset.filepath;
// Show selector
selector.style.display = 'block';
}
showMenu(x, y, card) {
this.currentCard = card;
this.menu.style.display = 'block';
// 获取菜单尺寸
const menuRect = this.menu.getBoundingClientRect();
// 获取视口尺寸
const viewportWidth = document.documentElement.clientWidth;
const viewportHeight = document.documentElement.clientHeight;
// 计算最终位置 - 使用 clientX/Y不需要考虑滚动偏移
let finalX = x;
let finalY = y;
// 确保菜单不会超出右侧边界
if (x + menuRect.width > viewportWidth) {
finalX = x - menuRect.width;
}
// 确保菜单不会超出底部边界
if (y + menuRect.height > viewportHeight) {
finalY = y - menuRect.height;
}
// 直接设置位置,因为 position: fixed 是相对于视口定位的
this.menu.style.left = `${finalX}px`;
this.menu.style.top = `${finalY}px`;
}
hideMenu() {
this.menu.style.display = 'none';
this.currentCard = null;
}
}
// For backward compatibility, re-export the LoraContextMenu class
// export { LoraContextMenu } from './ContextMenu/LoraContextMenu.js';

View File

@@ -1,8 +1,12 @@
import { BaseContextMenu } from './BaseContextMenu.js';
import { refreshSingleCheckpointMetadata, saveModelMetadata } from '../../api/checkpointApi.js';
import { showToast, getNSFWLevelName } from '../../utils/uiHelpers.js';
import { refreshSingleCheckpointMetadata, saveModelMetadata, replaceCheckpointPreview } from '../../api/checkpointApi.js';
import { showToast, getNSFWLevelName, openExampleImagesFolder } from '../../utils/uiHelpers.js';
import { NSFW_LEVELS } from '../../utils/constants.js';
import { getStorageItem } from '../../utils/storageHelpers.js';
import { showExcludeModal } from '../../utils/modalUtils.js';
import { modalManager } from '../../managers/ModalManager.js';
import { state } from '../../state/index.js';
import { resetAndReload } from '../../api/checkpointApi.js';
export class CheckpointContextMenu extends BaseContextMenu {
constructor() {
@@ -22,10 +26,12 @@ export class CheckpointContextMenu extends BaseContextMenu {
this.currentCard.click();
break;
case 'preview':
// Replace checkpoint preview
if (this.currentCard.querySelector('.fa-image')) {
this.currentCard.querySelector('.fa-image').click();
}
// Open example images folder instead of replacing preview
openExampleImagesFolder(this.currentCard.dataset.sha256);
break;
case 'replace-preview':
// Add new action for replacing preview images
replaceCheckpointPreview(this.currentCard.dataset.filepath);
break;
case 'civitai':
// Open civitai page
@@ -53,6 +59,10 @@ export class CheckpointContextMenu extends BaseContextMenu {
// Refresh metadata from CivitAI
refreshSingleCheckpointMetadata(this.currentCard.dataset.filepath);
break;
case 'relink-civitai':
// Handle re-link to Civitai
this.showRelinkCivitaiModal();
break;
case 'set-nsfw':
// Set NSFW level
this.showNSFWLevelSelector(null, null, this.currentCard);
@@ -61,6 +71,10 @@ export class CheckpointContextMenu extends BaseContextMenu {
// Move to folder (placeholder)
showToast('Move to folder feature coming soon', 'info');
break;
case 'exclude':
showExcludeModal(this.currentCard.dataset.filepath, 'checkpoint');
break;
}
}
@@ -312,4 +326,87 @@ export class CheckpointContextMenu extends BaseContextMenu {
// Show selector
selector.style.display = 'block';
}
showRelinkCivitaiModal() {
const filePath = this.currentCard.dataset.filepath;
if (!filePath) return;
// Set up confirm button handler
const confirmBtn = document.getElementById('confirmRelinkBtn');
const urlInput = document.getElementById('civitaiModelUrl');
const errorDiv = document.getElementById('civitaiModelUrlError');
// Remove previous event listener if exists
if (this._boundRelinkHandler) {
confirmBtn.removeEventListener('click', this._boundRelinkHandler);
}
// Create new bound handler
this._boundRelinkHandler = async () => {
const url = urlInput.value.trim();
const modelVersionId = this.extractModelVersionId(url);
if (!modelVersionId) {
errorDiv.textContent = 'Invalid URL format. Must include modelVersionId parameter.';
return;
}
errorDiv.textContent = '';
modalManager.closeModal('relinkCivitaiModal');
try {
state.loadingManager.showSimpleLoading('Re-linking to Civitai...');
const response = await fetch('/api/checkpoints/relink-civitai', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
file_path: filePath,
model_version_id: modelVersionId
})
});
if (!response.ok) {
throw new Error(`Failed to re-link model: ${response.statusText}`);
}
const data = await response.json();
if (data.success) {
showToast('Model successfully re-linked to Civitai', 'success');
// Reload the current view to show updated data
await resetAndReload();
} else {
throw new Error(data.error || 'Failed to re-link model');
}
} catch (error) {
console.error('Error re-linking model:', error);
showToast(`Error: ${error.message}`, 'error');
} finally {
state.loadingManager.hide();
}
};
// Set new event listener
confirmBtn.addEventListener('click', this._boundRelinkHandler);
// Clear previous input
urlInput.value = '';
errorDiv.textContent = '';
// Show modal
modalManager.showModal('relinkCivitaiModal');
}
extractModelVersionId(url) {
try {
const parsedUrl = new URL(url);
const modelVersionId = parsedUrl.searchParams.get('modelVersionId');
return modelVersionId;
} catch (e) {
return null;
}
}
}

View File

@@ -1,8 +1,11 @@
import { BaseContextMenu } from './BaseContextMenu.js';
import { refreshSingleLoraMetadata, saveModelMetadata } from '../../api/loraApi.js';
import { showToast, getNSFWLevelName } from '../../utils/uiHelpers.js';
import { refreshSingleLoraMetadata, saveModelMetadata, replacePreview, resetAndReload } from '../../api/loraApi.js';
import { showToast, getNSFWLevelName, copyToClipboard, sendLoraToWorkflow, openExampleImagesFolder } from '../../utils/uiHelpers.js';
import { NSFW_LEVELS } from '../../utils/constants.js';
import { getStorageItem } from '../../utils/storageHelpers.js';
import { showExcludeModal, showDeleteModal } from '../../utils/modalUtils.js';
import { modalManager } from '../../managers/ModalManager.js';
import { state } from '../../state/index.js';
export class LoraContextMenu extends BaseContextMenu {
constructor() {
@@ -34,13 +37,28 @@ export class LoraContextMenu extends BaseContextMenu {
}
break;
case 'copyname':
this.currentCard.querySelector('.fa-copy')?.click();
// Generate and copy LoRA syntax
this.copyLoraSyntax();
break;
case 'sendappend':
// Send LoRA to workflow (append mode)
this.sendLoraToWorkflow(false);
break;
case 'sendreplace':
// Send LoRA to workflow (replace mode)
this.sendLoraToWorkflow(true);
break;
case 'preview':
this.currentCard.querySelector('.fa-image')?.click();
// Open example images folder instead of showing preview image dialog
openExampleImagesFolder(this.currentCard.dataset.sha256);
break;
case 'replace-preview':
// Add a new action for replacing preview images
replacePreview(this.currentCard.dataset.filepath);
break;
case 'delete':
this.currentCard.querySelector('.fa-trash')?.click();
// Call showDeleteModal directly instead of clicking the trash button
showDeleteModal(this.currentCard.dataset.filepath);
break;
case 'move':
moveManager.showMoveModal(this.currentCard.dataset.filepath);
@@ -48,9 +66,119 @@ export class LoraContextMenu extends BaseContextMenu {
case 'refresh-metadata':
refreshSingleLoraMetadata(this.currentCard.dataset.filepath);
break;
case 'relink-civitai':
this.showRelinkCivitaiModal();
break;
case 'set-nsfw':
this.showNSFWLevelSelector(null, null, this.currentCard);
break;
case 'exclude':
showExcludeModal(this.currentCard.dataset.filepath);
break;
}
}
// New method to handle copy syntax functionality
copyLoraSyntax() {
const card = this.currentCard;
const usageTips = JSON.parse(card.dataset.usage_tips || '{}');
const strength = usageTips.strength || 1;
const loraSyntax = `<lora:${card.dataset.file_name}:${strength}>`;
copyToClipboard(loraSyntax, 'LoRA syntax copied to clipboard');
}
// New method to handle send to workflow functionality
sendLoraToWorkflow(replaceMode) {
const card = this.currentCard;
const usageTips = JSON.parse(card.dataset.usage_tips || '{}');
const strength = usageTips.strength || 1;
const loraSyntax = `<lora:${card.dataset.file_name}:${strength}>`;
sendLoraToWorkflow(loraSyntax, replaceMode, 'lora');
}
// New method to handle re-link to Civitai
showRelinkCivitaiModal() {
const filePath = this.currentCard.dataset.filepath;
if (!filePath) return;
// Set up confirm button handler
const confirmBtn = document.getElementById('confirmRelinkBtn');
const urlInput = document.getElementById('civitaiModelUrl');
const errorDiv = document.getElementById('civitaiModelUrlError');
// Remove previous event listener if exists
if (this._boundRelinkHandler) {
confirmBtn.removeEventListener('click', this._boundRelinkHandler);
}
// Create new bound handler
this._boundRelinkHandler = async () => {
const url = urlInput.value.trim();
const modelVersionId = this.extractModelVersionId(url);
if (!modelVersionId) {
errorDiv.textContent = 'Invalid URL format. Must include modelVersionId parameter.';
return;
}
errorDiv.textContent = '';
modalManager.closeModal('relinkCivitaiModal');
try {
state.loadingManager.showSimpleLoading('Re-linking to Civitai...');
const response = await fetch('/api/relink-civitai', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
file_path: filePath,
model_version_id: modelVersionId
})
});
if (!response.ok) {
throw new Error(`Failed to re-link model: ${response.statusText}`);
}
const data = await response.json();
if (data.success) {
showToast('Model successfully re-linked to Civitai', 'success');
// Reload the current view to show updated data
await resetAndReload();
} else {
throw new Error(data.error || 'Failed to re-link model');
}
} catch (error) {
console.error('Error re-linking model:', error);
showToast(`Error: ${error.message}`, 'error');
} finally {
state.loadingManager.hide();
}
};
// Set new event listener
confirmBtn.addEventListener('click', this._boundRelinkHandler);
// Clear previous input
urlInput.value = '';
errorDiv.textContent = '';
// Show modal
modalManager.showModal('relinkCivitaiModal');
}
extractModelVersionId(url) {
try {
const parsedUrl = new URL(url);
const modelVersionId = parsedUrl.searchParams.get('modelVersionId');
return modelVersionId;
} catch (e) {
return null;
}
}

View File

@@ -1,5 +1,5 @@
import { BaseContextMenu } from './BaseContextMenu.js';
import { showToast } from '../../utils/uiHelpers.js';
import { showToast, copyToClipboard, sendLoraToWorkflow } from '../../utils/uiHelpers.js';
import { setSessionItem, removeSessionItem } from '../../utils/storageHelpers.js';
import { state } from '../../state/index.js';
@@ -39,8 +39,16 @@ export class RecipeContextMenu extends BaseContextMenu {
this.currentCard.click();
break;
case 'copy':
// Copy recipe to clipboard
this.currentCard.querySelector('.fa-copy')?.click();
// Copy recipe syntax to clipboard
this.copyRecipeSyntax();
break;
case 'sendappend':
// Send recipe to workflow (append mode)
this.sendRecipeToWorkflow(false);
break;
case 'sendreplace':
// Send recipe to workflow (replace mode)
this.sendRecipeToWorkflow(true);
break;
case 'share':
// Share recipe
@@ -61,6 +69,52 @@ export class RecipeContextMenu extends BaseContextMenu {
}
}
// New method to copy recipe syntax to clipboard
copyRecipeSyntax() {
const recipeId = this.currentCard.dataset.id;
if (!recipeId) {
showToast('Cannot copy recipe: Missing recipe ID', 'error');
return;
}
fetch(`/api/recipe/${recipeId}/syntax`)
.then(response => response.json())
.then(data => {
if (data.success && data.syntax) {
copyToClipboard(data.syntax, 'Recipe syntax copied to clipboard');
} else {
throw new Error(data.error || 'No syntax returned');
}
})
.catch(err => {
console.error('Failed to copy recipe syntax: ', err);
showToast('Failed to copy recipe syntax', 'error');
});
}
// New method to send recipe to workflow
sendRecipeToWorkflow(replaceMode) {
const recipeId = this.currentCard.dataset.id;
if (!recipeId) {
showToast('Cannot send recipe: Missing recipe ID', 'error');
return;
}
fetch(`/api/recipe/${recipeId}/syntax`)
.then(response => response.json())
.then(data => {
if (data.success && data.syntax) {
return sendLoraToWorkflow(data.syntax, replaceMode, 'recipe');
} else {
throw new Error(data.error || 'No syntax returned');
}
})
.catch(err => {
console.error('Failed to send recipe to workflow: ', err);
showToast('Failed to send recipe to workflow', 'error');
});
}
// View all LoRAs in the recipe
viewRecipeLoRAs(recipeId) {
if (!recipeId) {

View File

@@ -0,0 +1,386 @@
// Duplicates Manager Component
import { showToast } from '../utils/uiHelpers.js';
import { RecipeCard } from './RecipeCard.js';
import { state, getCurrentPageState } from '../state/index.js';
export class DuplicatesManager {
constructor(recipeManager) {
this.recipeManager = recipeManager;
this.duplicateGroups = [];
this.inDuplicateMode = false;
this.selectedForDeletion = new Set();
}
async findDuplicates() {
try {
const response = await fetch('/api/recipes/find-duplicates');
if (!response.ok) {
throw new Error('Failed to find duplicates');
}
const data = await response.json();
if (!data.success) {
throw new Error(data.error || 'Unknown error finding duplicates');
}
this.duplicateGroups = data.duplicate_groups || [];
if (this.duplicateGroups.length === 0) {
showToast('No duplicate recipes found', 'info');
return false;
}
this.enterDuplicateMode();
return true;
} catch (error) {
console.error('Error finding duplicates:', error);
showToast('Failed to find duplicates: ' + error.message, 'error');
return false;
}
}
enterDuplicateMode() {
this.inDuplicateMode = true;
this.selectedForDeletion.clear();
// Update state
const pageState = getCurrentPageState();
pageState.duplicatesMode = true;
// Show duplicates banner
const banner = document.getElementById('duplicatesBanner');
const countSpan = document.getElementById('duplicatesCount');
if (banner && countSpan) {
countSpan.textContent = `Found ${this.duplicateGroups.length} duplicate group${this.duplicateGroups.length !== 1 ? 's' : ''}`;
banner.style.display = 'block';
}
// Disable virtual scrolling if active
if (state.virtualScroller) {
state.virtualScroller.disable();
}
// Add duplicate-mode class to the body
document.body.classList.add('duplicate-mode');
// Render duplicate groups
this.renderDuplicateGroups();
// Update selected count
this.updateSelectedCount();
}
exitDuplicateMode() {
this.inDuplicateMode = false;
this.selectedForDeletion.clear();
// Update state
const pageState = getCurrentPageState();
pageState.duplicatesMode = false;
// Hide duplicates banner
const banner = document.getElementById('duplicatesBanner');
if (banner) {
banner.style.display = 'none';
}
// Remove duplicate-mode class from the body
document.body.classList.remove('duplicate-mode');
// Clear the recipe grid first
const recipeGrid = document.getElementById('recipeGrid');
if (recipeGrid) {
recipeGrid.innerHTML = '';
}
// Re-enable virtual scrolling
state.virtualScroller.enable();
}
renderDuplicateGroups() {
const recipeGrid = document.getElementById('recipeGrid');
if (!recipeGrid) return;
// Clear existing content
recipeGrid.innerHTML = '';
// Render each duplicate group
this.duplicateGroups.forEach((group, groupIndex) => {
const groupDiv = document.createElement('div');
groupDiv.className = 'duplicate-group';
groupDiv.dataset.fingerprint = group.fingerprint;
// Create group header
const header = document.createElement('div');
header.className = 'duplicate-group-header';
header.innerHTML = `
<span>Duplicate Group #${groupIndex + 1} (${group.recipes.length} recipes)</span>
<span>
<button class="btn-select-all" onclick="recipeManager.duplicatesManager.toggleSelectAllInGroup('${group.fingerprint}')">
Select All
</button>
<button class="btn-select-latest" onclick="recipeManager.duplicatesManager.selectLatestInGroup('${group.fingerprint}')">
Keep Latest
</button>
</span>
`;
groupDiv.appendChild(header);
// Create cards container
const cardsDiv = document.createElement('div');
cardsDiv.className = 'card-group-container';
// Add scrollable class if there are many recipes in the group
if (group.recipes.length > 6) {
cardsDiv.classList.add('scrollable');
// Add expand/collapse toggle button
const toggleBtn = document.createElement('button');
toggleBtn.className = 'group-toggle-btn';
toggleBtn.innerHTML = '<i class="fas fa-chevron-down"></i>';
toggleBtn.title = "Expand/Collapse";
toggleBtn.onclick = function() {
cardsDiv.classList.toggle('scrollable');
this.innerHTML = cardsDiv.classList.contains('scrollable') ?
'<i class="fas fa-chevron-down"></i>' :
'<i class="fas fa-chevron-up"></i>';
};
groupDiv.appendChild(toggleBtn);
}
// Sort recipes by date (newest first)
const sortedRecipes = [...group.recipes].sort((a, b) => b.modified - a.modified);
// Add all recipe cards in this group
sortedRecipes.forEach((recipe, index) => {
// Create recipe card
const recipeCard = new RecipeCard(recipe, (recipe) => {
this.recipeManager.showRecipeDetails(recipe);
});
const card = recipeCard.element;
// Add duplicate class
card.classList.add('duplicate');
// Mark the latest one
if (index === 0) {
card.classList.add('latest');
}
// Add selection checkbox
const checkbox = document.createElement('input');
checkbox.type = 'checkbox';
checkbox.className = 'selector-checkbox';
checkbox.dataset.recipeId = recipe.id;
checkbox.dataset.groupFingerprint = group.fingerprint;
// Check if already selected
if (this.selectedForDeletion.has(recipe.id)) {
checkbox.checked = true;
card.classList.add('duplicate-selected');
}
// Add change event to checkbox
checkbox.addEventListener('change', (e) => {
e.stopPropagation();
this.toggleCardSelection(recipe.id, card, checkbox);
});
// Make the entire card clickable for selection
card.addEventListener('click', (e) => {
// Don't toggle if clicking on the checkbox directly or card actions
if (e.target === checkbox || e.target.closest('.card-actions')) {
return;
}
// Toggle checkbox state
checkbox.checked = !checkbox.checked;
this.toggleCardSelection(recipe.id, card, checkbox);
});
card.appendChild(checkbox);
cardsDiv.appendChild(card);
});
groupDiv.appendChild(cardsDiv);
recipeGrid.appendChild(groupDiv);
});
}
// Helper method to toggle card selection state
toggleCardSelection(recipeId, card, checkbox) {
if (checkbox.checked) {
this.selectedForDeletion.add(recipeId);
card.classList.add('duplicate-selected');
} else {
this.selectedForDeletion.delete(recipeId);
card.classList.remove('duplicate-selected');
}
this.updateSelectedCount();
}
updateSelectedCount() {
const selectedCountEl = document.getElementById('duplicatesSelectedCount');
if (selectedCountEl) {
selectedCountEl.textContent = this.selectedForDeletion.size;
}
// Update delete button state
const deleteBtn = document.querySelector('.btn-delete-selected');
if (deleteBtn) {
deleteBtn.disabled = this.selectedForDeletion.size === 0;
deleteBtn.classList.toggle('disabled', this.selectedForDeletion.size === 0);
}
}
toggleSelectAllInGroup(fingerprint) {
const checkboxes = document.querySelectorAll(`.selector-checkbox[data-group-fingerprint="${fingerprint}"]`);
const allSelected = Array.from(checkboxes).every(checkbox => checkbox.checked);
// If all are selected, deselect all; otherwise select all
checkboxes.forEach(checkbox => {
checkbox.checked = !allSelected;
const recipeId = checkbox.dataset.recipeId;
const card = checkbox.closest('.lora-card');
if (!allSelected) {
this.selectedForDeletion.add(recipeId);
card.classList.add('duplicate-selected');
} else {
this.selectedForDeletion.delete(recipeId);
card.classList.remove('duplicate-selected');
}
});
// Update the button text
const button = document.querySelector(`.duplicate-group[data-fingerprint="${fingerprint}"] .btn-select-all`);
if (button) {
button.textContent = !allSelected ? "Deselect All" : "Select All";
}
this.updateSelectedCount();
}
selectAllInGroup(fingerprint) {
const checkboxes = document.querySelectorAll(`.selector-checkbox[data-group-fingerprint="${fingerprint}"]`);
checkboxes.forEach(checkbox => {
checkbox.checked = true;
this.selectedForDeletion.add(checkbox.dataset.recipeId);
checkbox.closest('.lora-card').classList.add('duplicate-selected');
});
// Update the button text
const button = document.querySelector(`.duplicate-group[data-fingerprint="${fingerprint}"] .btn-select-all`);
if (button) {
button.textContent = "Deselect All";
}
this.updateSelectedCount();
}
selectLatestInGroup(fingerprint) {
// Find all checkboxes in this group
const checkboxes = document.querySelectorAll(`.selector-checkbox[data-group-fingerprint="${fingerprint}"]`);
// Get all the recipes in this group
const group = this.duplicateGroups.find(g => g.fingerprint === fingerprint);
if (!group) return;
// Sort recipes by date (newest first)
const sortedRecipes = [...group.recipes].sort((a, b) => b.modified - a.modified);
// Skip the first (latest) one and select the rest for deletion
for (let i = 1; i < sortedRecipes.length; i++) {
const recipeId = sortedRecipes[i].id;
const checkbox = document.querySelector(`.selector-checkbox[data-recipe-id="${recipeId}"]`);
if (checkbox) {
checkbox.checked = true;
this.selectedForDeletion.add(recipeId);
checkbox.closest('.lora-card').classList.add('duplicate-selected');
}
}
// Make sure the latest one is not selected
const latestId = sortedRecipes[0].id;
const latestCheckbox = document.querySelector(`.selector-checkbox[data-recipe-id="${latestId}"]`);
if (latestCheckbox) {
latestCheckbox.checked = false;
this.selectedForDeletion.delete(latestId);
latestCheckbox.closest('.lora-card').classList.remove('duplicate-selected');
}
this.updateSelectedCount();
}
selectLatestDuplicates() {
// For each duplicate group, select all but the latest recipe
this.duplicateGroups.forEach(group => {
this.selectLatestInGroup(group.fingerprint);
});
}
async deleteSelectedDuplicates() {
if (this.selectedForDeletion.size === 0) {
showToast('No recipes selected for deletion', 'info');
return;
}
try {
// Show the delete confirmation modal instead of a simple confirm
const duplicateDeleteCount = document.getElementById('duplicateDeleteCount');
if (duplicateDeleteCount) {
duplicateDeleteCount.textContent = this.selectedForDeletion.size;
}
// Use the modal manager to show the confirmation modal
modalManager.showModal('duplicateDeleteModal');
} catch (error) {
console.error('Error preparing delete:', error);
showToast('Error: ' + error.message, 'error');
}
}
// Add new method to execute deletion after confirmation
async confirmDeleteDuplicates() {
try {
// Close the modal
modalManager.closeModal('duplicateDeleteModal');
// Prepare recipe IDs for deletion
const recipeIds = Array.from(this.selectedForDeletion);
// Call API to bulk delete
const response = await fetch('/api/recipes/bulk-delete', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({ recipe_ids: recipeIds })
});
if (!response.ok) {
throw new Error('Failed to delete selected recipes');
}
const data = await response.json();
if (!data.success) {
throw new Error(data.error || 'Unknown error deleting recipes');
}
showToast(`Successfully deleted ${data.total_deleted} recipes`, 'success');
// Exit duplicate mode if deletions were successful
if (data.total_deleted > 0) {
this.exitDuplicateMode();
}
} catch (error) {
console.error('Error deleting recipes:', error);
showToast('Failed to delete recipes: ' + error.message, 'error');
}
}
}

View File

@@ -75,8 +75,48 @@ export class HeaderManager {
const supportToggle = document.getElementById('supportToggleBtn');
if (supportToggle) {
supportToggle.addEventListener('click', () => {
// Handle support panel logic
if (window.modalManager) {
window.modalManager.toggleModal('supportModal');
}
});
}
// Handle QR code toggle
const qrToggle = document.getElementById('toggleQRCode');
const qrContainer = document.getElementById('qrCodeContainer');
if (qrToggle && qrContainer) {
qrToggle.addEventListener('click', function() {
qrContainer.classList.toggle('show');
qrToggle.classList.toggle('active');
const toggleText = qrToggle.querySelector('.toggle-text');
if (qrContainer.classList.contains('show')) {
toggleText.textContent = 'Hide WeChat QR Code';
// Add small delay to ensure DOM is updated before scrolling
setTimeout(() => {
const supportModal = document.querySelector('.support-modal');
if (supportModal) {
supportModal.scrollTo({
top: supportModal.scrollHeight,
behavior: 'smooth'
});
}
}, 250);
} else {
toggleText.textContent = 'Show WeChat QR Code';
}
});
}
// Handle help toggle
// const helpToggle = document.querySelector('.help-toggle');
// if (helpToggle) {
// helpToggle.addEventListener('click', () => {
// if (window.modalManager) {
// window.modalManager.toggleModal('helpModal');
// }
// });
// }
}
}

View File

@@ -1,9 +1,205 @@
import { showToast, openCivitai, copyToClipboard } from '../utils/uiHelpers.js';
import { state } from '../state/index.js';
import { showToast, openCivitai, copyToClipboard, sendLoraToWorkflow, openExampleImagesFolder } from '../utils/uiHelpers.js';
import { state, getCurrentPageState } from '../state/index.js';
import { showLoraModal } from './loraModal/index.js';
import { bulkManager } from '../managers/BulkManager.js';
import { NSFW_LEVELS } from '../utils/constants.js';
import { replacePreview, deleteModel, saveModelMetadata } from '../api/loraApi.js'
import { replacePreview, saveModelMetadata } from '../api/loraApi.js'
import { showDeleteModal } from '../utils/modalUtils.js';
// Add a global event delegation handler
export function setupLoraCardEventDelegation() {
const gridElement = document.getElementById('loraGrid');
if (!gridElement) return;
// Remove any existing event listener to prevent duplication
gridElement.removeEventListener('click', handleLoraCardEvent);
// Add the event delegation handler
gridElement.addEventListener('click', handleLoraCardEvent);
}
// Event delegation handler for all lora card events.
// Routes a grid-level click to the correct action based on which element
// inside the card was actually clicked; falls through to the card-level
// behavior (bulk select / open modal) when no action icon was hit.
function handleLoraCardEvent(event) {
    // Find the closest card element; ignore clicks outside any card.
    const card = event.target.closest('.lora-card');
    if (!card) return;

    // Action icons: each stops propagation so the card-level behavior
    // below does not also fire.
    if (event.target.closest('.toggle-blur-btn')) {
        event.stopPropagation();
        toggleBlurContent(card);
        return;
    }

    if (event.target.closest('.show-content-btn')) {
        event.stopPropagation();
        showBlurredContent(card);
        return;
    }

    if (event.target.closest('.fa-star')) {
        event.stopPropagation();
        toggleFavorite(card);
        return;
    }

    if (event.target.closest('.fa-globe')) {
        event.stopPropagation();
        // Only models that actually came from Civitai have a page to open.
        if (card.dataset.from_civitai === 'true') {
            openCivitai(card.dataset.name);
        }
        return;
    }

    if (event.target.closest('.fa-paper-plane')) {
        event.stopPropagation();
        // Shift+Click replaces loras in the workflow, plain click appends.
        sendLoraToComfyUI(card, event.shiftKey);
        return;
    }

    if (event.target.closest('.fa-copy')) {
        event.stopPropagation();
        copyLoraSyntax(card);
        return;
    }

    if (event.target.closest('.fa-image')) {
        event.stopPropagation();
        replacePreview(card.dataset.filepath);
        return;
    }

    if (event.target.closest('.fa-folder-open')) {
        event.stopPropagation();
        openExampleImagesFolder(card.dataset.sha256);
        return;
    }

    // If no specific element was clicked, handle the card click
    // (show modal or toggle selection).
    const pageState = getCurrentPageState();

    if (state.bulkMode) {
        // Toggle selection using the bulk manager
        bulkManager.toggleCardSelection(card);
    } else if (pageState && pageState.duplicatesMode) {
        // In duplicates mode, don't open modal when clicking cards
        return;
    } else {
        // Normal behavior - show modal. All JSON-bearing dataset fields are
        // parsed defensively so one corrupt card cannot break the handler.
        const loraMeta = {
            sha256: card.dataset.sha256,
            file_path: card.dataset.filepath,
            model_name: card.dataset.name,
            file_name: card.dataset.file_name,
            folder: card.dataset.folder,
            modified: card.dataset.modified,
            file_size: card.dataset.file_size,
            from_civitai: card.dataset.from_civitai === 'true',
            base_model: card.dataset.base_model,
            usage_tips: card.dataset.usage_tips,
            notes: card.dataset.notes,
            favorite: card.dataset.favorite === 'true',
            // Parse civitai metadata from the card's dataset
            civitai: safeParseJson(card.dataset.meta, {}, 'civitai metadata'),
            tags: safeParseJson(card.dataset.tags, [], 'tags'),
            modelDescription: card.dataset.modelDescription || ''
        };
        showLoraModal(loraMeta);
    }
}

// Parse a JSON string stored in a dataset attribute, returning the given
// fallback when the attribute is missing/empty or contains malformed JSON.
// Keeps a single bad card from throwing inside the delegated click handler.
function safeParseJson(value, fallback, label = 'metadata') {
    if (!value) return fallback;
    try {
        return JSON.parse(value);
    } catch (e) {
        console.error(`Failed to parse ${label}:`, e);
        return fallback;
    }
}
// Helper functions for event handling

// Flip the NSFW blur on a card's preview and keep the eye icon and
// warning overlay in sync with the new state.
function toggleBlurContent(card) {
    const preview = card.querySelector('.card-preview');
    const nowBlurred = preview.classList.toggle('blurred');

    // Eye = "click to reveal", eye-slash = "click to hide".
    const icon = card.querySelector('.toggle-blur-btn i');
    icon.className = nowBlurred ? 'fas fa-eye' : 'fas fa-eye-slash';

    // The NSFW overlay is only visible while the preview is blurred.
    const overlay = card.querySelector('.nsfw-overlay');
    if (overlay) {
        overlay.style.display = nowBlurred ? 'flex' : 'none';
    }
}
// Reveal a blurred card preview: clear the blur class, reset the blur
// toggle's icon, and hide the NSFW warning overlay.
function showBlurredContent(card) {
    card.querySelector('.card-preview').classList.remove('blurred');

    // Preview is visible again, so the toggle now offers to re-hide it.
    const toggleBtn = card.querySelector('.toggle-blur-btn');
    if (toggleBtn) {
        toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
    }

    const overlay = card.querySelector('.nsfw-overlay');
    if (overlay) {
        overlay.style.display = 'none';
    }
}
// Toggle a card's favorite flag: persist the new state to the server
// first, then sync the star icon, the card dataset, and the toast
// feedback with the result.
async function toggleFavorite(card) {
    const starIcon = card.querySelector('.fa-star');
    // A solid star ('fas') means it is currently a favorite.
    const makeFavorite = !starIcon.classList.contains('fas');

    try {
        // Persist before touching the UI so a failed save leaves the card
        // showing its real, unchanged state.
        await saveModelMetadata(card.dataset.filepath, {
            favorite: makeFavorite
        });

        if (makeFavorite) {
            starIcon.classList.remove('far');
            starIcon.classList.add('fas', 'favorite-active');
            starIcon.title = 'Remove from favorites';
            card.dataset.favorite = 'true';
            showToast('Added to favorites', 'success');
        } else {
            starIcon.classList.remove('fas', 'favorite-active');
            starIcon.classList.add('far');
            starIcon.title = 'Add to favorites';
            card.dataset.favorite = 'false';
            showToast('Removed from favorites', 'success');
        }
    } catch (error) {
        console.error('Failed to update favorite status:', error);
        showToast('Failed to update favorite status', 'error');
    }
}
// Function to send LoRA to ComfyUI workflow.
// replaceMode=true replaces loras in the workflow, false appends
// (plain click vs. Shift+Click on the card icon).
async function sendLoraToComfyUI(card, replaceMode) {
    // usage_tips is user-editable metadata: a malformed JSON string must
    // not break the send action, so fall back to defaults on parse error
    // (mirrors the guarded civitai-metadata parse elsewhere in this file).
    let usageTips = {};
    try {
        usageTips = JSON.parse(card.dataset.usage_tips || '{}');
    } catch (e) {
        console.error('Failed to parse usage tips:', e);
    }
    const strength = usageTips.strength || 1;
    const loraSyntax = `<lora:${card.dataset.file_name}:${strength}>`;
    sendLoraToWorkflow(loraSyntax, replaceMode, 'lora');
}
// Add function to copy lora syntax (e.g. "<lora:name:0.8>") to the clipboard.
function copyLoraSyntax(card) {
    // Tolerate malformed user-edited usage_tips metadata instead of
    // throwing inside the click handler (mirrors the guarded
    // civitai-metadata parse elsewhere in this file).
    let usageTips = {};
    try {
        usageTips = JSON.parse(card.dataset.usage_tips || '{}');
    } catch (e) {
        console.error('Failed to parse usage tips:', e);
    }
    const strength = usageTips.strength || 1;
    const loraSyntax = `<lora:${card.dataset.file_name}:${strength}>`;
    copyToClipboard(loraSyntax, 'LoRA syntax copied to clipboard');
}
export function createLoraCard(lora) {
const card = document.createElement('div');
@@ -93,12 +289,12 @@ export function createLoraCard(lora) {
title="${lora.from_civitai ? 'View on Civitai' : 'Not available from Civitai'}"
${!lora.from_civitai ? 'style="opacity: 0.5; cursor: not-allowed"' : ''}>
</i>
<i class="fas fa-paper-plane"
title="Send to ComfyUI (Click: Append, Shift+Click: Replace)">
</i>
<i class="fas fa-copy"
title="Copy LoRA Syntax">
</i>
<i class="fas fa-trash"
title="Delete Model">
</i>
</div>
</div>
${shouldBlur ? `
@@ -114,170 +310,20 @@ export function createLoraCard(lora) {
<span class="model-name">${lora.model_name}</span>
</div>
<div class="card-actions">
<i class="fas fa-image"
title="Replace Preview Image">
<i class="fas fa-folder-open"
title="Open Example Images Folder">
</i>
</div>
</div>
</div>
`;
// Main card click event - modified to handle bulk mode
card.addEventListener('click', () => {
// Check if we're in bulk mode
if (state.bulkMode) {
// Toggle selection using the bulk manager
bulkManager.toggleCardSelection(card);
} else {
// Normal behavior - show modal
const loraMeta = {
sha256: card.dataset.sha256,
file_path: card.dataset.filepath,
model_name: card.dataset.name,
file_name: card.dataset.file_name,
folder: card.dataset.folder,
modified: card.dataset.modified,
file_size: card.dataset.file_size,
from_civitai: card.dataset.from_civitai === 'true',
base_model: card.dataset.base_model,
usage_tips: card.dataset.usage_tips,
notes: card.dataset.notes,
favorite: card.dataset.favorite === 'true',
// Parse civitai metadata from the card's dataset
civitai: (() => {
try {
// Attempt to parse the JSON string
return JSON.parse(card.dataset.meta || '{}');
} catch (e) {
console.error('Failed to parse civitai metadata:', e);
return {}; // Return empty object on error
}
})(),
tags: JSON.parse(card.dataset.tags || '[]'),
modelDescription: card.dataset.modelDescription || ''
};
showLoraModal(loraMeta);
}
});
// Toggle blur button functionality
const toggleBlurBtn = card.querySelector('.toggle-blur-btn');
if (toggleBlurBtn) {
toggleBlurBtn.addEventListener('click', (e) => {
e.stopPropagation();
const preview = card.querySelector('.card-preview');
const isBlurred = preview.classList.toggle('blurred');
const icon = toggleBlurBtn.querySelector('i');
// Update the icon based on blur state
if (isBlurred) {
icon.className = 'fas fa-eye';
} else {
icon.className = 'fas fa-eye-slash';
}
// Toggle the overlay visibility
const overlay = card.querySelector('.nsfw-overlay');
if (overlay) {
overlay.style.display = isBlurred ? 'flex' : 'none';
}
});
}
// Show content button functionality
const showContentBtn = card.querySelector('.show-content-btn');
if (showContentBtn) {
showContentBtn.addEventListener('click', (e) => {
e.stopPropagation();
const preview = card.querySelector('.card-preview');
preview.classList.remove('blurred');
// Update the toggle button icon
const toggleBtn = card.querySelector('.toggle-blur-btn');
if (toggleBtn) {
toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
}
// Hide the overlay
const overlay = card.querySelector('.nsfw-overlay');
if (overlay) {
overlay.style.display = 'none';
}
});
}
// Favorite button click event
card.querySelector('.fa-star')?.addEventListener('click', async e => {
e.stopPropagation();
const starIcon = e.currentTarget;
const isFavorite = starIcon.classList.contains('fas');
const newFavoriteState = !isFavorite;
try {
// Save the new favorite state to the server
await saveModelMetadata(card.dataset.filepath, {
favorite: newFavoriteState
});
// Update the UI
if (newFavoriteState) {
starIcon.classList.remove('far');
starIcon.classList.add('fas', 'favorite-active');
starIcon.title = 'Remove from favorites';
card.dataset.favorite = 'true';
showToast('Added to favorites', 'success');
} else {
starIcon.classList.remove('fas', 'favorite-active');
starIcon.classList.add('far');
starIcon.title = 'Add to favorites';
card.dataset.favorite = 'false';
showToast('Removed from favorites', 'success');
}
} catch (error) {
console.error('Failed to update favorite status:', error);
showToast('Failed to update favorite status', 'error');
}
});
// Copy button click event
card.querySelector('.fa-copy')?.addEventListener('click', async e => {
e.stopPropagation();
const usageTips = JSON.parse(card.dataset.usage_tips || '{}');
const strength = usageTips.strength || 1;
const loraSyntax = `<lora:${card.dataset.file_name}:${strength}>`;
await copyToClipboard(loraSyntax, 'LoRA syntax copied');
});
// Civitai button click event
if (lora.from_civitai) {
card.querySelector('.fa-globe')?.addEventListener('click', e => {
e.stopPropagation();
openCivitai(lora.model_name);
});
}
// Delete button click event
card.querySelector('.fa-trash')?.addEventListener('click', e => {
e.stopPropagation();
deleteModel(lora.file_path);
});
// Replace preview button click event
card.querySelector('.fa-image')?.addEventListener('click', e => {
e.stopPropagation();
replacePreview(lora.file_path);
});
// Apply bulk mode styling if currently in bulk mode
if (state.bulkMode) {
const actions = card.querySelectorAll('.card-actions');
actions.forEach(actionGroup => {
actionGroup.style.display = 'none';
});
// Add a special class for virtual scroll positioning if needed
if (state.virtualScroller) {
card.classList.add('virtual-scroll-item');
}
// Add autoplayOnHover handlers for video elements if needed
// Add video auto-play on hover functionality if needed
const videoElement = card.querySelector('video');
if (videoElement && autoplayOnHover) {
const cardPreview = card.querySelector('.card-preview');
@@ -286,15 +332,10 @@ export function createLoraCard(lora) {
videoElement.removeAttribute('autoplay');
videoElement.pause();
// Add mouse events to trigger play/pause
cardPreview.addEventListener('mouseenter', () => {
videoElement.play();
});
cardPreview.addEventListener('mouseleave', () => {
videoElement.pause();
videoElement.currentTime = 0;
});
// Add mouse events to trigger play/pause using event attributes
// This approach reduces the number of event listeners created
cardPreview.setAttribute('onmouseenter', 'this.querySelector("video")?.play()');
cardPreview.setAttribute('onmouseleave', 'const v=this.querySelector("video"); if(v){v.pause();v.currentTime=0;}');
}
return card;
@@ -307,7 +348,7 @@ export function updateCardsForBulkMode(isBulkMode) {
document.body.classList.toggle('bulk-mode', isBulkMode);
// Get all lora cards
// Get all lora cards - this can now be from the DOM or through the virtual scroller
const loraCards = document.querySelectorAll('.lora-card');
loraCards.forEach(card => {
@@ -329,6 +370,11 @@ export function updateCardsForBulkMode(isBulkMode) {
}
});
// If using virtual scroller, we need to rerender after toggling bulk mode
if (state.virtualScroller && typeof state.virtualScroller.scheduleRender === 'function') {
state.virtualScroller.scheduleRender();
}
// Apply selection state to cards if entering bulk mode
if (isBulkMode) {
bulkManager.applySelectionState();

View File

@@ -0,0 +1,599 @@
// Model Duplicates Manager Component for LoRAs and Checkpoints
import { showToast } from '../utils/uiHelpers.js';
import { state, getCurrentPageState } from '../state/index.js';
import { formatDate } from '../utils/formatters.js';
// Drives the duplicate-models workflow for LoRAs and Checkpoints:
// detects models sharing a SHA256 hash via the backend, renders them as
// grouped selectable cards in the model grid, and bulk-deletes the
// user's selection.
export class ModelDuplicatesManager {
    /**
     * @param {object} pageManager - Owning page controller.
     * @param {string} [modelType='loras'] - API namespace ('loras' or 'checkpoints').
     */
    constructor(pageManager, modelType = 'loras') {
        this.pageManager = pageManager;
        this.duplicateGroups = [];
        this.inDuplicateMode = false;
        this.selectedForDeletion = new Set();
        this.modelType = modelType; // Use the provided modelType or default to 'loras'

        // Bind methods that are detached and used as callbacks.
        this.renderModelCard = this.renderModelCard.bind(this);
        this.renderTooltip = this.renderTooltip.bind(this);
        this.checkDuplicatesCount = this.checkDuplicatesCount.bind(this);

        // Keep track of which controls need to be re-enabled
        this.disabledControls = [];

        // Check for duplicates on load, deferring until the DOM is ready.
        if (document.readyState === 'loading') {
            document.addEventListener('DOMContentLoaded', this.checkDuplicatesCount);
        } else {
            this.checkDuplicatesCount();
        }
    }

    // Method to check for duplicates count using existing endpoint.
    // Any failure degrades to a hidden (zero) badge rather than an error.
    async checkDuplicatesCount() {
        try {
            const endpoint = `/api/${this.modelType}/find-duplicates`;
            const response = await fetch(endpoint);

            if (!response.ok) {
                throw new Error(`Failed to get duplicates count: ${response.statusText}`);
            }

            const data = await response.json();
            if (data.success) {
                const duplicatesCount = (data.duplicates || []).length;
                this.updateDuplicatesBadge(duplicatesCount);
            } else {
                this.updateDuplicatesBadge(0);
            }
        } catch (error) {
            console.error('Error checking duplicates count:', error);
            this.updateDuplicatesBadge(0);
        }
    }

    // Method to update the badge: show the count with a pulse animation
    // when non-zero, otherwise clear it.
    updateDuplicatesBadge(count) {
        const badge = document.getElementById('duplicatesBadge');
        if (!badge) return;

        if (count > 0) {
            badge.textContent = count;
            badge.classList.add('pulse');
        } else {
            badge.textContent = '';
            badge.classList.remove('pulse');
        }
    }

    // Toggle method to enter/exit duplicates mode.
    toggleDuplicateMode() {
        if (this.inDuplicateMode) {
            this.exitDuplicateMode();
        } else {
            // findDuplicates enters duplicate mode itself when groups exist.
            this.findDuplicates();
        }
    }

    // Fetch duplicate groups from the backend and enter duplicate mode
    // when at least one group exists. Returns true only in that case.
    async findDuplicates() {
        try {
            // Determine API endpoint based on model type
            const endpoint = `/api/${this.modelType}/find-duplicates`;
            const response = await fetch(endpoint);

            if (!response.ok) {
                throw new Error(`Failed to find duplicates: ${response.statusText}`);
            }

            const data = await response.json();
            if (!data.success) {
                throw new Error(data.error || 'Unknown error finding duplicates');
            }

            this.duplicateGroups = data.duplicates || [];

            // Update the badge with the current count
            this.updateDuplicatesBadge(this.duplicateGroups.length);

            if (this.duplicateGroups.length === 0) {
                showToast('No duplicate models found', 'info');
                return false;
            }

            this.enterDuplicateMode();
            return true;
        } catch (error) {
            console.error('Error finding duplicates:', error);
            showToast('Failed to find duplicates: ' + error.message, 'error');
            return false;
        }
    }

    // Switch the page into duplicates mode: show the banner, disable the
    // virtual scroller and regular controls, and render the groups.
    enterDuplicateMode() {
        this.inDuplicateMode = true;
        this.selectedForDeletion.clear();

        // Update state
        const pageState = getCurrentPageState();
        pageState.duplicatesMode = true;

        // Show duplicates banner
        const banner = document.getElementById('duplicatesBanner');
        const countSpan = document.getElementById('duplicatesCount');
        if (banner && countSpan) {
            countSpan.textContent = `Found ${this.duplicateGroups.length} duplicate group${this.duplicateGroups.length !== 1 ? 's' : ''}`;
            banner.style.display = 'block';

            // Setup help tooltip behavior
            this.setupHelpTooltip();
        }

        // Disable virtual scrolling if active
        if (state.virtualScroller) {
            state.virtualScroller.disable();
        }

        // Add duplicate-mode class to the body
        document.body.classList.add('duplicate-mode');

        // Render duplicate groups
        this.renderDuplicateGroups();

        // Update selected count
        this.updateSelectedCount();

        // Update Duplicates button to show active state.
        // NOTE: replacing innerHTML removes the #duplicatesBadge span for
        // the duration of duplicate mode; updateDuplicatesBadge handles
        // the missing element gracefully.
        const duplicatesBtn = document.getElementById('findDuplicatesBtn');
        if (duplicatesBtn) {
            duplicatesBtn.classList.add('active');
            duplicatesBtn.title = 'Exit Duplicates Mode';
            // Change icon and text to indicate it's now an exit button
            duplicatesBtn.innerHTML = '<i class="fas fa-times"></i> Exit Duplicates';
        }

        // Disable all control buttons except the duplicates button
        this.disableControlButtons();
    }

    // Leave duplicates mode: restore the banner, grid, controls, and the
    // Duplicates button, then refresh the badge count.
    exitDuplicateMode() {
        this.inDuplicateMode = false;
        this.selectedForDeletion.clear();

        // Update state
        const pageState = getCurrentPageState();
        pageState.duplicatesMode = false;

        // Hide duplicates banner
        const banner = document.getElementById('duplicatesBanner');
        if (banner) {
            banner.style.display = 'none';
        }

        // Remove duplicate-mode class from the body
        document.body.classList.remove('duplicate-mode');

        // Clear the model grid first
        const modelGrid = document.getElementById(this.modelType === 'loras' ? 'loraGrid' : 'checkpointGrid');
        if (modelGrid) {
            modelGrid.innerHTML = '';
        }

        // Re-enable virtual scrolling. Guarded like every other access to
        // state.virtualScroller (see enterDuplicateMode): without the guard
        // this crashes on pages that render without virtualization.
        if (state.virtualScroller) {
            state.virtualScroller.enable();
        }

        // Restore Duplicates button to its original state
        const duplicatesBtn = document.getElementById('findDuplicatesBtn');
        if (duplicatesBtn) {
            duplicatesBtn.classList.remove('active');
            duplicatesBtn.title = 'Find duplicate models';
            duplicatesBtn.innerHTML = '<i class="fas fa-clone"></i> Duplicates <span id="duplicatesBadge" class="badge"></span>';

            // Restore badge.
            // NOTE(review): after the innerHTML replacement above, the old
            // badge element no longer exists, so oldBadge resolves to the
            // freshly created empty span and this restore is effectively a
            // no-op; checkDuplicatesCount() below repopulates the badge.
            const newBadge = duplicatesBtn.querySelector('#duplicatesBadge');
            const oldBadge = document.getElementById('duplicatesBadge');
            if (oldBadge && oldBadge.textContent) {
                newBadge.textContent = oldBadge.textContent;
                newBadge.classList.add('pulse');
            }
        }

        // Re-enable all control buttons
        this.enableControlButtons();

        this.checkDuplicatesCount();
    }

    // Disable all control buttons except the duplicates button, recording
    // which ones were actually disabled so only those get re-enabled.
    disableControlButtons() {
        this.disabledControls = [];

        // Select all control buttons except the duplicates button
        const controlButtons = document.querySelectorAll('.control-group button:not(#findDuplicatesBtn), .dropdown-group, .toggle-folders-btn, #favoriteFilterBtn');

        controlButtons.forEach(button => {
            // Only disable enabled buttons (don't disable already disabled buttons)
            if (!button.disabled && !button.classList.contains('disabled')) {
                this.disabledControls.push(button);
                button.disabled = true;
                button.classList.add('disabled-during-duplicates');
            }
        });
    }

    // Re-enable all previously disabled control buttons
    enableControlButtons() {
        this.disabledControls.forEach(button => {
            button.disabled = false;
            button.classList.remove('disabled-during-duplicates');
        });
        this.disabledControls = [];
    }

    // Rebuild the model grid as a list of duplicate groups, each with a
    // header, a select-all button, and the group's model cards.
    renderDuplicateGroups() {
        const modelGrid = document.getElementById(this.modelType === 'loras' ? 'loraGrid' : 'checkpointGrid');
        if (!modelGrid) return;

        // Clear existing content
        modelGrid.innerHTML = '';

        // Render each duplicate group
        this.duplicateGroups.forEach((group, groupIndex) => {
            const groupDiv = document.createElement('div');
            groupDiv.className = 'duplicate-group';
            groupDiv.dataset.hash = group.hash;

            // Create group header.
            // NOTE(review): the inline onclick relies on a global
            // `modelDuplicatesManager` being exposed (e.g. on window) —
            // confirm against the page bootstrap code.
            const header = document.createElement('div');
            header.className = 'duplicate-group-header';
            header.innerHTML = `
                <span>Duplicate Group #${groupIndex + 1} (${group.models.length} models with same hash: ${group.hash})</span>
                <span>
                    <button class="btn-select-all" onclick="modelDuplicatesManager.toggleSelectAllInGroup('${group.hash}')">
                        Select All
                    </button>
                </span>
            `;
            groupDiv.appendChild(header);

            // Create cards container
            const cardsDiv = document.createElement('div');
            cardsDiv.className = 'card-group-container';

            // Add scrollable class if there are many models in the group
            if (group.models.length > 6) {
                cardsDiv.classList.add('scrollable');

                // Add expand/collapse toggle button
                const toggleBtn = document.createElement('button');
                toggleBtn.className = 'group-toggle-btn';
                toggleBtn.innerHTML = '<i class="fas fa-chevron-down"></i>';
                toggleBtn.title = "Expand/Collapse";
                toggleBtn.onclick = function() {
                    cardsDiv.classList.toggle('scrollable');
                    this.innerHTML = cardsDiv.classList.contains('scrollable') ?
                        '<i class="fas fa-chevron-down"></i>' :
                        '<i class="fas fa-chevron-up"></i>';
                };
                groupDiv.appendChild(toggleBtn);
            }

            // Add all model cards in this group
            group.models.forEach(model => {
                const card = this.renderModelCard(model, group.hash);
                cardsDiv.appendChild(card);
            });

            groupDiv.appendChild(cardsDiv);
            modelGrid.appendChild(groupDiv);
        });
    }

    // Build a single selectable card for one model inside a duplicate
    // group: preview (image or video), hover tooltip, footer with the
    // model name, and a selection checkbox.
    renderModelCard(model, groupHash) {
        // Create basic card structure
        const card = document.createElement('div');
        card.className = 'lora-card duplicate';
        card.dataset.hash = model.sha256;
        card.dataset.filePath = model.file_path;

        // Create card content using structure similar to createLoraCard in LoraCard.js
        const previewContainer = document.createElement('div');
        previewContainer.className = 'card-preview';

        // Determine if preview is a video
        const isVideo = model.preview_url && model.preview_url.endsWith('.mp4');

        let preview;
        if (isVideo) {
            // Create video element for MP4 previews
            preview = document.createElement('video');
            preview.loading = 'lazy';
            preview.controls = true;
            preview.muted = true;
            preview.loop = true;

            const source = document.createElement('source');
            source.src = model.preview_url;
            source.type = 'video/mp4';
            preview.appendChild(source);
        } else {
            // Create image element for standard previews
            preview = document.createElement('img');
            preview.loading = 'lazy';
            preview.alt = model.model_name;

            if (model.preview_url) {
                preview.src = model.preview_url;
            } else {
                // Use placeholder
                preview.src = '/loras_static/images/no-preview.png';
            }
        }

        // Add NSFW blur if needed
        if (model.preview_nsfw_level > 0) {
            preview.classList.add('nsfw');
        }

        previewContainer.appendChild(preview);

        // Move tooltip listeners to the preview container for consistent behavior
        // regardless of whether the preview is an image or video
        previewContainer.addEventListener('mouseover', () => this.renderTooltip(card, model));
        previewContainer.addEventListener('mouseout', () => {
            const tooltip = document.querySelector('.model-tooltip');
            if (tooltip) tooltip.remove();
        });

        // Add card footer with just model name
        const footer = document.createElement('div');
        footer.className = 'card-footer';

        const modelInfo = document.createElement('div');
        modelInfo.className = 'model-info';

        const modelName = document.createElement('span');
        modelName.className = 'model-name';
        modelName.textContent = model.model_name;

        modelInfo.appendChild(modelName);
        footer.appendChild(modelInfo);
        previewContainer.appendChild(footer);
        card.appendChild(previewContainer);

        // Add selection checkbox
        const checkbox = document.createElement('input');
        checkbox.type = 'checkbox';
        checkbox.className = 'selector-checkbox';
        checkbox.dataset.filePath = model.file_path;
        checkbox.dataset.groupHash = groupHash;

        // Check if already selected
        if (this.selectedForDeletion.has(model.file_path)) {
            checkbox.checked = true;
            card.classList.add('duplicate-selected');
        }

        // Add change event to checkbox
        checkbox.addEventListener('change', (e) => {
            e.stopPropagation();
            this.toggleCardSelection(model.file_path, card, checkbox);
        });

        // Make the entire card clickable for selection
        card.addEventListener('click', (e) => {
            // Don't toggle if clicking on the checkbox directly or card actions
            if (e.target === checkbox || e.target.closest('.card-actions')) {
                return;
            }

            // Toggle checkbox state
            checkbox.checked = !checkbox.checked;
            this.toggleCardSelection(model.file_path, card, checkbox);
        });

        card.appendChild(checkbox);
        return card;
    }

    // Show a floating tooltip with the model's details next to its card,
    // flipping to the left side when it would overflow the viewport.
    renderTooltip(card, model) {
        // Remove any existing tooltips
        const existingTooltip = document.querySelector('.model-tooltip');
        if (existingTooltip) existingTooltip.remove();

        // Create tooltip
        const tooltip = document.createElement('div');
        tooltip.className = 'model-tooltip';

        // Add model information to tooltip
        tooltip.innerHTML = `
            <div class="tooltip-header">${model.model_name}</div>
            <div class="tooltip-info">
                <div><strong>Version:</strong> ${model.civitai?.name || 'Unknown'}</div>
                <div><strong>Filename:</strong> ${model.file_name}</div>
                <div><strong>Path:</strong> ${model.file_path}</div>
                <div><strong>Base Model:</strong> ${model.base_model || 'Unknown'}</div>
                <div><strong>Modified:</strong> ${formatDate(model.modified)}</div>
            </div>
        `;

        // Position tooltip relative to card
        const cardRect = card.getBoundingClientRect();
        tooltip.style.top = `${cardRect.top + window.scrollY - 10}px`;
        tooltip.style.left = `${cardRect.left + window.scrollX + cardRect.width + 10}px`;

        // Add tooltip to document
        document.body.appendChild(tooltip);

        // Check if tooltip is outside viewport and adjust if needed
        const tooltipRect = tooltip.getBoundingClientRect();
        if (tooltipRect.right > window.innerWidth) {
            tooltip.style.left = `${cardRect.left + window.scrollX - tooltipRect.width - 10}px`;
        }
    }

    // Helper method to toggle card selection state
    toggleCardSelection(filePath, card, checkbox) {
        if (checkbox.checked) {
            this.selectedForDeletion.add(filePath);
            card.classList.add('duplicate-selected');
        } else {
            this.selectedForDeletion.delete(filePath);
            card.classList.remove('duplicate-selected');
        }

        this.updateSelectedCount();
    }

    // Sync the banner's selected-count display and the delete button's
    // enabled state with the current selection.
    updateSelectedCount() {
        const selectedCountEl = document.getElementById('duplicatesSelectedCount');
        if (selectedCountEl) {
            selectedCountEl.textContent = this.selectedForDeletion.size;
        }

        // Update delete button state
        const deleteBtn = document.querySelector('.btn-delete-selected');
        if (deleteBtn) {
            deleteBtn.disabled = this.selectedForDeletion.size === 0;
            deleteBtn.classList.toggle('disabled', this.selectedForDeletion.size === 0);
        }
    }

    // Select every model in a group, or deselect all when every one is
    // already selected, and flip the group's button label to match.
    toggleSelectAllInGroup(hash) {
        const checkboxes = document.querySelectorAll(`.selector-checkbox[data-group-hash="${hash}"]`);
        const allSelected = Array.from(checkboxes).every(checkbox => checkbox.checked);

        // If all are selected, deselect all; otherwise select all
        checkboxes.forEach(checkbox => {
            checkbox.checked = !allSelected;
            const filePath = checkbox.dataset.filePath;
            const card = checkbox.closest('.lora-card');

            if (!allSelected) {
                this.selectedForDeletion.add(filePath);
                card.classList.add('duplicate-selected');
            } else {
                this.selectedForDeletion.delete(filePath);
                card.classList.remove('duplicate-selected');
            }
        });

        // Update the button text
        const button = document.querySelector(`.duplicate-group[data-hash="${hash}"] .btn-select-all`);
        if (button) {
            button.textContent = !allSelected ? "Deselect All" : "Select All";
        }

        this.updateSelectedCount();
    }

    // Open the confirmation modal for deleting the selected models.
    // NOTE(review): modalManager is used as a global here (it is not
    // imported in this module) — confirm it is exposed on window.
    async deleteSelectedDuplicates() {
        if (this.selectedForDeletion.size === 0) {
            showToast('No models selected for deletion', 'info');
            return;
        }

        try {
            // Show the delete confirmation modal instead of a simple confirm
            const modelDuplicateDeleteCount = document.getElementById('modelDuplicateDeleteCount');
            if (modelDuplicateDeleteCount) {
                modelDuplicateDeleteCount.textContent = this.selectedForDeletion.size;
            }

            // Use the modal manager to show the confirmation modal
            modalManager.showModal('modelDuplicateDeleteModal');
        } catch (error) {
            console.error('Error preparing delete:', error);
            showToast('Error: ' + error.message, 'error');
        }
    }

    // Execute deletion after confirmation: bulk-delete the selected file
    // paths via the API, then refresh the badge and leave duplicate mode.
    async confirmDeleteDuplicates() {
        try {
            // Close the modal
            modalManager.closeModal('modelDuplicateDeleteModal');

            // Prepare file paths for deletion
            const filePaths = Array.from(this.selectedForDeletion);

            // Call API to bulk delete
            const response = await fetch(`/api/${this.modelType}/bulk-delete`, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json'
                },
                body: JSON.stringify({ file_paths: filePaths })
            });

            if (!response.ok) {
                throw new Error('Failed to delete selected models');
            }

            const data = await response.json();
            if (!data.success) {
                throw new Error(data.error || 'Unknown error deleting models');
            }

            showToast(`Successfully deleted ${data.total_deleted} models`, 'success');

            // Exit duplicate mode if deletions were successful
            if (data.total_deleted > 0) {
                // Check duplicates count after deletion
                this.checkDuplicatesCount();
                this.exitDuplicateMode();
            }
        } catch (error) {
            console.error('Error deleting models:', error);
            showToast('Failed to delete models: ' + error.message, 'error');
        }
    }

    // Public method to update the badge after refresh
    updateDuplicatesBadgeAfterRefresh() {
        // Use this method after refresh operations
        this.checkDuplicatesCount();
    }

    // Add this new method for tooltip behavior: positions the banner's
    // help tooltip next to its icon and hides it on mouse-out or when
    // clicking elsewhere.
    setupHelpTooltip() {
        const helpIcon = document.getElementById('duplicatesHelp');
        const helpTooltip = document.getElementById('duplicatesHelpTooltip');

        if (!helpIcon || !helpTooltip) return;

        helpIcon.addEventListener('mouseenter', () => {
            // Get the container's positioning context
            const bannerContent = helpIcon.closest('.banner-content');

            // Get positions relative to the viewport
            const iconRect = helpIcon.getBoundingClientRect();
            const bannerRect = bannerContent.getBoundingClientRect();

            // Set initial position relative to the banner content
            helpTooltip.style.display = 'block';
            helpTooltip.style.top = `${iconRect.bottom - bannerRect.top + 10}px`;
            helpTooltip.style.left = `${iconRect.left - bannerRect.left - 10}px`;

            // Check if the tooltip is going off-screen to the right
            const tooltipRect = helpTooltip.getBoundingClientRect();
            const viewportWidth = window.innerWidth;

            if (tooltipRect.right > viewportWidth - 20) {
                // Reposition relative to container if too close to right edge
                helpTooltip.style.left = `${bannerContent.offsetWidth - tooltipRect.width - 20}px`;
            }
        });

        // Rest of the event listeners remain unchanged
        helpIcon.addEventListener('mouseleave', () => {
            helpTooltip.style.display = 'none';
        });

        document.addEventListener('click', (e) => {
            if (!helpIcon.contains(e.target)) {
                helpTooltip.style.display = 'none';
            }
        });
    }
}

View File

@@ -1,12 +1,16 @@
// Recipe Card Component
import { showToast, copyToClipboard } from '../utils/uiHelpers.js';
import { showToast, copyToClipboard, sendLoraToWorkflow } from '../utils/uiHelpers.js';
import { modalManager } from '../managers/ModalManager.js';
import { getCurrentPageState } from '../state/index.js';
class RecipeCard {
constructor(recipe, clickHandler) {
this.recipe = recipe;
this.clickHandler = clickHandler;
this.element = this.createCardElement();
// Store reference to this instance on the DOM element for updates
this.element._recipeCardInstance = this;
}
createCardElement() {
@@ -33,33 +37,41 @@ class RecipeCard {
(this.recipe.file_path ? `/loras_static/root1/preview/${this.recipe.file_path.split('/').pop()}` :
'/loras_static/images/no-preview.png');
// Check if in duplicates mode
const pageState = getCurrentPageState();
const isDuplicatesMode = pageState.duplicatesMode;
card.innerHTML = `
<div class="recipe-indicator" title="Recipe">R</div>
${!isDuplicatesMode ? `<div class="recipe-indicator" title="Recipe">R</div>` : ''}
<div class="card-preview">
<img src="${imageUrl}" alt="${this.recipe.title}">
${!isDuplicatesMode ? `
<div class="card-header">
<div class="base-model-wrapper">
${baseModel ? `<span class="base-model-label" title="${baseModel}">${baseModel}</span>` : ''}
</div>
<div class="card-actions">
<i class="fas fa-share-alt" title="Share Recipe"></i>
<i class="fas fa-copy" title="Copy Recipe Syntax"></i>
<i class="fas fa-paper-plane" title="Send Recipe to Workflow (Click: Append, Shift+Click: Replace)"></i>
<i class="fas fa-trash" title="Delete Recipe"></i>
</div>
</div>
` : ''}
<div class="card-footer">
<div class="model-info">
<span class="model-name">${this.recipe.title}</span>
</div>
${!isDuplicatesMode ? `
<div class="lora-count ${allLorasAvailable ? 'ready' : (lorasCount > 0 ? 'missing' : '')}"
title="${this.getLoraStatusTitle(lorasCount, missingLorasCount)}">
<i class="fas fa-layer-group"></i> ${lorasCount}
</div>
` : ''}
</div>
</div>
`;
this.attachEventListeners(card);
this.attachEventListeners(card, isDuplicatesMode);
return card;
}
@@ -69,58 +81,59 @@ class RecipeCard {
return `${missingCount} of ${totalCount} LoRAs missing`;
}
attachEventListeners(card) {
// Recipe card click event
card.addEventListener('click', () => {
this.clickHandler(this.recipe);
});
// Share button click event - prevent propagation to card
card.querySelector('.fa-share-alt')?.addEventListener('click', (e) => {
e.stopPropagation();
this.shareRecipe();
});
// Copy button click event - prevent propagation to card
card.querySelector('.fa-copy')?.addEventListener('click', (e) => {
e.stopPropagation();
this.copyRecipeSyntax();
});
// Delete button click event - prevent propagation to card
card.querySelector('.fa-trash')?.addEventListener('click', (e) => {
e.stopPropagation();
this.showDeleteConfirmation();
});
attachEventListeners(card, isDuplicatesMode) {
// Recipe card click event - only attach if not in duplicates mode
if (!isDuplicatesMode) {
card.addEventListener('click', () => {
this.clickHandler(this.recipe);
});
// Share button click event - prevent propagation to card
card.querySelector('.fa-share-alt')?.addEventListener('click', (e) => {
e.stopPropagation();
this.shareRecipe();
});
// Send button click event - prevent propagation to card
card.querySelector('.fa-paper-plane')?.addEventListener('click', (e) => {
e.stopPropagation();
this.sendRecipeToWorkflow(e.shiftKey);
});
// Delete button click event - prevent propagation to card
card.querySelector('.fa-trash')?.addEventListener('click', (e) => {
e.stopPropagation();
this.showDeleteConfirmation();
});
}
}
copyRecipeSyntax() {
// Replace copyRecipeSyntax with sendRecipeToWorkflow
sendRecipeToWorkflow(replaceMode = false) {
try {
// Get recipe ID
const recipeId = this.recipe.id;
if (!recipeId) {
showToast('Cannot copy recipe syntax: Missing recipe ID', 'error');
showToast('Cannot send recipe: Missing recipe ID', 'error');
return;
}
// Fallback if button not found
fetch(`/api/recipe/${recipeId}/syntax`)
.then(response => response.json())
.then(data => {
if (data.success && data.syntax) {
return copyToClipboard(data.syntax, 'Recipe syntax copied to clipboard');
return sendLoraToWorkflow(data.syntax, replaceMode, 'recipe');
} else {
throw new Error(data.error || 'No syntax returned');
}
})
.catch(err => {
console.error('Failed to copy: ', err);
showToast('Failed to copy recipe syntax', 'error');
console.error('Failed to send recipe to workflow: ', err);
showToast('Failed to send recipe to workflow', 'error');
});
} catch (error) {
console.error('Error copying recipe syntax:', error);
showToast('Error copying recipe syntax', 'error');
console.error('Error sending recipe to workflow:', error);
showToast('Error sending recipe to workflow', 'error');
}
}

View File

@@ -2,6 +2,7 @@
import { showToast, copyToClipboard } from '../utils/uiHelpers.js';
import { state } from '../state/index.js';
import { setSessionItem, removeSessionItem } from '../utils/storageHelpers.js';
import { updateRecipeCard } from '../utils/cardUpdater.js';
class RecipeModal {
constructor() {
@@ -82,7 +83,7 @@ class RecipeModal {
showRecipeDetails(recipe) {
// Store the full recipe for editing
this.currentRecipe = JSON.parse(JSON.stringify(recipe)); // 深拷贝以避免对原始对象的修改
this.currentRecipe = recipe;
// Set modal title with edit icon
const modalTitle = document.getElementById('recipeModalTitle');
@@ -245,6 +246,45 @@ class RecipeModal {
imgElement.alt = recipe.title || 'Recipe Preview';
mediaContainer.appendChild(imgElement);
}
// Add source URL container if the recipe has a source_path
const sourceUrlContainer = document.createElement('div');
sourceUrlContainer.className = 'source-url-container';
const hasSourceUrl = recipe.source_path && recipe.source_path.trim().length > 0;
const sourceUrl = hasSourceUrl ? recipe.source_path : '';
const isValidUrl = hasSourceUrl && (sourceUrl.startsWith('http://') || sourceUrl.startsWith('https://'));
sourceUrlContainer.innerHTML = `
<div class="source-url-content">
<span class="source-url-icon"><i class="fas fa-link"></i></span>
<span class="source-url-text" title="${isValidUrl ? 'Click to open source URL' : 'No valid URL'}">${
hasSourceUrl ? sourceUrl : 'No source URL'
}</span>
</div>
<button class="source-url-edit-btn" title="Edit source URL">
<i class="fas fa-pencil-alt"></i>
</button>
`;
// Add source URL editor
const sourceUrlEditor = document.createElement('div');
sourceUrlEditor.className = 'source-url-editor';
sourceUrlEditor.innerHTML = `
<input type="text" class="source-url-input" placeholder="Enter source URL (e.g., https://civitai.com/...)" value="${sourceUrl}">
<div class="source-url-actions">
<button class="source-url-cancel-btn">Cancel</button>
<button class="source-url-save-btn">Save</button>
</div>
`;
// Append both containers to the media container
mediaContainer.appendChild(sourceUrlContainer);
mediaContainer.appendChild(sourceUrlEditor);
// Set up event listeners for source URL functionality
setTimeout(() => {
this.setupSourceUrlHandlers();
}, 50);
}
// Set generation parameters
@@ -451,8 +491,6 @@ class RecipeModal {
lorasListElement.innerHTML = '<div class="no-loras">No LoRAs associated with this recipe</div>';
this.recipeLorasSyntax = '';
}
console.log(this.currentRecipe.loras);
// Show the modal
modalManager.showModal('recipeModal');
@@ -648,50 +686,8 @@ class RecipeModal {
// 更新当前recipe对象的属性
Object.assign(this.currentRecipe, updates);
// 确保这个更新也传播到卡片视图
// 尝试找到可能显示这个recipe的卡片并更新它
try {
const recipeCards = document.querySelectorAll('.recipe-card');
recipeCards.forEach(card => {
if (card.dataset.recipeId === this.recipeId) {
// 更新卡片标题
if (updates.title) {
const titleElement = card.querySelector('.recipe-title');
if (titleElement) {
titleElement.textContent = updates.title;
}
}
// 更新卡片标签
if (updates.tags) {
const tagsElement = card.querySelector('.recipe-tags');
if (tagsElement) {
if (updates.tags.length > 0) {
tagsElement.innerHTML = updates.tags.map(
tag => `<div class="recipe-tag">${tag}</div>`
).join('');
} else {
tagsElement.innerHTML = '';
}
}
}
}
});
} catch (err) {
console.log("Non-critical error updating recipe cards:", err);
}
// 重要强制刷新recipes列表确保从服务器获取最新数据
try {
if (window.recipeManager && typeof window.recipeManager.loadRecipes === 'function') {
// 异步刷新recipes列表不阻塞用户界面
setTimeout(() => {
window.recipeManager.loadRecipes(true);
}, 500);
}
} catch (err) {
console.log("Error refreshing recipes list:", err);
}
// Update the recipe card in the UI
updateRecipeCard(this.recipeId, updates);
} else {
showToast(`Failed to update recipe: ${data.error}`, 'error');
}
@@ -951,8 +947,8 @@ class RecipeModal {
let loraSyntaxMatch = inputValue.match(/<lora:([^:>]+)(?::[^>]+)?>/);
let fileName = loraSyntaxMatch ? loraSyntaxMatch[1] : inputValue.trim();
// Remove any file extension if present
fileName = fileName.replace(/\.\w+$/, '');
// Remove .safetensors extension if present
fileName = fileName.replace(/\.safetensors$/, '');
// Get the deleted lora data
const deletedLora = this.currentRecipe.loras[loraIndex];
@@ -1069,6 +1065,56 @@ class RecipeModal {
});
});
}
// New method to set up source URL handlers
setupSourceUrlHandlers() {
const sourceUrlContainer = document.querySelector('.source-url-container');
const sourceUrlEditor = document.querySelector('.source-url-editor');
const sourceUrlText = sourceUrlContainer.querySelector('.source-url-text');
const sourceUrlEditBtn = sourceUrlContainer.querySelector('.source-url-edit-btn');
const sourceUrlCancelBtn = sourceUrlEditor.querySelector('.source-url-cancel-btn');
const sourceUrlSaveBtn = sourceUrlEditor.querySelector('.source-url-save-btn');
const sourceUrlInput = sourceUrlEditor.querySelector('.source-url-input');
// Show editor on edit button click
sourceUrlEditBtn.addEventListener('click', () => {
sourceUrlContainer.classList.add('hide');
sourceUrlEditor.classList.add('active');
sourceUrlInput.focus();
});
// Cancel editing
sourceUrlCancelBtn.addEventListener('click', () => {
sourceUrlEditor.classList.remove('active');
sourceUrlContainer.classList.remove('hide');
sourceUrlInput.value = this.currentRecipe.source_path || '';
});
// Save new source URL
sourceUrlSaveBtn.addEventListener('click', () => {
const newSourceUrl = sourceUrlInput.value.trim();
if (newSourceUrl && newSourceUrl !== this.currentRecipe.source_path) {
// Update source URL in the UI
sourceUrlText.textContent = newSourceUrl;
sourceUrlText.title = newSourceUrl.startsWith('http://') || newSourceUrl.startsWith('https://') ? 'Click to open source URL' : 'No valid URL';
// Update the recipe on the server
this.updateRecipeMetadata({ source_path: newSourceUrl });
}
// Hide editor
sourceUrlEditor.classList.remove('active');
sourceUrlContainer.classList.remove('hide');
});
// Open source URL in a new tab if it's valid
sourceUrlText.addEventListener('click', () => {
const url = sourceUrlText.textContent.trim();
if (url.startsWith('http://') || url.startsWith('https://')) {
window.open(url, '_blank');
}
});
}
}
export { RecipeModal };

View File

@@ -0,0 +1,319 @@
// AlphabetBar.js - Component for alphabet filtering
import { getCurrentPageState, setCurrentPageType } from '../../state/index.js';
import { getStorageItem, setStorageItem } from '../../utils/storageHelpers.js';
import { resetAndReload } from '../../api/loraApi.js';
/**
* AlphabetBar class - Handles the alphabet filtering UI and interactions
*/
export class AlphabetBar {
    /**
     * @param {string} [pageType='loras'] - Which page this bar filters
     *     ('loras' or 'checkpoints'); also namespaces the storage keys
     *     used to persist the active letter and collapse state.
     */
    constructor(pageType = 'loras') {
        // Store the page type
        this.pageType = pageType;

        // Get the current page state
        this.pageState = getCurrentPageState();

        // Initialize letter counts (letter -> number of models)
        this.letterCounts = {};

        // Initialize the component (async; intentionally not awaited here)
        this.initializeComponent();
    }

    /**
     * Initialize the alphabet bar component: fetch counts, wire events,
     * then restore persisted filter/collapse state.
     */
    async initializeComponent() {
        // Get letter counts from API
        await this.fetchLetterCounts();

        // Initialize event listeners
        this.initEventListeners();

        // Restore the active letter filter from storage if available
        this.restoreActiveLetterFilter();

        // Restore collapse state from storage
        this.restoreCollapseState();

        // Update the toggle button indicator if there's an active letter filter
        this.updateToggleIndicator();
    }

    /**
     * Fetch letter counts from the API.
     * Failure is non-fatal: the error is logged and the bar keeps its
     * previous (possibly empty) counts.
     */
    async fetchLetterCounts() {
        try {
            const response = await fetch('/api/loras/letter-counts');
            if (!response.ok) {
                throw new Error(`Failed to fetch letter counts: ${response.statusText}`);
            }

            const data = await response.json();
            if (data.success && data.letter_counts) {
                this.letterCounts = data.letter_counts;

                // Update the count display in the UI
                this.updateLetterCountsDisplay();
            }
        } catch (error) {
            console.error('Error fetching letter counts:', error);
        }
    }

    /**
     * Update the letter counts display in the UI.
     * Chips with a zero count are disabled (not clickable).
     */
    updateLetterCountsDisplay() {
        const letterChips = document.querySelectorAll('.letter-chip');

        letterChips.forEach(chip => {
            const letter = chip.dataset.letter;
            const count = this.letterCounts[letter] || 0;

            // Update the title attribute for tooltip display
            if (count > 0) {
                chip.title = `${letter}: ${count} LoRAs`;
                chip.classList.remove('disabled');
            } else {
                chip.title = `${letter}: No LoRAs`;
                chip.classList.add('disabled');
            }

            // Keep the count span for backward compatibility
            const countSpan = chip.querySelector('.count');
            if (countSpan) {
                countSpan.textContent = ` (${count})`;
            }
        });
    }

    /**
     * Initialize event listeners for the alphabet bar:
     * delegated clicks on letter chips, the collapse toggle button,
     * and Alt+key keyboard shortcuts.
     */
    initEventListeners() {
        const alphabetBar = document.querySelector('.alphabet-bar');
        const toggleButton = document.querySelector('.toggle-alphabet-bar');
        const alphabetBarContainer = document.querySelector('.alphabet-bar-container');

        if (alphabetBar) {
            // Use event delegation for letter chips (chips may be re-rendered)
            alphabetBar.addEventListener('click', (e) => {
                const letterChip = e.target.closest('.letter-chip');
                if (letterChip && !letterChip.classList.contains('disabled')) {
                    this.handleLetterClick(letterChip);
                }
            });

            // Add toggle button listener
            if (toggleButton && alphabetBarContainer) {
                toggleButton.addEventListener('click', () => {
                    alphabetBarContainer.classList.toggle('collapsed');

                    // If expanding and there's an active letter, scroll it into view
                    if (!alphabetBarContainer.classList.contains('collapsed')) {
                        this.scrollActiveLetterIntoView();
                    }

                    // Save collapse state to storage
                    setStorageItem(`${this.pageType}_alphabetBarCollapsed`,
                        alphabetBarContainer.classList.contains('collapsed'));

                    // Update toggle indicator
                    this.updateToggleIndicator();
                });
            }

            // Add keyboard shortcut listeners
            document.addEventListener('keydown', (e) => {
                // Alt + letter shortcuts (plain Alt only; Ctrl/Meta combos pass through)
                if (e.altKey && !e.ctrlKey && !e.metaKey) {
                    const key = e.key.toUpperCase();

                    // Check if it's a letter A-Z
                    if (/^[A-Z]$/.test(key)) {
                        const letterChip = document.querySelector(`.letter-chip[data-letter="${key}"]`);
                        if (letterChip && !letterChip.classList.contains('disabled')) {
                            this.handleLetterClick(letterChip);
                            e.preventDefault();
                        }
                    }
                    // Special cases for non-letter filters
                    else if (e.key === '0' || e.key === ')') {
                        // Alt+0 for numbers (#)
                        const letterChip = document.querySelector('.letter-chip[data-letter="#"]');
                        if (letterChip && !letterChip.classList.contains('disabled')) {
                            this.handleLetterClick(letterChip);
                            e.preventDefault();
                        }
                    } else if (e.key === '2' || e.key === '@') {
                        // Alt+@ for special characters
                        const letterChip = document.querySelector('.letter-chip[data-letter="@"]');
                        if (letterChip && !letterChip.classList.contains('disabled')) {
                            this.handleLetterClick(letterChip);
                            e.preventDefault();
                        }
                    } else if (e.key === 'c' || e.key === 'C') {
                        // Alt+C for CJK characters
                        // NOTE(review): e.key is uppercased and matched against
                        // /^[A-Z]$/ above, so 'c'/'C' is consumed by the letter
                        // branch first and this branch appears unreachable —
                        // confirm the intended shortcut for the CJK chip.
                        const letterChip = document.querySelector('.letter-chip[data-letter="漢"]');
                        if (letterChip && !letterChip.classList.contains('disabled')) {
                            this.handleLetterClick(letterChip);
                            e.preventDefault();
                        }
                    }
                }
            });
        }
    }

    /**
     * Restore the collapse state from storage.
     * Only applies a state when a preference was previously stored
     * (getStorageItem returning null means "no preference").
     */
    restoreCollapseState() {
        const alphabetBarContainer = document.querySelector('.alphabet-bar-container');
        if (alphabetBarContainer) {
            const isCollapsed = getStorageItem(`${this.pageType}_alphabetBarCollapsed`);

            // If there's a stored preference, apply it
            // (assumes getStorageItem preserves the boolean type — confirm)
            if (isCollapsed !== null) {
                if (isCollapsed) {
                    alphabetBarContainer.classList.add('collapsed');
                } else {
                    alphabetBarContainer.classList.remove('collapsed');
                }
            }
        }
    }

    /**
     * Handle letter chip click. Clicking the active chip toggles the
     * filter off; clicking another chip moves the filter to it.
     * Triggers a grid reload either way.
     * @param {HTMLElement} letterChip - The letter chip that was clicked
     */
    handleLetterClick(letterChip) {
        const letter = letterChip.dataset.letter;
        const wasActive = letterChip.classList.contains('active');

        // Remove active class from all letter chips
        document.querySelectorAll('.letter-chip').forEach(chip => {
            chip.classList.remove('active');
        });

        if (!wasActive) {
            // Set the new active letter
            letterChip.classList.add('active');
            this.pageState.activeLetterFilter = letter;

            // Save to storage
            setStorageItem(`${this.pageType}_activeLetterFilter`, letter);
        } else {
            // Clear the active letter filter
            this.pageState.activeLetterFilter = null;

            // Remove from storage (stores null rather than deleting the key
            // — TODO confirm whether removeStorageItem would be preferable)
            setStorageItem(`${this.pageType}_activeLetterFilter`, null);
        }

        // Update visual indicator on toggle button
        this.updateToggleIndicator();

        // Trigger a reload with the new filter
        resetAndReload(true);
    }

    /**
     * Restore the active letter filter from storage.
     * Silently ignores a stored letter whose chip is missing or disabled.
     */
    restoreActiveLetterFilter() {
        const activeLetterFilter = getStorageItem(`${this.pageType}_activeLetterFilter`);
        if (activeLetterFilter) {
            const letterChip = document.querySelector(`.letter-chip[data-letter="${activeLetterFilter}"]`);
            if (letterChip && !letterChip.classList.contains('disabled')) {
                letterChip.classList.add('active');
                this.pageState.activeLetterFilter = activeLetterFilter;

                // Scroll the active letter into view if the alphabet bar is expanded
                this.scrollActiveLetterIntoView();
            }
        }
    }

    /**
     * Clear the active letter filter (UI, page state, and storage).
     * Note: does not trigger a reload — callers reload if needed.
     */
    clearActiveLetterFilter() {
        // Remove active class from all letter chips
        document.querySelectorAll('.letter-chip').forEach(chip => {
            chip.classList.remove('active');
        });

        // Clear the active letter filter
        this.pageState.activeLetterFilter = null;

        // Remove from storage
        setStorageItem(`${this.pageType}_activeLetterFilter`, null);

        // Update the toggle button indicator
        this.updateToggleIndicator();
    }

    /**
     * Update letter counts with new data and refresh the chip display.
     * @param {Object} newCounts - New letter count data (letter -> count)
     */
    updateCounts(newCounts) {
        this.letterCounts = { ...newCounts };
        this.updateLetterCountsDisplay();
    }

    /**
     * Update the toggle button visual indicator based on active filter.
     */
    updateToggleIndicator() {
        const toggleButton = document.querySelector('.toggle-alphabet-bar');
        const hasActiveFilter = this.pageState.activeLetterFilter !== null;

        if (toggleButton) {
            if (hasActiveFilter) {
                toggleButton.classList.add('has-active-letter');
            } else {
                toggleButton.classList.remove('has-active-letter');
            }
        }
    }

    /**
     * Scroll the active letter into view if the alphabet bar is expanded.
     */
    scrollActiveLetterIntoView() {
        if (!this.pageState.activeLetterFilter) return;

        const alphabetBarContainer = document.querySelector('.alphabet-bar-container');
        if (alphabetBarContainer) {
            const activeLetterChip = document.querySelector(`.letter-chip.active`);
            if (activeLetterChip) {
                // Use a small timeout to ensure the alphabet bar is fully expanded
                // (300ms — presumably matched to the expand animation; confirm)
                setTimeout(() => {
                    activeLetterChip.scrollIntoView({
                        behavior: 'smooth',
                        block: 'center',
                        inline: 'center'
                    });
                }, 300);
            }
        }
    }
}

View File

@@ -0,0 +1,14 @@
// Alphabet component index file
import { AlphabetBar } from './AlphabetBar.js';
// Export the class
export { AlphabetBar };
/**
* Factory function to create the appropriate alphabet bar
* @param {string} pageType - The type of page ('loras' or 'checkpoints')
* @returns {AlphabetBar} - The alphabet bar instance
*/
export function createAlphabetBar(pageType) {
    // Construct and hand back a bar bound to the requested page type.
    const alphabetBar = new AlphabetBar(pageType);
    return alphabetBar;
}

View File

@@ -5,7 +5,7 @@
import { showToast } from '../../utils/uiHelpers.js';
import { BASE_MODELS } from '../../utils/constants.js';
import { updateCheckpointCard } from '../../utils/cardUpdater.js';
import { saveModelMetadata } from '../../api/checkpointApi.js';
import { saveModelMetadata, renameCheckpointFile } from '../../api/checkpointApi.js';
/**
* Set up model name editing functionality
@@ -17,6 +17,9 @@ export function setupModelNameEditing(filePath) {
if (!modelNameContent || !editBtn) return;
// Store the file path in a data attribute for later use
modelNameContent.dataset.filePath = filePath;
// Show edit button on hover
const modelNameHeader = document.querySelector('.model-name-header');
modelNameHeader.addEventListener('mouseenter', () => {
@@ -24,14 +27,17 @@ export function setupModelNameEditing(filePath) {
});
modelNameHeader.addEventListener('mouseleave', () => {
if (!modelNameContent.getAttribute('data-editing')) {
if (!modelNameHeader.classList.contains('editing')) {
editBtn.classList.remove('visible');
}
});
// Handle edit button click
editBtn.addEventListener('click', () => {
modelNameContent.setAttribute('data-editing', 'true');
modelNameHeader.classList.add('editing');
modelNameContent.setAttribute('contenteditable', 'true');
// Store original value for comparison later
modelNameContent.dataset.originalValue = modelNameContent.textContent.trim();
modelNameContent.focus();
// Place cursor at the end
@@ -47,33 +53,25 @@ export function setupModelNameEditing(filePath) {
editBtn.classList.add('visible');
});
// Handle focus out
modelNameContent.addEventListener('blur', function() {
this.removeAttribute('data-editing');
editBtn.classList.remove('visible');
if (this.textContent.trim() === '') {
// Restore original model name if empty
// Use the passed filePath to find the card
const checkpointCard = document.querySelector(`.checkpoint-card[data-filepath="${filePath}"]`);
if (checkpointCard) {
this.textContent = checkpointCard.dataset.model_name;
}
}
});
// Handle enter key
// Handle keyboard events in edit mode
modelNameContent.addEventListener('keydown', function(e) {
if (!this.getAttribute('contenteditable')) return;
if (e.key === 'Enter') {
e.preventDefault();
// Use the passed filePath
saveModelName(filePath);
this.blur();
this.blur(); // Trigger save on Enter
} else if (e.key === 'Escape') {
e.preventDefault();
// Restore original value
this.textContent = this.dataset.originalValue;
exitEditMode();
}
});
// Limit model name length
modelNameContent.addEventListener('input', function() {
if (!this.getAttribute('contenteditable')) return;
if (this.textContent.length > 100) {
this.textContent = this.textContent.substring(0, 100);
// Place cursor at the end
@@ -87,44 +85,59 @@ export function setupModelNameEditing(filePath) {
showToast('Model name is limited to 100 characters', 'warning');
}
});
}
/**
* Save model name
* @param {string} filePath - File path
*/
async function saveModelName(filePath) {
const modelNameElement = document.querySelector('.model-name-content');
const newModelName = modelNameElement.textContent.trim();
// Validate model name
if (!newModelName) {
showToast('Model name cannot be empty', 'error');
return;
}
// Handle focus out - save changes
modelNameContent.addEventListener('blur', async function() {
if (!this.getAttribute('contenteditable')) return;
const newModelName = this.textContent.trim();
const originalValue = this.dataset.originalValue;
// Basic validation
if (!newModelName) {
// Restore original value if empty
this.textContent = originalValue;
showToast('Model name cannot be empty', 'error');
exitEditMode();
return;
}
if (newModelName === originalValue) {
// No changes, just exit edit mode
exitEditMode();
return;
}
try {
// Get the file path from the dataset
const filePath = this.dataset.filePath;
await saveModelMetadata(filePath, { model_name: newModelName });
// Update the corresponding checkpoint card's dataset and display
updateCheckpointCard(filePath, { model_name: newModelName });
// BUGFIX: Directly update the card's dataset.name attribute to ensure
// it's correctly read when reopening the modal
const checkpointCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
if (checkpointCard) {
checkpointCard.dataset.name = newModelName;
}
showToast('Model name updated successfully', 'success');
} catch (error) {
console.error('Error updating model name:', error);
this.textContent = originalValue; // Restore original model name
showToast('Failed to update model name', 'error');
} finally {
exitEditMode();
}
});
// Check if model name is too long
if (newModelName.length > 100) {
showToast('Model name is too long (maximum 100 characters)', 'error');
// Truncate the displayed text
modelNameElement.textContent = newModelName.substring(0, 100);
return;
}
try {
await saveModelMetadata(filePath, { model_name: newModelName });
// Update the card with the new model name
updateCheckpointCard(filePath, { name: newModelName });
showToast('Model name updated successfully', 'success');
// No need to reload the entire page
// setTimeout(() => {
// window.location.reload();
// }, 1500);
} catch (error) {
showToast('Failed to update model name', 'error');
function exitEditMode() {
modelNameContent.removeAttribute('contenteditable');
modelNameHeader.classList.remove('editing');
editBtn.classList.remove('visible');
}
}
@@ -138,6 +151,9 @@ export function setupBaseModelEditing(filePath) {
if (!baseModelContent || !editBtn) return;
// Store the file path in a data attribute for later use
baseModelContent.dataset.filePath = filePath;
// Show edit button on hover
const baseModelDisplay = document.querySelector('.base-model-display');
baseModelDisplay.addEventListener('mouseenter', () => {
@@ -171,12 +187,13 @@ export function setupBaseModelEditing(filePath) {
'Stable Diffusion 2.x': [BASE_MODELS.SD_2_0, BASE_MODELS.SD_2_1],
'Stable Diffusion 3.x': [BASE_MODELS.SD_3, BASE_MODELS.SD_3_5, BASE_MODELS.SD_3_5_MEDIUM, BASE_MODELS.SD_3_5_LARGE, BASE_MODELS.SD_3_5_LARGE_TURBO],
'SDXL': [BASE_MODELS.SDXL, BASE_MODELS.SDXL_LIGHTNING, BASE_MODELS.SDXL_HYPER],
'Video Models': [BASE_MODELS.SVD, BASE_MODELS.WAN_VIDEO, BASE_MODELS.HUNYUAN_VIDEO],
'Video Models': [BASE_MODELS.SVD, BASE_MODELS.LTXV, BASE_MODELS.WAN_VIDEO, BASE_MODELS.HUNYUAN_VIDEO],
'Other Models': [
BASE_MODELS.FLUX_1_D, BASE_MODELS.FLUX_1_S, BASE_MODELS.AURAFLOW,
BASE_MODELS.PIXART_A, BASE_MODELS.PIXART_E, BASE_MODELS.HUNYUAN_1,
BASE_MODELS.LUMINA, BASE_MODELS.KOLORS, BASE_MODELS.NOOBAI,
BASE_MODELS.ILLUSTRIOUS, BASE_MODELS.PONY, BASE_MODELS.UNKNOWN
BASE_MODELS.ILLUSTRIOUS, BASE_MODELS.PONY, BASE_MODELS.HIDREAM,
BASE_MODELS.UNKNOWN
]
};
@@ -302,6 +319,9 @@ export function setupFileNameEditing(filePath) {
if (!fileNameContent || !editBtn) return;
// Store the original file path
fileNameContent.dataset.filePath = filePath;
// Show edit button on hover
const fileNameWrapper = document.querySelector('.file-name-wrapper');
fileNameWrapper.addEventListener('mouseenter', () => {
@@ -399,19 +419,8 @@ export function setupFileNameEditing(filePath) {
try {
// Use the passed filePath (which includes the original filename)
// Call API to rename the file
const response = await fetch('/api/rename_checkpoint', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
file_path: filePath, // Use the full original path
new_file_name: newFileName
})
});
const result = await response.json();
// Call API to rename the file using the new function from checkpointApi.js
const result = await renameCheckpointFile(filePath, newFileName);
if (result.success) {
showToast('File name updated successfully', 'success');

View File

@@ -2,44 +2,22 @@
* ShowcaseView.js
* Handles showcase content (images, videos) display for checkpoint modal
*/
import { showToast, copyToClipboard } from '../../utils/uiHelpers.js';
import {
toggleShowcase,
setupShowcaseScroll,
scrollToTop
} from '../../utils/uiHelpers.js';
import { state } from '../../state/index.js';
import { NSFW_LEVELS } from '../../utils/constants.js';
/**
* Get the local URL for an example image if available
* @param {Object} img - Image object
* @param {number} index - Image index
* @param {string} modelHash - Model hash
* @returns {string|null} - Local URL or null if not available
*/
/**
 * Get the local URL for an example image if available
 * @param {Object} img - Image object (remote `url` is inspected for its extension)
 * @param {number} index - Image index (local files are 1-based: image_1, image_2, …)
 * @param {string} modelHash - Model hash
 * @returns {string|null} - Local URL or null if not available
 */
function getLocalExampleImageUrl(img, index, modelHash) {
    if (!modelHash) {
        return null;
    }

    const basePath = `/example_images_static/${modelHash}/image_${index + 1}`;

    // Remote extension: strip any query string, take the last dot segment.
    const cleanUrl = (img.url || '').split('?')[0];
    const remoteExt = cleanUrl.split('.').pop().toLowerCase();

    // Videos (mp4) keep their original extension locally.
    if (remoteExt === 'mp4') {
        return `${basePath}.mp4`;
    }

    // Images are stored as .webp when optimization is enabled (the default);
    // otherwise they keep the remote extension.
    const useWebp = state.settings.optimizeExampleImages !== false;
    return useWebp ? `${basePath}.webp` : `${basePath}.${remoteExt}`;
}
/**
* Render showcase content
* @param {Array} images - Array of images/videos to show
* @param {string} modelHash - Model hash for identifying local files
* @param {Array} exampleFiles - Local example files already fetched
* @returns {string} HTML content
*/
export function renderShowcaseContent(images, modelHash) {
export function renderShowcaseContent(images, exampleFiles = []) {
if (!images?.length) return '<div class="no-examples">No example images available</div>';
// Filter images based on SFW setting
@@ -82,9 +60,85 @@ export function renderShowcaseContent(images, modelHash) {
${hiddenNotification}
<div class="carousel-container">
${filteredImages.map((img, index) => {
// Try to get local URL for the example image
const localUrl = getLocalExampleImageUrl(img, index, modelHash);
return generateMediaWrapper(img, localUrl);
// Find matching file in our list of actual files
let localFile = null;
if (exampleFiles.length > 0) {
// Try to find the corresponding file by index first
localFile = exampleFiles.find(file => {
const match = file.name.match(/image_(\d+)\./);
return match && parseInt(match[1]) === index;
});
// If not found by index, just use the same position in the array if available
if (!localFile && index < exampleFiles.length) {
localFile = exampleFiles[index];
}
}
const remoteUrl = img.url || '';
const localUrl = localFile ? localFile.path : '';
const isVideo = localFile ? localFile.is_video :
remoteUrl.endsWith('.mp4') || remoteUrl.endsWith('.webm');
// Calculate appropriate aspect ratio
const aspectRatio = (img.height / img.width) * 100;
const containerWidth = 800; // modal content maximum width
const minHeightPercent = 40;
const maxHeightPercent = (window.innerHeight * 0.6 / containerWidth) * 100;
const heightPercent = Math.max(
minHeightPercent,
Math.min(maxHeightPercent, aspectRatio)
);
// Check if media should be blurred
const nsfwLevel = img.nsfwLevel !== undefined ? img.nsfwLevel : 0;
const shouldBlur = state.settings.blurMatureContent && nsfwLevel > NSFW_LEVELS.PG13;
// Determine NSFW warning text based on level
let nsfwText = "Mature Content";
if (nsfwLevel >= NSFW_LEVELS.XXX) {
nsfwText = "XXX-rated Content";
} else if (nsfwLevel >= NSFW_LEVELS.X) {
nsfwText = "X-rated Content";
} else if (nsfwLevel >= NSFW_LEVELS.R) {
nsfwText = "R-rated Content";
}
// Extract metadata from the image
const meta = img.meta || {};
const prompt = meta.prompt || '';
const negativePrompt = meta.negative_prompt || meta.negativePrompt || '';
const size = meta.Size || `${img.width}x${img.height}`;
const seed = meta.seed || '';
const model = meta.Model || '';
const steps = meta.steps || '';
const sampler = meta.sampler || '';
const cfgScale = meta.cfgScale || '';
const clipSkip = meta.clipSkip || '';
// Check if we have any meaningful generation parameters
const hasParams = seed || model || steps || sampler || cfgScale || clipSkip;
const hasPrompts = prompt || negativePrompt;
// Create metadata panel content
const metadataPanel = generateMetadataPanel(
hasParams, hasPrompts,
prompt, negativePrompt,
size, seed, model, steps, sampler, cfgScale, clipSkip
);
// Check if this is a video or image
if (isVideo) {
return generateVideoWrapper(
img, heightPercent, shouldBlur, nsfwText, metadataPanel,
localUrl, remoteUrl
);
}
return generateImageWrapper(
img, heightPercent, shouldBlur, nsfwText, metadataPanel,
localUrl, remoteUrl
);
}).join('')}
</div>
</div>
@@ -96,11 +150,8 @@ export function renderShowcaseContent(images, modelHash) {
* @param {Object} media - Media object with image or video data
* @returns {string} HTML content
*/
function generateMediaWrapper(media, localUrl = null) {
// Calculate appropriate aspect ratio:
// 1. Keep original aspect ratio
// 2. Limit maximum height to 60% of viewport height
// 3. Ensure minimum height is 40% of container width
function generateMediaWrapper(media, urls) {
// Calculate appropriate aspect ratio
const aspectRatio = (media.height / media.width) * 100;
const containerWidth = 800; // modal content maximum width
const minHeightPercent = 40;
@@ -149,10 +200,10 @@ function generateMediaWrapper(media, localUrl = null) {
// Check if this is a video or image
if (media.type === 'video') {
return generateVideoWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl);
return generateVideoWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, urls);
}
return generateImageWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl);
return generateImageWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, urls);
}
/**
@@ -225,7 +276,7 @@ function generateMetadataPanel(hasParams, hasPrompts, prompt, negativePrompt, si
/**
* Generate video wrapper HTML
*/
function generateVideoWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl = null) {
function generateVideoWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl, remoteUrl) {
return `
<div class="media-wrapper ${shouldBlur ? 'nsfw-media-wrapper' : ''}" style="padding-bottom: ${heightPercent}%">
${shouldBlur ? `
@@ -236,9 +287,9 @@ function generateVideoWrapper(media, heightPercent, shouldBlur, nsfwText, metada
<video controls autoplay muted loop crossorigin="anonymous"
referrerpolicy="no-referrer"
data-local-src="${localUrl || ''}"
data-remote-src="${media.url}"
data-remote-src="${remoteUrl}"
class="lazy ${shouldBlur ? 'blurred' : ''}">
<source data-local-src="${localUrl || ''}" data-remote-src="${media.url}" type="video/mp4">
<source data-local-src="${localUrl || ''}" data-remote-src="${remoteUrl}" type="video/mp4">
Your browser does not support video playback
</video>
${shouldBlur ? `
@@ -257,7 +308,7 @@ function generateVideoWrapper(media, heightPercent, shouldBlur, nsfwText, metada
/**
* Generate image wrapper HTML
*/
function generateImageWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl = null) {
function generateImageWrapper(media, heightPercent, shouldBlur, nsfwText, metadataPanel, localUrl, remoteUrl) {
return `
<div class="media-wrapper ${shouldBlur ? 'nsfw-media-wrapper' : ''}" style="padding-bottom: ${heightPercent}%">
${shouldBlur ? `
@@ -265,8 +316,8 @@ function generateImageWrapper(media, heightPercent, shouldBlur, nsfwText, metada
<i class="fas fa-eye"></i>
</button>
` : ''}
<img data-local-src="${localUrl || ''}"
data-remote-src="${media.url}"
<img data-local-src="${localUrl || ''}"
data-remote-src="${remoteUrl}"
alt="Preview"
crossorigin="anonymous"
referrerpolicy="no-referrer"
@@ -286,296 +337,10 @@ function generateImageWrapper(media, heightPercent, shouldBlur, nsfwText, metada
`;
}
/**
 * Toggle showcase expansion.
 * Expands or collapses the example-images carousel that immediately follows
 * the clicked header element, updates the indicator text and chevron icon,
 * and wires up lazy loading, NSFW blur, and metadata-panel handlers when the
 * carousel is expanded.
 * @param {HTMLElement} element - Header/scroll-indicator element; its
 *   nextElementSibling must be the carousel container.
 */
export function toggleShowcase(element) {
    const carousel = element.nextElementSibling;
    const isCollapsed = carousel.classList.contains('collapsed');
    const indicator = element.querySelector('span');
    const icon = element.querySelector('i');

    carousel.classList.toggle('collapsed');

    if (isCollapsed) {
        // Expanding: fixed label (the example count is not shown here).
        // The original also computed the wrapper count here but never used it.
        indicator.textContent = `Scroll or click to hide examples`;
        icon.classList.replace('fa-chevron-down', 'fa-chevron-up');
        // NOTE(review): these initializers run on every expansion; if they
        // attach listeners, repeated expand/collapse may stack duplicates —
        // confirm they are idempotent.
        initLazyLoading(carousel);
        // Initialize NSFW content blur toggle handlers
        initNsfwBlurHandlers(carousel);
        // Initialize metadata panel interaction handlers
        initMetadataPanelHandlers(carousel);
    } else {
        // Collapsing: show how many examples are hidden behind the toggle.
        const count = carousel.querySelectorAll('.media-wrapper').length;
        indicator.textContent = `Scroll or click to show ${count} examples`;
        icon.classList.replace('fa-chevron-up', 'fa-chevron-down');
    }
}
// Use the shared setupShowcaseScroll function with the correct modal ID
// NOTE(review): setupShowcaseScroll, scrollToTop and toggleShowcase also
// appear as inline `export function` declarations in this view; if both
// forms exist in the same module this re-export is a duplicate-export
// SyntaxError in ES modules — confirm against the actual file on disk.
export { setupShowcaseScroll, scrollToTop, toggleShowcase };
/**
 * Initialize metadata panel interaction handlers.
 * For every `.image-metadata-panel` inside the container: isolates click and
 * wheel events from the surrounding media wrapper / modal, and wires each
 * copy-prompt button to copy its matching prompt element's text.
 * @param {HTMLElement} container - Element containing `.media-wrapper` nodes.
 */
function initMetadataPanelHandlers(container) {
    for (const wrapper of container.querySelectorAll('.media-wrapper')) {
        const panel = wrapper.querySelector('.image-metadata-panel');
        if (!panel) continue;

        // Keep clicks inside the panel from reaching the media wrapper
        panel.addEventListener('click', (event) => {
            event.stopPropagation();
        });

        // Wire each copy button to its matching prompt element
        for (const copyBtn of panel.querySelectorAll('.copy-prompt-btn')) {
            const promptElement = wrapper.querySelector(`#prompt-${copyBtn.dataset.promptIndex}`);

            copyBtn.addEventListener('click', async (event) => {
                event.stopPropagation();
                if (!promptElement) return;

                try {
                    await copyToClipboard(promptElement.textContent, 'Prompt copied to clipboard');
                } catch (err) {
                    console.error('Copy failed:', err);
                    showToast('Copy failed', 'error');
                }
            });
        }

        // Keep wheel scrolling inside the panel from scrolling the modal
        panel.addEventListener('wheel', (event) => {
            event.stopPropagation();
        });
    }
}
/**
 * Initialize blur toggle handlers.
 * Attaches click handlers to the NSFW blur-toggle (eye) buttons and to the
 * "Show" buttons inside NSFW overlays within the given container.
 * @param {HTMLElement} container - Element containing `.media-wrapper` nodes.
 */
function initNsfwBlurHandlers(container) {
    // Eye / eye-slash buttons flip the blur on their media element
    for (const toggleBtn of container.querySelectorAll('.toggle-blur-btn')) {
        toggleBtn.addEventListener('click', (event) => {
            event.stopPropagation();
            const wrapper = toggleBtn.closest('.media-wrapper');
            const media = wrapper.querySelector('img, video');
            const nowBlurred = media.classList.toggle('blurred');

            // Icon mirrors the new blur state: eye while blurred, eye-slash while visible
            toggleBtn.querySelector('i').className = nowBlurred ? 'fas fa-eye' : 'fas fa-eye-slash';

            // The NSFW overlay is only shown while the media is blurred
            const overlay = wrapper.querySelector('.nsfw-overlay');
            if (overlay) {
                overlay.style.display = nowBlurred ? 'flex' : 'none';
            }
        });
    }

    // "Show" buttons inside overlays reveal the media unconditionally
    for (const showBtn of container.querySelectorAll('.show-content-btn')) {
        showBtn.addEventListener('click', (event) => {
            event.stopPropagation();
            const wrapper = showBtn.closest('.media-wrapper');
            wrapper.querySelector('img, video').classList.remove('blurred');

            // Keep the toggle button's icon in sync with the revealed state
            const toggleBtn = wrapper.querySelector('.toggle-blur-btn');
            if (toggleBtn) {
                toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
            }

            const overlay = wrapper.querySelector('.nsfw-overlay');
            if (overlay) {
                overlay.style.display = 'none';
            }
        });
    }
}
/**
 * Initialize lazy loading for images and videos.
 * Observes every `.lazy` element in the container; once an element scrolls
 * into view, the local copy (data-local-src) is tried first and the remote
 * URL (data-remote-src) is used as fallback.
 * @param {HTMLElement} container - Element containing `.lazy` media nodes.
 */
function initLazyLoading(container) {
    // Images: probe with an off-DOM Image so a missing local file never
    // flashes a broken image; fall back to the remote URL on error.
    const loadImage = (img, localSrc, remoteSrc) => {
        if (!localSrc) {
            img.src = remoteSrc;
            return;
        }
        const probe = new Image();
        probe.onload = () => { img.src = localSrc; };
        probe.onerror = () => { img.src = remoteSrc; };
        probe.src = localSrc;
    };

    // Videos: HEAD-request the local file to check it exists, then point both
    // the <video> element and its <source> child at whichever URL won.
    const loadVideo = (video, localSrc, remoteSrc) => {
        const applySrc = (src) => {
            video.src = src;
            video.querySelector('source').src = src;
            video.load();
        };
        if (!localSrc) {
            applySrc(remoteSrc);
            return;
        }
        fetch(localSrc, { method: 'HEAD' })
            .then((response) => applySrc(response.ok ? localSrc : remoteSrc))
            .catch(() => applySrc(remoteSrc));
    };

    const observer = new IntersectionObserver((entries) => {
        for (const entry of entries) {
            if (!entry.isIntersecting) continue;
            const el = entry.target;
            const { localSrc, remoteSrc } = el.dataset;

            if (el.tagName.toLowerCase() === 'video') {
                loadVideo(el, localSrc, remoteSrc);
            } else {
                loadImage(el, localSrc, remoteSrc);
            }

            el.classList.remove('lazy');
            observer.unobserve(el);
        }
    });

    container.querySelectorAll('.lazy').forEach((el) => observer.observe(el));
}
/**
 * Set up showcase scroll functionality.
 * Auto-expands the collapsed showcase carousel when the user wheel-scrolls
 * near the bottom of the modal, and installs the back-to-top button as soon
 * as the modal content appears in the DOM.
 * @param {string} [modalId='checkpointModal'] - DOM id of the modal to watch.
 *   The id was previously hard-coded even though the DOMContentLoaded caller
 *   already passes 'checkpointModal'; this makes the existing argument
 *   effective while remaining backward-compatible.
 */
export function setupShowcaseScroll(modalId = 'checkpointModal') {
    // Listen for wheel events to auto-expand the collapsed showcase
    document.addEventListener('wheel', (event) => {
        const modalContent = document.querySelector(`#${modalId} .modal-content`);
        if (!modalContent) return;

        const showcase = modalContent.querySelector('.showcase-section');
        if (!showcase) return;

        const carousel = showcase.querySelector('.carousel');
        const scrollIndicator = showcase.querySelector('.scroll-indicator');

        if (carousel?.classList.contains('collapsed') && event.deltaY > 0) {
            // Only expand once the user has scrolled near the bottom of the modal
            const isNearBottom = modalContent.scrollHeight - modalContent.scrollTop - modalContent.clientHeight < 100;
            if (isNearBottom) {
                toggleShowcase(scrollIndicator);
                event.preventDefault();
            }
        }
    }, { passive: false });

    // Use MutationObserver to set up back-to-top button when modal content is added
    const observer = new MutationObserver((mutations) => {
        for (const mutation of mutations) {
            if (mutation.type === 'childList' && mutation.addedNodes.length) {
                const modal = document.getElementById(modalId);
                if (modal && modal.querySelector('.modal-content')) {
                    setupBackToTopButton(modal.querySelector('.modal-content'));
                }
            }
        }
    });

    // Start observing the document body for changes
    observer.observe(document.body, { childList: true, subtree: true });

    // Also try to set up the button immediately in case the modal is already open
    const modalContent = document.querySelector(`#${modalId} .modal-content`);
    if (modalContent) {
        setupBackToTopButton(modalContent);
    }
}
/**
 * Set up back-to-top button.
 * Shows the `.back-to-top` button once the modal content is scrolled more
 * than 300px, hides it otherwise, and checks the initial position.
 * @param {HTMLElement} modalContent - The scrollable modal content element.
 */
function setupBackToTopButton(modalContent) {
    // BUG FIX: the previous code set `modalContent.onscroll = null` to "remove
    // any existing scroll listeners", but that cannot remove listeners added
    // via addEventListener, so repeated calls stacked duplicate handlers.
    // Track the handler on the element so each call replaces the last one.
    if (modalContent._backToTopScrollHandler) {
        modalContent.removeEventListener('scroll', modalContent._backToTopScrollHandler);
    }

    const onScroll = () => {
        const backToTopBtn = modalContent.querySelector('.back-to-top');
        if (!backToTopBtn) return;
        // Visible only after scrolling past 300px
        backToTopBtn.classList.toggle('visible', modalContent.scrollTop > 300);
    };

    modalContent._backToTopScrollHandler = onScroll;
    modalContent.addEventListener('scroll', onScroll);

    // Trigger a scroll event to check the initial position
    modalContent.dispatchEvent(new Event('scroll'));
}
/**
 * Scroll to top of modal content.
 * Smooth-scrolls the nearest `.modal-content` ancestor of the clicked
 * back-to-top button to the top; does nothing if no such ancestor exists.
 * @param {HTMLElement} button - The back-to-top button that was clicked.
 */
export function scrollToTop(button) {
    const modalContent = button.closest('.modal-content');
    if (!modalContent) return;

    modalContent.scrollTo({ top: 0, behavior: 'smooth' });
}
// Initialize the showcase scroll when this module is imported.
// Deferred to DOMContentLoaded so the modal's elements exist before any
// querySelector calls run.
document.addEventListener('DOMContentLoaded', () => {
setupShowcaseScroll('checkpointModal');
});

View File

@@ -3,8 +3,7 @@
*
* Modularized checkpoint modal component that handles checkpoint model details display
*/
import { showToast } from '../../utils/uiHelpers.js';
import { state } from '../../state/index.js';
import { showToast, getExampleImageFiles, initLazyLoading, initNsfwBlurHandlers, initMetadataPanelHandlers } from '../../utils/uiHelpers.js';
import { modalManager } from '../../managers/ModalManager.js';
import { renderShowcaseContent, toggleShowcase, setupShowcaseScroll, scrollToTop } from './ShowcaseView.js';
import { setupTabSwitching, loadModelDescription } from './ModelDescription.js';
@@ -16,6 +15,7 @@ import {
import { saveModelMetadata } from '../../api/checkpointApi.js';
import { renderCompactTags, setupTagTooltip, formatFileSize } from './utils.js';
import { updateCheckpointCard } from '../../utils/cardUpdater.js';
import { state } from '../../state/index.js';
/**
* Display the checkpoint modal with the given checkpoint data
@@ -27,11 +27,25 @@ export function showCheckpointModal(checkpoint) {
<button class="close" onclick="modalManager.closeModal('checkpointModal')">&times;</button>
<header class="modal-header">
<div class="model-name-header">
<h2 class="model-name-content" contenteditable="true" spellcheck="false">${checkpoint.model_name || 'Checkpoint Details'}</h2>
<h2 class="model-name-content">${checkpoint.model_name || 'Checkpoint Details'}</h2>
<button class="edit-model-name-btn" title="Edit model name">
<i class="fas fa-pencil-alt"></i>
</button>
</div>
${checkpoint.civitai?.creator ? `
<div class="creator-info">
${checkpoint.civitai.creator.image ?
`<div class="creator-avatar">
<img src="${checkpoint.civitai.creator.image}" alt="${checkpoint.civitai.creator.username}" onerror="this.onerror=null; this.src='static/icons/user-placeholder.png';">
</div>` :
`<div class="creator-avatar creator-placeholder">
<i class="fas fa-user"></i>
</div>`
}
<span class="creator-username">${checkpoint.civitai.creator.username}</span>
</div>` : ''}
${renderCompactTags(checkpoint.tags || [])}
</header>
@@ -83,7 +97,7 @@ export function showCheckpointModal(checkpoint) {
</div>
<div class="info-item full-width">
<label>About this version</label>
<div class="description-text">${checkpoint.description || 'N/A'}</div>
<div class="description-text">${checkpoint.civitai?.description || 'N/A'}</div>
</div>
</div>
</div>
@@ -96,7 +110,9 @@ export function showCheckpointModal(checkpoint) {
<div class="tab-content">
<div id="showcase-tab" class="tab-pane active">
${renderShowcaseContent(checkpoint.civitai?.images || [], checkpoint.sha256)}
<div class="recipes-loading">
<i class="fas fa-spinner fa-spin"></i> Loading recipes...
</div>
</div>
<div id="description-tab" class="tab-pane">
@@ -132,6 +148,69 @@ export function showCheckpointModal(checkpoint) {
if (checkpoint.civitai?.modelId && !checkpoint.modelDescription) {
loadModelDescription(checkpoint.civitai.modelId, checkpoint.file_path);
}
// Load example images asynchronously
loadExampleImages(checkpoint.civitai?.images, checkpoint.sha256, checkpoint.file_path);
}
/**
 * Load example images asynchronously.
 * Fetches the list of locally-downloaded example files for a model and then
 * re-renders the showcase tab with both remote images and local files,
 * re-initializing the showcase handlers if the carousel is expanded.
 * @param {Array} images - Array of remote image objects (may be undefined)
 * @param {string} modelHash - Model hash for fetching local files
 * @param {string} filePath - File path for fetching local files
 */
async function loadExampleImages(images, modelHash, filePath) {
    try {
        const showcaseTab = document.getElementById('showcase-tab');
        if (!showcaseTab) return;

        // First fetch local example files
        let localFiles = [];
        try {
            // Choose endpoint based on centralized examples setting
            const useCentralized = state.global.settings.useCentralizedExamples !== false;
            const endpoint = useCentralized ? '/api/example-image-files' : '/api/model-example-files';

            // Build the query with URLSearchParams so both parameters are
            // URL-encoded consistently (previously file_path was encoded but
            // model_hash was concatenated raw).
            const params = new URLSearchParams(
                useCentralized ? { model_hash: modelHash } : { file_path: filePath }
            );

            const response = await fetch(`${endpoint}?${params}`);
            const result = await response.json();
            if (result.success) {
                localFiles = result.files;
            }
        } catch (error) {
            // Best-effort: missing local files just means we render remote-only
            console.error("Failed to get example files:", error);
        }

        // Then render with both remote images and local files.
        // Default to [] to match the initial render, which passes
        // `checkpoint.civitai?.images || []`.
        showcaseTab.innerHTML = renderShowcaseContent(images ?? [], localFiles);

        // Re-initialize the showcase event listeners, but only if the
        // carousel exists and is currently expanded.
        const carousel = showcaseTab.querySelector('.carousel');
        if (carousel && !carousel.classList.contains('collapsed')) {
            initLazyLoading(carousel);
            initNsfwBlurHandlers(carousel);
            initMetadataPanelHandlers(carousel);
        }
    } catch (error) {
        console.error('Error loading example images:', error);
        const showcaseTab = document.getElementById('showcase-tab');
        if (showcaseTab) {
            showcaseTab.innerHTML = `
            <div class="error-message">
            <i class="fas fa-exclamation-circle"></i>
            Error loading example images
            </div>
            `;
        }
    }
}
/**

View File

@@ -33,8 +33,8 @@ export class CheckpointsControls extends PageControls {
return await resetAndReload(updateFolders);
},
refreshModels: async () => {
return await refreshCheckpoints();
refreshModels: async (fullRebuild = false) => {
return await refreshCheckpoints(fullRebuild);
},
// Add fetch from Civitai functionality for checkpoints

Some files were not shown because too many files have changed in this diff Show More