Compare commits

...

271 Commits

Author SHA1 Message Date
Will Miao
cede387783 Bump version to 0.8.6 in pyproject.toml 2025-04-14 08:42:00 +08:00
Will Miao
b206427d50 feat: Update README to include enhanced checkpoint management features and improved initial loading details 2025-04-14 08:40:42 +08:00
Will Miao
47d96e2037 feat: Simplify recipe page initialization and enhance error handling for recipe cache loading 2025-04-14 07:03:34 +08:00
Will Miao
e51f7cc1a7 feat: Enhance checkpoint download manager to save active folder preference and update UI accordingly 2025-04-13 22:12:18 +08:00
Will Miao
40381d4b11 feat: Optimize session management and enhance download functionality with resumable support 2025-04-13 21:51:21 +08:00
Will Miao
76fc9e5a3d feat: Add WebSocket support for checkpoint download progress and update related components 2025-04-13 21:31:01 +08:00
Will Miao
9822f2c614 feat: Add Civitai model version retrieval for Checkpoints and update error handling in download managers 2025-04-13 20:36:19 +08:00
Will Miao
8854334ab5 Add tip images 2025-04-13 18:46:44 +08:00
Will Miao
53080844d2 feat: Refactor progress bar classes for initialization component to improve clarity and avoid conflicts 2025-04-13 18:42:36 +08:00
Will Miao
76fd722e33 feat: Improve card layout by adding overflow hidden and fixing flexbox sizing issues 2025-04-13 18:20:15 +08:00
Will Miao
fa27513f76 feat: Enhance infinite scroll functionality with improved observer settings and scroll event handling 2025-04-13 17:58:14 +08:00
Will Miao
72c6f91130 feat: Update initialization component with loading progress and tips carousel 2025-04-13 14:03:02 +08:00
Will Miao
5918f35b8b feat: Add keyboard shortcuts for search input focus and selection 2025-04-13 13:12:32 +08:00
Will Miao
0b11e6e6d0 feat: Enhance initialization component with progress tracking and UI improvements 2025-04-13 12:58:38 +08:00
Will Miao
a043b487bd feat: Add initialization progress WebSocket and UI components
- Implement WebSocket route for initialization progress updates
- Create initialization component with progress bar and stages
- Add styles for initialization UI
- Update base template to include initialization component
- Enhance model scanner to broadcast progress during initialization
2025-04-13 10:41:27 +08:00
pixelpaws
3982489e67 Merge pull request #97 from willmiao/dev
feat: Enhance checkpoint handling by initializing paths and adding static routes
2025-04-12 19:10:13 +08:00
Will Miao
5f3c515323 feat: Enhance checkpoint handling by initializing paths and adding static routes 2025-04-12 19:06:17 +08:00
pixelpaws
6e1297d734 Merge pull request #96 from willmiao/dev
Dev
2025-04-12 17:01:07 +08:00
Will Miao
8f3cbdd257 fix: Simplify session item retrieval in loadMoreModels function 2025-04-12 16:54:27 +08:00
Will Miao
2fc06ae64e Refactor file name update in Lora card
- Updated the setupFileNameEditing function to pass the new file name in the updates object when calling updateLoraCard.
- Removed the page reload after file name change to improve user experience.
- Enhanced the updateLoraCard function to handle the 'file_name' update, ensuring the dataset reflects the new file name correctly.
2025-04-12 16:35:35 +08:00
Will Miao
515aa1d2bd fix: Improve error logging and update lora monitor path handling 2025-04-12 16:24:29 +08:00
Will Miao
ff7a36394a refactor: Optimize event handling for folder tags using delegation 2025-04-12 16:15:29 +08:00
Will Miao
5261ab249a Fix checkpoints sort_by 2025-04-12 13:39:32 +08:00
Will Miao
c3192351da feat: Add support for reading SHA256 from .sha256 file in get_file_info function 2025-04-12 11:59:40 +08:00
Will Miao
ce30d067a6 feat: Import and expose loadMoreLoras function in LoraPageManager 2025-04-12 11:46:26 +08:00
Will Miao
e84a8a72c5 feat: Add save metadata route and update checkpoint card functionality 2025-04-12 11:18:21 +08:00
Will Miao
10a4fe04d1 refactor: Update API endpoint for saving model metadata to use consistent route structure 2025-04-12 09:03:34 +08:00
Will Miao
d5ce6441e3 refactor: Simplify service initialization in LoraRoutes and RecipeRoutes, and adjust logging level in ServiceRegistry 2025-04-12 09:01:09 +08:00
Will Miao
a8d21fb1d6 refactor: Remove unused service imports and add new route for scanning LoRA files 2025-04-12 07:49:11 +08:00
Will Miao
9277d8d8f8 refactor: Disable file monitoring functionality with ENABLE_FILE_MONITORING flag 2025-04-12 06:47:47 +08:00
Will Miao
0618541527 checkpoint 2025-04-11 20:22:12 +08:00
Will Miao
1db49a4dd4 refactor: Enhance checkpoint download functionality with new modal and manager integration 2025-04-11 18:25:37 +08:00
Will Miao
3df96034a1 refactor: Consolidate model handling functions into baseModelApi for better code reuse and organization 2025-04-11 14:35:56 +08:00
Will Miao
e991dc061d refactor: Implement common endpoint handlers for model management in ModelRouteUtils and update routes in CheckpointsRoutes 2025-04-11 12:06:05 +08:00
Will Miao
56670066c7 refactor: Optimize preview image handling by converting to webp format and improving error logging 2025-04-11 11:17:49 +08:00
Will Miao
31d27ff3fa refactor: Extract model-related utility functions into ModelRouteUtils for better code organization 2025-04-11 10:54:19 +08:00
Will Miao
297ff0dd25 refactor: Improve download handling for previews and optimize image conversion in DownloadManager 2025-04-11 09:00:58 +08:00
Will Miao
b0a5b48fb2 refactor: Enhance preview file handling and add update_preview_in_cache method for ModelScanner 2025-04-11 08:43:21 +08:00
Will Miao
ac244e6ad9 refactor: Replace hardcoded image width with CARD_PREVIEW_WIDTH constant for consistency 2025-04-11 08:19:19 +08:00
Will Miao
7393e92b21 refactor: Consolidate preview file extensions into constants for improved maintainability 2025-04-11 06:19:15 +08:00
Will Miao
86810d9f03 refactor: Remove move_model method from LoraScanner class to streamline code 2025-04-11 06:05:19 +08:00
Will Miao
18aa8d11ad refactor: Remove showToast call from clearCustomFilter method in LorasControls 2025-04-11 05:59:32 +08:00
Will Miao
fafec56f09 refactor: Rename update_single_lora_cache to update_single_model_cache for consistency 2025-04-11 05:52:56 +08:00
Will Miao
129ca9da81 feat: Implement checkpoint modal functionality with metadata editing, showcase display, and utility functions
- Added ModelMetadata.js for handling model metadata editing, including model name, base model, and file name.
- Introduced ShowcaseView.js to manage the display of images and videos in the checkpoint modal, including NSFW filtering and lazy loading.
- Created index.js as the main entry point for the checkpoint modal, integrating various components and functionalities.
- Developed utils.js for utility functions related to file size formatting and tag rendering.
- Enhanced user experience with editable fields, toast notifications, and improved showcase scrolling.
2025-04-10 22:59:09 +08:00
Will Miao
cbfb9ac87c Enhance CheckpointModal: Implement detailed checkpoint display, editable fields, and showcase functionality 2025-04-10 22:25:40 +08:00
Will Miao
42309edef4 Refactor visibility toggle: Remove toggleApiKeyVisibility function and update related button in modals 2025-04-10 21:43:56 +08:00
Will Miao
559e57ca46 Enhance CheckpointCard: Implement NSFW content handling, toggle blur functionality, and improve video autoplay behavior 2025-04-10 21:28:34 +08:00
Will Miao
311bf1f157 Add support for '.gguf' file extension in CheckpointScanner 2025-04-10 21:15:12 +08:00
Will Miao
131c3cc324 Add Civitai metadata fetching functionality for checkpoints
- Implement fetchCivitai API method to retrieve metadata from Civitai.
- Enhance CheckpointsControls to include fetch from Civitai functionality.
- Update PageControls to register fetch from Civitai event listener for both LoRAs and Checkpoints.
2025-04-10 21:07:17 +08:00
Will Miao
152ec0da0d Refactor Checkpoints functionality: Integrate loadMoreCheckpoints API, remove CheckpointSearchManager, and enhance FilterManager for improved checkpoint loading and filtering. 2025-04-10 19:57:04 +08:00
Will Miao
ee04df40c3 Refactor controls and pagination for Checkpoints and LoRAs: Implement unified PageControls, enhance API integration, and improve event handling for better user experience. 2025-04-10 19:41:02 +08:00
Will Miao
252e90a633 Enhance Checkpoints Manager: Implement API integration for checkpoints, add filtering and sorting options, and improve UI components for better user experience 2025-04-10 16:04:08 +08:00
Will Miao
048d486fa6 Refactor cache initialization in LoraManager and RecipeScanner for improved background processing and error handling 2025-04-10 11:34:19 +08:00
Will Miao
8fdfb68741 checkpoint 2025-04-10 09:08:51 +08:00
Will Miao
64c9e4aeca Update version to 0.8.5 and add release notes for enhanced features and improvements 2025-04-09 11:41:38 +08:00
Will Miao
08b90e8767 Update toast messages to clarify settings update notifications 2025-04-09 11:29:02 +08:00
Will Miao
0206613f9e Update NSFW level filter to include 'R' rating for improved content moderation 2025-04-09 11:25:52 +08:00
Will Miao
ae0629628e Enhance settings modal with video autoplay on hover option and improve layout. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/92 2025-04-09 11:18:30 +08:00
Will Miao
785b2e7287 style: Add padding to recipe list to prevent item cutoff on hover 2025-04-08 13:51:00 +08:00
Will Miao
43e3d0552e style: Update filter indicator and button styles for improved UI consistency
feat: Add pulse animation to filter indicators in Lora and recipe management
refactor: Change filter-active button to a div for better semantic structure
2025-04-08 13:45:15 +08:00
Will Miao
801aa2e876 Enhance Lora and recipe integration with improved filtering and UI updates
- Added support for filtering LoRAs by hash in both API and UI components.
- Implemented session storage management for custom filter states when navigating between recipes and LoRAs.
- Introduced a new button in the recipe modal to view associated LoRAs, enhancing user navigation.
- Updated CSS styles for new UI elements, including a custom filter indicator and LoRA view button.
- Refactored existing JavaScript components to streamline the handling of filter parameters and improve maintainability.
2025-04-08 12:23:51 +08:00
Will Miao
bddc7a438d feat: Add Lora recipes retrieval and filtering functionality
- Implemented a new API endpoint to fetch recipes associated with a specific Lora by its hash.
- Enhanced the recipe scanning logic to support filtering by Lora hash and bypassing other filters.
- Added a new method to retrieve a recipe by its ID with formatted metadata.
- Created a new RecipeTab component to display recipes in the Lora modal.
- Introduced session storage utilities for managing custom filter states.
- Updated the UI to include a custom filter indicator and loading/error states for recipes.
- Refactored existing recipe management logic to accommodate new features and improve maintainability.
2025-04-07 21:53:39 +08:00
Will Miao
b8c78a68e7 refactor: remove unused recipe card CSS styles 2025-04-07 20:36:58 +08:00
Will Miao
49219f4447 feat: Refactor LoraModal into modular components
- Added ShowcaseView.js for rendering LoRA model showcase content with NSFW filtering and lazy loading.
- Introduced TriggerWords.js to manage trigger words, including editing, adding, and saving functionality.
- Created index.js as the main entry point for the LoraModal, integrating all components and functionalities.
- Implemented utils.js for utility functions such as file size formatting and tag rendering.
- Enhanced user experience with editable fields, tooltips, and improved event handling for trigger words and presets.
2025-04-07 15:36:13 +08:00
Will Miao
59b1abb719 Update version to 0.8.4 and add release notes for node layout improvements and bug fixes 2025-04-07 14:49:34 +08:00
Will Miao
3e2cfb552b Refactor image saving logic for batch processing and unique filename generation. Fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/79 2025-04-07 14:37:39 +08:00
Will Miao
779be1b8d0 Refactor loras_widget styles for improved layout consistency 2025-04-07 13:42:31 +08:00
Will Miao
faf74de238 Enhance model move functionality with detailed error handling and user feedback 2025-04-07 11:14:56 +08:00
Will Miao
50a51c2e79 Refactor Lora widget and dynamic module loading
- Updated lora_loader.js to dynamically import the appropriate loras widget based on ComfyUI version, enhancing compatibility and maintainability.
- Enhanced loras_widget.js with improved height management and styling for better user experience.
- Introduced utility functions in utils.js for version checking and dynamic imports, streamlining widget loading processes.
- Improved overall structure and readability of the code, ensuring better performance and easier future updates.
2025-04-07 09:02:36 +08:00
Will Miao
d31e641496 Add dynamic tags widget selection based on ComfyUI version
- Introduced a mechanism to dynamically import either the legacy or modern tags widget based on the ComfyUI frontend version.
- Updated the `addTagsWidget` function in both `tags_widget.js` and `legacy_tags_widget.js` to enhance tag rendering and widget height management.
- Improved styling and layout for tags, ensuring better alignment and responsiveness.
- Added a new serialization method to handle potential issues with ComfyUI's serialization process.
- Enhanced the overall user experience by providing a more modern and flexible tags widget implementation.
2025-04-07 08:42:20 +08:00
Will Miao
f2d36f5be9 Refactor DownloadManager and LoraFileHandler for improved file monitoring
- Simplified the path handling in DownloadManager by directly adding normalized paths to the ignore list.
- Updated LoraFileHandler to utilize a set for ignore paths, enhancing performance and clarity.
- Implemented debouncing for modified file events to prevent duplicate processing and improve efficiency.
- Enhanced the handling of file creation, modification, and deletion events for .safetensors files, ensuring accurate processing and logging.
- Adjusted cache operations to streamline the addition and removal of files based on real paths.
2025-04-06 22:27:55 +08:00
Will Miao
0b55f61fac Refactor LoraFileHandler to use real file paths for monitoring
- Updated the file monitoring logic to store and verify real file paths instead of mapped paths, ensuring accurate existence checks.
- Enhanced logging for error handling and processing actions, including detailed error messages with exception info.
- Adjusted cache operations to reflect the use of normalized paths for consistency in add/remove actions.
- Improved handling of ignore paths by removing successfully processed files from the ignore list.
2025-04-05 12:10:46 +08:00
pixelpaws
4156dcbafd Merge pull request #83 from willmiao/dev
Dev
2025-04-05 05:28:22 +08:00
Will Miao
36e6ac2362 Add CheckpointMetadata class for enhanced model metadata management
- Introduced a new CheckpointMetadata dataclass to encapsulate metadata for checkpoint models.
- Included fields for file details, model specifications, and additional attributes such as resolution and architecture.
- Implemented a __post_init__ method to initialize tags as an empty list if not provided, ensuring consistent data handling.
2025-04-05 05:16:52 +08:00
Will Miao
9613199152 Enhance SaveImage functionality with custom prompt support
- Added a new optional parameter `custom_prompt` to the SaveImage class methods to allow users to override the default prompt.
- Updated the `format_metadata` method to utilize the custom prompt if provided.
- Modified the `save_images` and `process_image` methods to accept and pass the custom prompt through the workflow processing.
2025-04-04 07:47:46 +08:00
pixelpaws
14328d7496 Merge pull request #77 from willmiao/dev
Add reconnect functionality for deleted LoRAs in recipe modal
2025-04-03 16:56:04 +08:00
Will Miao
6af12d1acc Add reconnect functionality for deleted LoRAs in recipe modal
- Introduced a new API endpoint to reconnect deleted LoRAs to local files.
- Updated RecipeModal to include UI elements for reconnecting LoRAs, including input fields and buttons.
- Enhanced CSS styles for deleted badges and reconnect containers to improve user experience.
- Implemented event handling for reconnect actions, including input validation and API calls.
- Updated recipe data handling to reflect changes after reconnecting LoRAs.
2025-04-03 16:55:19 +08:00
pixelpaws
9b44e49879 Merge pull request #75 from willmiao/dev
Enhance file monitoring for LoRA files
2025-04-03 11:10:29 +08:00
Will Miao
afee18f146 Enhance file monitoring for LoRA files
- Added a method to map symbolic links back to actual paths in the Config class.
- Improved file creation handling in LoraFileHandler to check for file size and existence before processing.
- Introduced handling for file modification events to update the ignore list and schedule updates.
- Increased debounce delay in _process_changes to allow for file downloads to complete.
- Enhanced action processing to prioritize 'add' actions and verify file existence before adding to cache.
2025-04-03 11:09:30 +08:00
Will Miao
f007369a66 Bump version to v0.8.3 2025-04-02 20:18:51 +08:00
pixelpaws
9a9c166dbe Merge pull request #74 from willmiao/dev
Dev
2025-04-02 20:15:11 +08:00
Will Miao
2f90e32dbf Delete unused files 2025-04-02 20:11:41 +08:00
Will Miao
26355ccb79 chore: remove .vscode from git 2025-04-02 20:09:58 +08:00
Will Miao
27ea3c0c8e chore: add .vscode to gitignore 2025-04-02 20:09:08 +08:00
Will Miao
5aa35b211a Update README and update_logs 2025-04-02 20:03:18 +08:00
Will Miao
92450385d2 Update README 2025-04-02 20:00:04 +08:00
Will Miao
8d15e23f3c Add markdown support for changelog in modal
- Introduced a simple markdown parser to convert markdown syntax in changelog items to HTML.
- Updated modal CSS to style markdown elements, enhancing the presentation of changelog items.
- Improved user experience by allowing formatted text in changelog, including bold, italic, code, and links.
2025-04-02 19:36:52 +08:00
Will Miao
73686d4146 Enhance modal and settings functionality with default LoRA root selection
- Updated modal styles for improved layout and added select control for default LoRA root.
- Modified DownloadManager, ImportManager, MoveManager, and SettingsManager to retrieve and set the default LoRA root from storage.
- Introduced asynchronous loading of LoRA roots in SettingsManager to dynamically populate the select options.
- Improved user experience by allowing users to set a default LoRA root for downloads, imports, and moves.
2025-04-02 17:37:16 +08:00
Will Miao
0499ca1300 Update process_node function to ignore type checking
- Added a type: ignore comment to the process_node function to suppress type checking errors.
- Removed the README.md file as it is no longer needed.
2025-04-02 17:02:11 +08:00
Will Miao
234c942f34 Refactor transform functions and update node mappers
- Moved and redefined transform functions for KSampler, EmptyLatentImage, CLIPTextEncode, and FluxGuidance to improve organization and maintainability.
- Updated NODE_MAPPERS to include new input tracking for clip_skip in KSampler and added new transform functions for LatentUpscale and CLIPSetLastLayer.
- Enhanced the transform_sampler_custom_advanced function to handle clip_skip extraction from model inputs.
2025-04-02 17:01:10 +08:00
Will Miao
aec218ba00 Enhance SaveImage class with filename formatting and multiple image support
- Updated the INPUT_TYPES to accept multiple images and modified the corresponding processing methods.
- Introduced a new format_filename method to handle dynamic filename generation using metadata patterns.
- Replaced save_workflow_json with embed_workflow for better clarity in saving workflow metadata.
- Improved directory handling and filename generation logic to ensure proper file saving.
2025-04-02 15:08:36 +08:00
Will Miao
b508f51fcf checkpoint 2025-04-02 14:13:53 +08:00
Will Miao
435628ea59 Refactor WorkflowParser by removing unused methods 2025-04-02 14:13:24 +08:00
Will Miao
4933dbfb87 Refactor ExifUtils by removing unused methods and imports
- Removed the extract_user_comment and update_user_comment methods to streamline the ExifUtils class.
- Cleaned up unnecessary imports and reduced code complexity, focusing on essential functionality for image metadata extraction.
2025-04-02 11:14:05 +08:00
Will Miao
5a93c40b79 Refactor logging levels and improve mapper registration
- Changed warning logs to debug logs in CivitaiClient and RecipeScanner for better log granularity.
- Updated the mapper registration function name for clarity and adjusted related logging messages.
- Enhanced extension loading process to automatically register mappers from NODE_MAPPERS_EXT, improving modularity and maintainability.
2025-04-02 10:29:31 +08:00
Will Miao
a8ec5af037 checkpoint 2025-04-02 06:05:24 +08:00
Will Miao
27db60ce68 checkpoint 2025-04-01 19:17:43 +08:00
Will Miao
195866b00d Implement KJNodes extension with new mappers and transform functions
- Added KJNodes mappers for JoinStrings, StringConstantMultiline, and EmptyLatentImagePresets.
- Introduced transform functions to handle string joining, string constants, and dimension extraction with optional inversion.
- Registered new mappers and logged successful registration for better traceability.
2025-04-01 16:22:57 +08:00
Will Miao
60575b6546 checkpoint 2025-04-01 08:38:49 +08:00
pixelpaws
350b81d678 Merge pull request #64 from richardhristov/main
Remember sort by name/date in LoRAs page
2025-03-31 20:16:29 +08:00
Will Miao
cc95314dae Bump version to v0.8.2 2025-03-30 20:53:22 +08:00
Will Miao
3f97087abb Update unauthorized access error message 2025-03-30 20:15:50 +08:00
Will Miao
f04af2de21 Add Civitai model retrieval and missing LoRAs download functionality
- Introduced new API endpoints for fetching Civitai model details by model version ID or hash.
- Enhanced the download manager to support downloading LoRAs using model version ID or hash, improving flexibility.
- Updated RecipeModal to handle missing LoRAs, allowing users to download them directly from the recipe interface.
- Added tooltip and click functionality for missing LoRAs status, enhancing user experience.
- Improved error handling for missing LoRAs download process, providing clearer feedback to users.
2025-03-30 19:45:03 +08:00
Richard Hristov
e7871bf843 Remember sort by name/date in LoRAs page 2025-03-29 17:11:53 +02:00
Will Miao
8e3308039a Refactor Lora handling in RecipeRoutes and enhance RecipeManager
- Updated Lora filtering logic in RecipeRoutes to skip deleted LoRAs without exclusion checks, improving performance and clarity.
- Enhanced condition for fetching cached LoRAs to ensure valid data is processed.
- Added toggleApiKeyVisibility function to RecipeManager, improving API key management in the UI.
2025-03-29 19:11:13 +08:00
Will Miao
b65350b7cb Add update functionality for recipe metadata in RecipeRoutes and RecipeModal
- Introduced a new API endpoint to update recipe metadata, allowing users to modify recipe titles and tags.
- Enhanced RecipeModal to support inline editing of recipe titles and tags, improving user interaction.
- Updated RecipeCard to reflect changes in recipe metadata, ensuring consistency across the application.
- Improved error handling for metadata updates to provide clearer feedback to users.
2025-03-29 18:46:19 +08:00
Will Miao
069ebce895 Add recipe syntax endpoint and update RecipeCard and RecipeModal for syntax fetching
- Introduced a new API endpoint to retrieve recipe syntax for LoRAs, allowing for better integration with the frontend.
- Updated RecipeCard to fetch recipe syntax from the backend instead of generating it locally.
- Modified RecipeModal to store the recipe ID and fetch syntax when the copy button is clicked, improving user experience.
- Enhanced error handling for fetching recipe syntax to provide clearer feedback to users.
2025-03-29 15:38:49 +08:00
Will Miao
63aa4e188e Add rename functionality for LoRA files and enhance UI for editing file names
- Introduced a new API endpoint to rename LoRA files, including validation and error handling for file paths and names.
- Updated the RecipeScanner to reflect changes in LoRA filenames across recipe files and cache.
- Enhanced the LoraModal UI to allow inline editing of file names with improved user interaction and validation.
- Added CSS styles for the editing interface to improve visual feedback during file name editing.
2025-03-29 09:25:41 +08:00
Will Miao
c31c9c16cf Enhance LoraScanner and file_utils for improved metadata handling
- Updated LoraScanner to first attempt to create metadata from .civitai.info files, improving metadata extraction from existing files.
- Added error handling for reading .civitai.info files and fallback to generating metadata using get_file_info if necessary.
- Refactored file_utils to expose find_preview_file function and added logic to utilize SHA256 from existing .json files to avoid recalculation.
- Improved overall robustness of metadata loading and preview file retrieval processes.
2025-03-28 16:27:59 +08:00
Will Miao
5a8a402fdc Enhance LoraRoutes and templates for improved cache initialization handling
- Updated LoraRoutes to better check cache initialization status and handle loading states.
- Added logging for successful cache loading and error handling for cache retrieval failures.
- Enhanced base.html and loras.html templates to display a loading spinner and initialization notice during cache setup.
- Improved user experience by ensuring the loading notice is displayed appropriately based on initialization state.
2025-03-28 15:04:35 +08:00
Will Miao
85c3e33343 Update version to 0.8.1 and add release notes for new features and improvements
- Bump version from 0.8.0 to 0.8.1 in pyproject.toml.
- Document new features in README.md, including base model correction, LoRA loader flexibility, expanded recipe support, enhanced showcase images, and various UI improvements and bug fixes.
2025-03-28 04:15:54 +08:00
Will Miao
1420ab31a2 Enhance CivitaiClient error handling for unauthorized access
- Updated handling of 401 unauthorized responses to differentiate between API key issues and early access restrictions.
- Improved logging for unauthorized access attempts.
- Refactored condition to check for early access restrictions based on response headers.
- Adjusted logic in DownloadManager to check for early access using a more concise method.
2025-03-28 04:11:08 +08:00
Will Miao
fd1435537f Add ImageSaverMetadataParser for ComfyUI Image Saver plugin metadata handling
- Introduced ImageSaverMetadataParser class to parse metadata from the Image Saver plugin format.
- Implemented methods to extract prompts, negative prompts, and LoRA information, including weights and hashes.
- Enhanced error handling and logging for metadata parsing failures.
- Updated RecipeParserFactory to include ImageSaverMetadataParser for relevant user comments.
2025-03-28 03:27:35 +08:00
Will Miao
4e0473ce11 Fix redownloading loras issue 2025-03-28 02:53:30 +08:00
Will Miao
450592b0d4 Implement Civitai data population methods for LoRA and checkpoint entries
- Added `populate_lora_from_civitai` and `populate_checkpoint_from_civitai` methods to enhance the extraction of model information from Civitai API responses.
- These methods populate LoRA and checkpoint entries with relevant data such as model name, version, thumbnail URL, base model, download URL, and file details.
- Improved error handling and logging for scenarios where models are not found or data retrieval fails.
- Refactored existing code to utilize the new methods, streamlining the process of fetching and updating LoRA and checkpoint metadata.
2025-03-28 02:16:53 +08:00
Will Miao
7cae0ee169 Enhance LoraModal to include image metadata panel
- Added a new image metadata panel to display generation parameters and prompts for images and videos.
- Implemented styles for the metadata panel in lora-modal.css, ensuring it is responsive and visually integrated.
- Introduced functionality to copy prompts to the clipboard and handle metadata interactions within the modal.
- Updated media rendering logic in LoraModal.js to incorporate metadata display and improve user experience.
2025-03-27 20:09:48 +08:00
Will Miao
ecd0e05f79 Add MetaFormatParser for Lora_N Model hash format metadata handling
- Introduced MetaFormatParser class to parse metadata from images with Lora_N Model hash format.
- Implemented methods to validate metadata structure, extract prompts, negative prompts, and LoRA information.
- Enhanced error handling and logging for metadata parsing failures.
- Updated RecipeParserFactory to include MetaFormatParser for relevant user comments.
2025-03-27 17:28:11 +08:00
Will Miao
6e3b4178ac Enhance LoraStacker to return active LoRAs in stack_loras method
- Updated RETURN_TYPES and RETURN_NAMES to include active LoRAs.
- Introduced active_loras list to track active LoRAs and their strengths.
- Formatted active_loras for return as a string in the format <lora:lora_name:strength>.
2025-03-27 16:10:50 +08:00
Will Miao
ba18cbabfd Add ComfyMetadataParser for Civitai ComfyUI metadata handling
- Introduced ComfyMetadataParser class to parse metadata from Civitai ComfyUI JSON format.
- Implemented methods to validate metadata structure, extract LoRA and checkpoint information, and retrieve additional model details from Civitai.
- Enhanced error handling and logging for metadata parsing failures.
- Updated RecipeParserFactory to prioritize ComfyMetadataParser for valid JSON inputs.
2025-03-27 15:43:58 +08:00
Will Miao
dec757c23b Refactor image metadata handling in RecipeRoutes and ExifUtils
- Replaced the download function for images from Twitter to Civitai in recipe_routes.py.
- Updated metadata extraction from user comments to a more comprehensive image metadata extraction method in ExifUtils.
- Enhanced the appending of recipe metadata to utilize the new metadata extraction method.
- Added a new utility function to download images from Civitai.
2025-03-27 14:56:37 +08:00
Will Miao
0459710c9b Made CLIP input optional in LoRA Loader, enabling compatibility with Hunyuan workflows 2025-03-26 21:50:26 +08:00
Will Miao
83582ef8a3 Refactor RecipeScanner to remove custom async timeout and streamline cache initialization
- Removed the custom async_timeout function and replaced it with direct usage of the initialization lock.
- Simplified the cache initialization process by eliminating the dependency on the lora scanner.
- Enhanced error handling during cache initialization to ensure a fallback to an empty cache on failure.
2025-03-26 18:56:18 +08:00
Will Miao
0dc396e148 Enhance RecipeModal to support video previews
- Updated RecipeModal.js to dynamically handle video and image previews based on the file type.
- Modified recipe-modal.css to ensure proper styling for both images and videos.
- Adjusted recipe_modal.html to accommodate the new media handling structure.
2025-03-26 16:39:53 +08:00
pixelpaws
86958e1420 Merge pull request #51 from AlUlkesh/main
Python < 3.11 backward compatibility for timeout.
2025-03-26 10:47:23 +08:00
Will Miao
c5b8e629fb Enhance save functionality in LoraModal for base model editing
- Added a check to prevent saving if the base model value has not changed.
- Stored the original value during editing to compare with the new selection.
- Updated the saveBaseModel function to accept the original value for comparison.
2025-03-26 07:05:32 +08:00
Will Miao
b0a495b4f6 Add base model editing functionality to LoraModal
- Introduced new styles for base model display and editing in lora-modal.css.
- Enhanced LoraModal.js to support editing of the base model with a dropdown selector.
- Implemented save functionality for the updated base model, including UI interactions for editing and saving changes.
2025-03-26 06:49:33 +08:00
Will Miao
7d2809467b Update tutorial video link 2025-03-25 14:10:13 +08:00
Will Miao
af90eeaf37 Bump version to 0.8.0 2025-03-25 14:00:00 +08:00
AlUlkesh
509e513f3a Python < 3.11 backward compatibility for timeout. 2025-03-24 14:16:46 +01:00
pixelpaws
80671e474c Update README.md 2025-03-24 08:39:51 +08:00
pixelpaws
a166d859e7 Update README.md 2025-03-24 04:49:28 +08:00
Will Miao
6af1e0aeb7 Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager 2025-03-24 04:00:02 +08:00
Will Miao
370ffb5d7c Update discord invite 2025-03-24 03:59:44 +08:00
pixelpaws
0ba288d09e Update README.md 2025-03-24 03:49:17 +08:00
Will Miao
008d86983b Update workflow 2025-03-24 03:46:12 +08:00
Will Miao
205bdfce5c Update README.md with new features and enhancements for v0.8.0, including LoRA recipes, improved UI/UX, and workflow integration. Remove outdated screenshot and update Discord link in modals.html. 2025-03-23 16:53:46 +08:00
Will Miao
27248b197d Update cache management in ApiRoutes to remove hash index by file path
- Added functionality to update the hash index by removing entries associated with the specified file path during cache management.
- Ensured that the cache is properly re-sorted after the removal of raw data items.
2025-03-23 16:50:56 +08:00
Will Miao
e216b4c455 Refactor early access checks in recipe parsers
- Updated the early access condition checks in RecipeFormatParser, StandardMetadataParser, and A1111MetadataParser to use the `get` method for improved readability and safety.
- Ensured consistent handling of early access status across different parser classes.
2025-03-23 15:29:47 +08:00
Will Miao
c402f53258 Implement early access handling and UI enhancements for LoRA downloads
- Added error handling for early access restrictions in the API routes, returning appropriate status codes and messages.
- Enhanced the Civitai client to log unauthorized access attempts and provide user-friendly error messages.
- Updated the download manager to check for early access requirements and log warnings accordingly.
- Introduced UI elements to indicate early access status for LoRAs, including badges and warning messages in the import manager.
- Improved toast notifications to inform users about early access download failures and provide relevant information.
2025-03-23 14:45:11 +08:00
Will Miao
93329abe8b Refactor LoraFileHandler to use provided event loop and improve logging
- Updated LoraFileHandler to utilize the passed event loop for time retrieval instead of the current thread's event loop.
- Changed error logging for extension loading in mappers from error to warning level for better clarity.
2025-03-23 09:22:57 +08:00
Will Miao
f69b3d96b6 Update dependencies in pyproject.toml and requirements.txt
- Added new dependencies: piexif, Pillow, and requests to enhance image processing and HTTP request capabilities.
- Ensured consistency between pyproject.toml and requirements.txt by including the same set of dependencies.
2025-03-23 08:48:13 +08:00
Will Miao
8690a8f11a Enhance LoraStackerMapper and WorkflowParser functionality
- Updated LoraStackerMapper to handle multiple formats for lora_stack input, improving flexibility in processing existing stacks.
- Introduced caching for processed node results in WorkflowParser to optimize performance and prevent redundant processing.
- Added a new method to collect loras from model inputs, enhancing the ability to extract relevant data from the workflow.
- Improved handling of processed nodes to avoid cycles and ensure accurate results during workflow parsing.
2025-03-23 07:41:55 +08:00
Will Miao
6aa2342be1 Enhance node processing and error handling in workflow mappers
- Improved reference handling in NodeMapper to support integer node IDs and added error logging for reference processing failures.
- Updated LoraLoaderMapper and LoraStackerMapper to handle lora_stack as a dictionary, ensuring compatibility with new data formats.
- Refactored trace_model_path utility to perform a depth-first search for LoRA nodes, improving the accuracy of model path tracing.
- Cleaned up unused code in parser.py related to LoRA processing, streamlining the workflow parsing logic.
2025-03-23 07:20:50 +08:00
Will Miao
042153329b Update dependencies 2025-03-23 05:42:00 +08:00
Will Miao
2b67091986 Enhance workflow parsing and node mapper registration
- Introduced a new WorkflowParser class to streamline workflow parsing and manage node mappers.
- Added functionality to load external mappers dynamically from a specified directory.
- Refactored LoraLoaderMapper and LoraStackerMapper to handle new data formats for loras and trigger words.
- Updated recipe routes to utilize the new WorkflowParser for parsing workflows.
- Made adjustments to the flux_prompt.json to reflect changes in active states and class types.
2025-03-23 05:21:43 +08:00
Will Miao
3da35cf0db Remove deprecated workflow parameters and associated files
- Deleted the `__init__.py`, `cli.py`, `extension_manager.py`, `integration_example.py`, `README.md`, `simple_test.py`, `test_parser.py`, `verify_workflow.py`, and `workflow_parser.py` files as they are no longer needed.
- Updated `.gitignore` to exclude new output files and test scripts.
- Cleaned up the node processors directory by removing all processor implementations and their registration logic.
2025-03-22 20:43:17 +08:00
Will Miao
e566484a17 Add Civitai URL retrieval functionality and UI integration
- Introduced a new API route to fetch the Civitai URL for a specified LoRA file.
- Implemented error handling for missing LoRA names and absence of Civitai data.
- Added a "View on Civitai" option in the UI, allowing users to access the Civitai URL directly from the LoRA widget.
- Enhanced user feedback for successful and failed URL retrieval attempts.
2025-03-22 17:35:30 +08:00
Will Miao
e7dffbbb1e Refactor LoRA handling in LoraLoader, LoraStacker, and TriggerWordToggle
- Introduced logging to track unexpected formats in LoRA and trigger word data.
- Refactored LoRA processing to support both old and new kwargs formats in LoraLoader and LoraStacker.
- Enhanced trigger word processing to handle different data formats in TriggerWordToggle.
- Improved code readability and maintainability by extracting common logic into helper methods.
2025-03-22 15:56:37 +08:00
Will Miao
a31712ad1f Wrap status badge in a container div for improved layout in ImportManager component 2025-03-22 10:24:01 +08:00
Will Miao
2958f81adc Revert "Refactor path mapping logic in Config class"
This reverts commit fce58f3206.
2025-03-22 10:18:26 +08:00
Will Miao
95380fbbfb Add base model mapping for SD 1.5 2025-03-22 09:49:35 +08:00
Will Miao
4cc6996406 Refactor theme toggle styles for improved positioning
- Updated CSS for the theme toggle component to ensure relative positioning for the container.
- Centered light and dark icons within the theme toggle using absolute positioning and transform properties.
- Added transition effects for opacity to enhance visual feedback during theme changes.
2025-03-22 09:49:15 +08:00
Will Miao
372d74ec71 Enhance settings management and localStorage integration
- Added functionality to load settings from localStorage in the SettingsManager, ensuring user preferences are retained across sessions.
- Updated the state management to initialize settings from localStorage, improving user experience.
- Refactored the UpdateService to streamline update notification preferences.
- Improved migration logic in storageHelpers to prevent duplicate migrations and ensure data integrity.
- Removed unnecessary console logs for cleaner output in various modules.
2025-03-22 08:46:36 +08:00
Will Miao
19ef73a07f Refactor storage handling across application
- Introduced a new storageHelpers module to centralize localStorage interactions, improving code maintainability and readability.
- Updated various components and managers to utilize the new storageHelpers functions for setting, getting, and removing items from localStorage.
- Added migration logic to handle localStorage items during application initialization, ensuring compatibility with the new storage structure.
- Enhanced logging during application initialization for better debugging.
2025-03-22 05:32:18 +08:00
Will Miao
bb3d73b87c Fix support modal width 2025-03-22 04:36:34 +08:00
Will Miao
30e9e7168f Update logging level for parsed workflow and add refresh button to recipe controls
- Changed logging from info to debug for parsed workflow in RecipeRoutes to reduce log verbosity.
- Added a refresh button in the recipe controls section of the HTML template to allow users to reload the recipe list easily.
2025-03-21 21:38:02 +08:00
Will Miao
fce58f3206 Refactor path mapping logic in Config class
- Updated add_path_mapping method to return a boolean indicating success or failure of mapping addition.
- Enhanced link scanning to only continue if a mapping was successfully added.
- Filtered paths to exclude those already mapped, improving efficiency in path handling.
- Added logging for existing mappings to provide better insights during execution.
2025-03-21 21:26:00 +08:00
Will Miao
b3e5ac395f Enhance recipe modal styles and tooltip functionality
- Updated CSS for recipe modal to improve layout and responsiveness, including adjustments to header and badge styles.
- Added tooltip positioning logic to ensure correct display of local-badge tooltips on hover.
- Refactored HTML structure for local status badges to enhance stability and positioning.
- Removed unnecessary console logs from recipe fetching process in JavaScript for cleaner output.
2025-03-21 20:19:58 +08:00
Will Miao
3ebe9d159a Refactor LoraRoutes to return empty recipes when no data is available
- Removed the logic for fetching and formatting recipes from the cache.
- Updated the response to return an empty list for recipes when no data is present, simplifying the flow.
- Adjusted comments for clarity regarding the new behavior.
2025-03-21 20:00:15 +08:00
pixelpaws
ff95274757 Merge pull request #45 from willmiao/dev
Dev
2025-03-21 17:31:42 +08:00
Will Miao
8e653e2173 Refactor recipe saving process to utilize workflow JSON and enhance Lora handling
- Updated the recipe saving logic to accept a workflow JSON input instead of individual fields like name, tags, and metadata.
- Implemented parsing of the workflow to extract generation parameters and Lora stack, improving the recipe creation process.
- Enhanced error handling for missing workflow data and invalid Lora formats.
- Removed deprecated code related to individual field handling, streamlining the recipe saving functionality.
- Updated the front-end widget to send the workflow JSON directly, simplifying the data preparation process.
2025-03-21 17:28:20 +08:00
Will Miao
4bff17aa1a Update prompt configuration and enhance Lora management functionality
- Expanded the prompt.json file with new configurations for KSampler, CheckpointLoaderSimple, and various CLIPTextEncode nodes.
- Introduced additional Lora management features, including a new Lora Stacker and improved trigger word handling.
- Enhanced the loras_widget.js to log the generated prompt when saving recipes directly, aiding in debugging and user feedback.
- Improved overall structure and organization of the prompt configurations for better maintainability.
2025-03-21 16:35:52 +08:00
Will Miao
d4f300645d Enhance ExifUtils to extract prompts from node references in workflows
- Updated the logic in ExifUtils to first identify KSampler nodes and store references to positive and negative prompt nodes.
- Added a helper function to follow these references and extract text content from CLIP Text Encode nodes.
- Implemented a fallback mechanism to extract prompts using traditional methods if references are not available.
- Improved code readability with additional comments and structured handling of node data.
2025-03-21 11:32:51 +08:00
Will Miao
4ee32f02c5 Add functionality to save recipes from the LoRAs widget
- Introduced a new API endpoint to save recipes directly from the LoRAs widget.
- Implemented logic to handle recipe data, including image processing and metadata extraction.
- Enhanced error handling for missing fields and image retrieval.
- Updated the ExifUtils to extract generation parameters from images for recipe creation.
- Added a direct save option in the widget, improving user experience.
2025-03-21 11:11:09 +08:00
Will Miao
2cf4440a1e Add Android icons and site.webmanifest for PWA support
- Added new Android icon images (192x192 and 512x512) for better app integration.
- Created site.webmanifest file to define app metadata and icon usage for Progressive Web App (PWA) functionality.
2025-03-21 05:37:58 +08:00
Will Miao
644ee31654 Remove site.webmanifest and refactor state initialization in RecipeManager and HeaderManager
- Deleted the site.webmanifest file as it is no longer needed.
- Simplified state management by removing initPageState from RecipeManager and integrating it into HeaderManager.
- Cleaned up console logging in loraApi.js to reduce unnecessary output.
- Minor formatting adjustments in FilterManager to enhance code readability.
2025-03-21 05:22:20 +08:00
Will Miao
34078d8a60 Refactor recipe card styles and update HTML structure
- Migrated CSS styles from recipe-card.css to card.css for better organization.
- Updated recipe card class names in HTML to align with new styling conventions.
- Enhanced card layout with additional flex properties for improved responsiveness.
- Adjusted infinite scroll debounce timing for better performance.
2025-03-20 21:42:17 +08:00
Will Miao
5cfae7198d Refactor recipe metadata handling and update CSS styles
- Moved the recipe metadata appending logic to occur after the JSON creation for better workflow.
- Adjusted the user comment formatting in ExifUtils to include a newline for improved readability.
- Reduced the maximum height of the recipe modal bottom section for better layout consistency.
2025-03-20 19:53:05 +08:00
Will Miao
6a10cda61f Add recipe metadata handling in image processing
- Implemented functionality to append recipe metadata to images during the recipe creation process.
- Removed redundant metadata handling from the temporary image processing step, streamlining the image handling logic.
- Enhanced the overall image processing workflow for better integration of recipe data.
2025-03-20 18:55:00 +08:00
Will Miao
c149e73ef7 Add recipe tags functionality to RecipeModal
- Implemented display of recipe tags in a compact format within the RecipeModal.
- Added tooltip for additional tags with hover functionality.
- Updated CSS styles for recipe tags and tooltips to enhance visual presentation.
- Adjusted layout and padding in related components for improved aesthetics.
2025-03-20 17:57:35 +08:00
Will Miao
b11757c913 Fix infinite scroll 2025-03-20 17:31:56 +08:00
Will Miao
607ab35cce Refactor search functionality in Lora and Recipe scanners to utilize fuzzy matching
- Introduced a new fuzzy_match utility function for improved search accuracy across Lora and Recipe scanners.
- Updated search logic in LoraScanner and RecipeScanner to leverage fuzzy matching for titles, tags, and filenames, enhancing user experience.
- Removed deprecated search methods to streamline the codebase and improve maintainability.
- Adjusted API routes to ensure compatibility with the new search options, including recursive search handling.
2025-03-20 16:55:51 +08:00
Will Miao
19ff2ebfe1 Refactor RecipeManager and ImportManager for improved functionality
- Removed deprecated global functions from RecipeManager to streamline the API and enhance clarity.
- Updated the import handling in ImportManager to directly call loadRecipes on the RecipeManager, ensuring better integration.
- Adjusted the recipes.html template to utilize the ImportManager for showing the import modal, improving code consistency.
2025-03-20 15:57:00 +08:00
Will Miao
4a47dc2073 Add new API routes for base models and update existing routes
- Introduced a new endpoint for retrieving base models used in loras, enhancing the API functionality.
- Updated the existing top-tags route to reflect the new URL structure under '/api/loras'.
- Modified the FilterManager to accommodate the new base models API, ensuring proper data fetching and display on the loras page.
- Improved error handling and logging for base model retrieval, enhancing overall robustness of the application.
2025-03-20 15:19:05 +08:00
Will Miao
addf92d966 Refactor API routes and enhance recipe and filter management
- Removed the handle_get_recipes method from ApiRoutes to streamline the API structure.
- Updated RecipeRoutes to include logging for recipe retrieval requests and improved filter management.
- Consolidated filter management logic in FilterManager to support both recipes and loras, enhancing code reusability.
- Deleted obsolete LoraSearchManager and RecipeSearchManager classes to simplify the search functionality.
- Improved infinite scroll implementation for both recipes and loras, ensuring consistent loading behavior across pages.
2025-03-20 14:54:13 +08:00
Will Miao
c987338c84 Add Checkpoints feature with routes, template, and JavaScript integration
- Introduced CheckpointsRoutes for managing checkpoints-related endpoints and handling the checkpoints page.
- Added checkpoints.html template for rendering the checkpoints interface with a work-in-progress message.
- Implemented checkpoints.js to manage the initialization of the Checkpoints page and its components.
- Updated LoraManager to include checkpoints routes in the application setup, enhancing overall functionality.
2025-03-20 10:50:46 +08:00
Will Miao
a88b0239eb Refactor panel position management and enhance recipe card handling
- Removed redundant updatePanelPositions calls from various components and centralized the logic in the uiHelpers.js for better maintainability.
- Introduced appendRecipeCards function in RecipeManager to streamline the addition of recipe cards from search results.
- Cleaned up unused code related to search input handling and recipe loading, improving overall code clarity and performance.
- Updated HeaderManager and SearchManager to utilize the new updatePanelPositions function, ensuring consistent panel positioning across the application.
2025-03-20 09:54:13 +08:00
Will Miao
caf5b1528c Enhance recipe search functionality with improved state management and search options
- Introduced new search options for recipes, allowing users to filter by title, tags, LoRA filename, and LoRA model name.
- Updated the RecipeRoutes and RecipeScanner to accommodate the new search options, enhancing the filtering capabilities.
- Refactored RecipeManager and RecipeSearchManager to utilize the hierarchical state structure for managing search parameters and pagination state.
- Improved the user interface by dynamically displaying relevant search options based on the current page context.
2025-03-20 08:27:38 +08:00
Will Miao
90f74018ae Refactor state management to support hierarchical structure and page-specific states
- Introduced a new hierarchical state structure to manage global and page-specific states, enhancing organization and maintainability.
- Updated various managers and components to utilize the new state structure, ensuring consistent access to page-specific data.
- Removed the initSettings function and replaced it with initPageState for better initialization of page-specific states.
- Adjusted imports across multiple files to accommodate the new state management approach, improving code clarity.
2025-03-19 21:12:04 +08:00
Will Miao
d7a253cba3 Update LoraModal to enhance preset value configuration and file path retrieval
- Adjusted preset value min, max, and step properties for improved functionality based on selected options.
- Refactored file path retrieval to ensure consistency by targeting the specific modal context, enhancing code clarity and maintainability.
2025-03-19 20:53:15 +08:00
Will Miao
8a28846bac Merge branch 'main' into dev 2025-03-19 17:34:29 +08:00
Will Miao
04545c5706 Implement lazy loading and infinite scroll features in core application
- Added lazy loading for images and initialized infinite scroll in the AppCore class to enhance performance across various pages.
- Updated LoraPageManager and RecipeManager to utilize the new initializePageFeatures method for common UI features.
- Enhanced infinite scroll functionality to dynamically load more content based on the page type, improving user experience.
- Refactored recipes.html to trigger the import modal through the ModalManager for better modal handling.
2025-03-19 17:04:58 +08:00
Will Miao
32fa81cf93 Refactor ModalManager to conditionally register modals based on their existence
- Updated ModalManager to check for the presence of modals before registration, improving performance and preventing errors on pages without certain modals.
- Added support for closing modals when clicking outside of them, enhancing user experience.
- Ensured consistent handling of modal display properties across various modal types.
2025-03-19 16:36:07 +08:00
Will Miao
7924e4000c Refactor LoraModal and RecipeSearchManager for improved functionality and performance
- Updated LoraModal to enhance lazy loading and scroll behavior, utilizing MutationObserver for dynamic content changes and adding a new helper function for the back-to-top button.
- Modified RecipeSearchManager to ensure proper recipe loading through the window.recipeManager object, improving reliability in recipe reloading.
- Added additional components to loras.html for better modularity and organization of the modal structure.
2025-03-19 16:15:18 +08:00
Will Miao
f9c54690b0 Refactor logging and improve image optimization in RecipeRoutes and ExifUtils
- Removed print statements for initialization and setup in RecipeRoutes to reduce console clutter and improve logging practices.
- Updated image optimization parameters in RecipeRoutes to enhance image quality by increasing the target width.
- Modified user comment handling in ExifUtils to ensure proper formatting when appending recipe metadata, improving metadata consistency.
2025-03-19 14:50:36 +08:00
Will Miao
c3aaef3916 Enhance image handling and EXIF metadata processing in RecipeRoutes and ExifUtils
- Implemented image optimization in RecipeRoutes, resizing and converting uploaded images to WebP format while preserving metadata.
- Updated ExifUtils to support EXIF data handling for WebP images, ensuring compatibility with various image formats.
- Added a new method for optimizing images, allowing for better performance and quality in image uploads.
2025-03-19 14:17:37 +08:00
Will Miao
03dfe13769 Remove supportModal.html and refactor error-message styles across multiple CSS files for consistency
- Deleted supportModal.html as it is no longer needed.
- Removed duplicate error-message styles from download-modal.css, import-modal.css, and lora-modal.css.
- Consolidated error-message styles into shared.css to ensure consistent styling across components.
2025-03-19 10:10:27 +08:00
Will Miao
f38b51b85a Enhance RecipeScanner and CSS components for improved functionality and styling
- Added localPath retrieval for LoRA entries in RecipeScanner to enhance metadata handling.
- Included shared.css in the main stylesheet for better component styling consistency.
- Removed unused local-badge and local-path styles from download-modal.css and recipe-modal.css to streamline the CSS and improve maintainability.
2025-03-19 08:21:51 +08:00
Will Miao
0017a6cce5 Update A1111MetadataParser to correctly extract model ID, name, and version from Civitai info
- Changed the extraction of model ID to use 'id' instead of 'modelVersionId'.
- Updated the retrieval of model name and version to align with the new Civitai response structure, ensuring accurate metadata parsing for LoRA entries.
- Improved error handling and logging for better traceability during metadata fetching.
2025-03-19 05:49:53 +08:00
Will Miao
541ad624c5 Implement input-with-button layout in import modal for improved user experience
- Added a new input-with-button component to the import modal, allowing users to input an image URL and fetch the image with a button click.
- Removed the previous button placement to streamline the UI and enhance usability.
- Updated CSS styles for the new component to ensure proper layout and responsiveness.
2025-03-19 05:24:28 +08:00
Will Miao
7c56825f9b Enhance import functionality for recipes with image upload and URL support
- Added support for importing recipes via image upload or URL input in the ImportManager.
- Implemented toggle functionality to switch between upload and URL modes, updating the UI accordingly.
- Enhanced error handling for missing fields and invalid URLs during the import process.
- Updated the RecipeRoutes to analyze images from both uploaded files and URLs, returning appropriate metadata.
- Improved the import modal UI to accommodate new input methods and provide clearer user feedback.
2025-03-19 05:13:44 +08:00
Will Miao
8a871ae643 Refactor EXIF data extraction and enhance recipe metadata parsing
- Updated ExifUtils to handle both JPEG/TIFF and non-JPEG/TIFF images for extracting UserComment from EXIF data, improving compatibility with various image formats.
- Introduced A1111MetadataParser to support parsing of images with A1111 metadata format, extracting prompts, negative prompts, and LoRA information.
- Enhanced error handling and logging for metadata parsing processes, ensuring better traceability and debugging capabilities.
2025-03-18 20:36:58 +08:00
Will Miao
e2191ab4b4 Refactor recipe metadata processing in RecipeRoutes
- Introduced a new RecipeParserFactory to streamline the parsing of recipe metadata from user comments, supporting multiple formats.
- Removed legacy metadata extraction logic from RecipeRoutes, delegating responsibilities to the new parser classes.
- Enhanced error handling for cases where no valid parser is found, ensuring graceful responses.
- Updated the RecipeScanner to improve the handling of LoRA metadata and reduce logging verbosity for better performance.
2025-03-18 18:54:22 +08:00
Will Miao
4264dd19a8 Enhance recipe metadata handling in RecipeRoutes and ExifUtils
- Added functionality to extract and process existing recipe metadata from images, including LoRA details and Civitai information.
- Updated ExifUtils to manage recipe metadata more effectively, including appending and removing metadata from user comments.
- Improved the ImportManager to utilize recipe metadata for setting default recipe names and tags when importing shared recipes.
2025-03-18 16:49:04 +08:00
Will Miao
78f8d4ecc7 Add sharing functionality for recipes
- Introduced new endpoints for sharing recipes and downloading shared images in RecipeRoutes.
- Implemented logic to process recipe images and append metadata to EXIF data.
- Updated RecipeCard component to handle sharing via API calls, providing user feedback during the process.
- Enhanced error handling for missing recipe IDs and failed API responses.
2025-03-18 14:52:21 +08:00
Will Miao
e2cc3145de Update refs 2025-03-18 14:21:22 +08:00
Will Miao
710857dd41 checkpoint 2025-03-17 19:58:17 +08:00
Will Miao
1bfe12a288 Add filter button functionality and clean up recipe template scripts
- Implemented click handler for the filter button in FilterManager to toggle the filter panel.
- Removed redundant recipe filter manager initialization from recipes.html for cleaner code.
- Updated header.html to remove inline JavaScript for filter button, enhancing maintainability.
2025-03-17 17:41:41 +08:00
Will Miao
14a88e2cfa update 2025-03-17 16:55:19 +08:00
Will Miao
0580130d47 Fix lora page header 2025-03-17 15:53:53 +08:00
Will Miao
a4ee82b51f checkpoint 2025-03-17 15:10:11 +08:00
Will Miao
1034282161 Enhance LoRA and Recipe templates by adding request context to template rendering. Update JavaScript to initialize search managers with context-specific options and improve header navigation with dynamic search placeholders. Refactor header component for better context awareness in search functionality. 2025-03-17 10:11:50 +08:00
Will Miao
b0a8b0cc6f Implement share functionality in RecipeCard component to enable image downloads. Adjust recipe indicator position in CSS for improved layout. 2025-03-17 06:10:43 +08:00
Will Miao
3f38764a0e Add filter-related endpoints to RecipeRoutes for top tags and base models. Enhance get_paginated_data method in RecipeScanner to support filtering by base model and tags. Implement logic to retrieve and count occurrences of top tags and base models from cached recipes. 2025-03-16 21:21:00 +08:00
Will Miao
3338c17e8f Refactor recipe processing in RecipeRoutes to enhance LoRA handling. Introduce base model counting logic to determine the most common base model from LoRAs, and streamline the collection of LoRA metadata. Remove outdated metadata update method from RecipeScanner to improve code clarity and maintainability. 2025-03-16 18:56:27 +08:00
Will Miao
22085e5174 Add delete confirmation modal for recipes with updated styling and functionality. Implement modal content generation, event handling for delete and cancel actions, and integrate with modal manager for improved user experience. Enhance CSS for delete preview image display. 2025-03-16 18:17:19 +08:00
Will Miao
d7c643ee9b Enhance LoRA management by introducing deletion status and UI updates. Implement warning indicators for deleted LoRAs in the import modal, update cache handling for added and removed recipes, and improve styling for deleted items. Adjust logic to exclude deleted LoRAs from download prompts and ensure proper display of their status in the UI. 2025-03-16 17:59:55 +08:00
Will Miao
406284a045 checkpoint 2025-03-16 16:56:33 +08:00
Will Miao
50babfd471 Update modal CSS to allow scrolling by changing overflow property from hidden to auto. Adjust max-height to account for header height while maintaining reduced top margin. 2025-03-15 20:41:10 +08:00
Will Miao
edd36427ac Refactor recipe management to enhance initialization and metadata handling. Improve error logging during cache pre-warming, streamline recipe data structure, and ensure proper handling of generation parameters. Update UI components for missing LoRAs with improved summary and toggle functionality. Add new methods for adding recipes to cache and loading recipe data from JSON files. 2025-03-15 20:08:26 +08:00
Will Miao
9f2289329c Implement enhanced loading progress display in DownloadManager and ImportManager. Introduce detailed progress updates and UI elements for current item and overall progress during downloads. Update LoadingManager to support dynamic progress visualization. 2025-03-15 16:25:56 +08:00
Will Miao
9a1fe19cc8 Enhance DownloadManager and LoraFileHandler to support dynamic ignore path management with expiration times. Added handling for alternative path formats and improved logging for added and removed paths. 2025-03-15 14:58:40 +08:00
Will Miao
09f5e2961e Bump version to 0.7.39 2025-03-15 10:58:55 +08:00
Will Miao
756ad399bf Enhance LoraManagerLoader to include formatted loaded_loras in return values, improving data output for loaded LoRAs. 2025-03-15 10:45:32 +08:00
Will Miao
02adced7b8 Fix path formatting in LoraStacker to ensure compatibility across different operating systems by replacing '/' with os.sep. 2025-03-15 10:45:16 +08:00
Will Miao
9059795816 Enhance DownloadManager to update hash index with new LoRA entries, improving file tracking during downloads. 2025-03-15 10:16:52 +08:00
Will Miao
6920944724 Refactor API and DownloadManager to utilize version-level properties for model file existence and size, improving data handling and UI responsiveness. 2025-03-15 09:56:41 +08:00
Will Miao
c76b287aed Normalize SHA256 hash handling by converting to lowercase in LoraScanner and LoraMetadata classes for consistency. 2025-03-15 09:56:28 +08:00
Will Miao
5c62ec1177 checkpoint 2025-03-15 09:53:50 +08:00
Will Miao
09b2fdfc59 Refactor API and DownloadManager to utilize version-level properties for model file existence and size, improving data handling and UI responsiveness. 2025-03-15 09:45:07 +08:00
Will Miao
e498c9ce29 Normalize SHA256 hash handling by converting to lowercase in LoraScanner and LoraMetadata classes for consistency. 2025-03-15 07:25:00 +08:00
Will Miao
9bb4d7078e checkpoint 2025-03-15 05:29:25 +08:00
Will Miao
5e4d2c7760 checkpoint 2025-03-14 21:10:24 +08:00
Will Miao
426e84cfa3 checkpoint 2025-03-14 16:37:52 +08:00
Will Miao
b77df8f89f Merge branch 'main' into dev 2025-03-14 11:45:18 +08:00
Will Miao
f7c946778d Bump version to 0.7.38. fix: correct LoRA naming issue when fetching data from Civitai 2025-03-14 11:23:07 +08:00
Will Miao
81599b8f43 Fix: correct LoRA naming issue when fetching data from Civitai 2025-03-14 11:22:21 +08:00
Will Miao
9c0dcb2853 checkpoint 2025-03-14 11:04:58 +08:00
Will Miao
d3e4534673 Refactor model name editing functionality in LoraModal; update styles for improved user interaction and accessibility 2025-03-13 22:11:51 +08:00
Will Miao
dd81c86540 Enhance folder tag functionality and layout; update styles for action buttons and toggle behavior 2025-03-13 21:23:24 +08:00
Will Miao
3620376c3c Add search and filter functionality to header; adjust styles for responsiveness 2025-03-13 21:02:54 +08:00
Will Miao
444e8004c7 update 2025-03-13 20:55:35 +08:00
Will Miao
0b0caa1142 Fix layout 2025-03-13 20:37:23 +08:00
Will Miao
e7233c147d checkpoint 2025-03-13 15:04:18 +08:00
Will Miao
004c203ef2 Merge branch 'main' into dev 2025-03-13 11:45:43 +08:00
Will Miao
db04c349a7 Bump version to 0.7.37-bugfix for release preparation 2025-03-13 11:11:51 +08:00
Will Miao
e57a72d12b Fixed an issue caused by inconsistent base model name for Illustrious. It fixes https://github.com/willmiao/ComfyUI-Lora-Manager/issues/37 2025-03-13 11:00:55 +08:00
Will Miao
c88388da67 Refactor toggle switch styles for update preferences in the modal 2025-03-13 10:00:32 +08:00
Will Miao
2ea0fa8471 Update README and version for NSFW content control enhancements 2025-03-12 22:58:04 +08:00
Will Miao
7f088e58bc Implement SFW content filtering in LoraModal and update settings management 2025-03-12 22:57:21 +08:00
Will Miao
e992ace11c Add NSFW browse control functionality - Done 2025-03-12 22:21:30 +08:00
Will Miao
0cad6b5cbc Add nsfw browse control part 1 2025-03-12 21:06:31 +08:00
Will Miao
e9a703451c Fix the problem of repeatedly trying to fetch model description metadata when the model has a null description. 2025-03-12 15:25:58 +08:00
Will Miao
03ddd51a91 Fetch and update model metadata including tags and description in ApiRoutes and DownloadManager 2025-03-12 14:50:06 +08:00
Will Miao
9142cc4cde Enhance CivitaiClient to return HTTP status code with model metadata; update LoraScanner to handle deleted models 2025-03-12 11:18:19 +08:00
Will Miao
8e5e16ce68 Refactor logging and update badge visibility in UpdateService; improve path normalization in file_utils 2025-03-12 10:06:15 +08:00
Will Miao
9a4124c709 Bump version to 0.7.36 2025-03-11 22:01:40 +08:00
Will Miao
6f49a73f9c Enhance README with v0.7.36 release notes and update utils.py with credit attribution 2025-03-11 22:00:46 +08:00
Will Miao
a2c51f4779 Fix trigger word toggle node 2025-03-11 22:00:25 +08:00
Will Miao
79ebe25bc2 Add lora stacker support and fix TriggerWord Toggle node 2025-03-11 20:46:40 +08:00
Will Miao
ad56cafd62 checkpoint 2025-03-11 19:29:31 +08:00
Will Miao
5a6c412845 Update WebSocket connection to use secure protocol based on current window location 2025-03-11 15:08:52 +08:00
Will Miao
be5e8bad17 Revert "Enhance infinite scroll functionality; increase sentinel height and ensure full width, trigger layout recalculation on initialization."
This reverts commit a1e9e440ed.
2025-03-11 14:47:15 +08:00
Will Miao
d63a70474b Improve height calculation for tags widget to handle empty states and dynamic content 2025-03-11 14:45:50 +08:00
Will Miao
f48b954fb7 Add group mode for TriggerWord Toggle 2025-03-11 11:12:37 +08:00
Will Miao
c48da5300e Make trigger_words input optional for TriggerWord Toggle node. This will fix https://github.com/willmiao/ComfyUI-Lora-Manager/issues/31 2025-03-10 20:19:01 +08:00
Will Miao
2783782669 Add search options panel and functionality for filename, model name, and tags 2025-03-10 19:33:45 +08:00
Will Miao
cbb76580e4 Enhance error handling for civitai metadata parsing and update logic; add empty state messages for model descriptions 2025-03-10 17:32:28 +08:00
Will Miao
2bdecf571b Add trigger words editting 2025-03-10 17:10:58 +08:00
Will Miao
72a82707ea Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager 2025-03-10 15:32:42 +08:00
Will Miao
564e507fa9 Add tag info and filtering 2025-03-10 15:32:27 +08:00
Will Miao
721bef3ff8 Add tag filtering checkpoint 2025-03-10 13:18:56 +08:00
pixelpaws
1803a9085d Update issue templates 2025-03-10 00:53:31 +08:00
pixelpaws
c3fe58f055 Update issue templates 2025-03-10 00:50:51 +08:00
Will Miao
0069f84630 Add model description in lora details 2025-03-10 00:20:31 +08:00
Will Miao
d69406c4cb checkpoint 2025-03-09 15:42:00 +08:00
Will Miao
250e8445bb checkpoint 2025-03-09 12:29:24 +08:00
Will Miao
e6aafe8773 Add recipes checkpoint 2025-03-08 23:10:24 +08:00
Will Miao
e8e5012f0c Update flux workflow for v0.7.34 2025-03-07 21:23:50 +08:00
Will Miao
d6ed5b7fec Align image position in card component for better display 2025-03-07 17:43:48 +08:00
Will Miao
2186b7ee26 Improve bulk mode card display handling 2025-03-07 17:34:54 +08:00
172 changed files with 31281 additions and 4463 deletions

36
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,36 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
### **LoRA Manager Version**
- Version: `vX.X.X`
### **Environment Information**
- **Operating System**: (e.g., Windows 11, macOS Ventura, Ubuntu 22.04)
- **Browser & Version**: (e.g., Chrome 120.0.0, Edge 115.0.0)
### **Issue Description**
- Describe the issue in detail.
### **Steps to Reproduce**
1. Open LoRA Manager in [your browser].
2. Perform [specific action].
3. Observe the issue.
### **Expected Behavior**
- What did you expect to happen?
### **Screenshots** *(If applicable)*
- Upload screenshots or screen recordings.
### **Logs**
- Provide the **ComfyUI startup log** and any relevant error messages.
- Check the browser developer console (F12 → Console tab) and attach any errors.
### **Additional Context** *(Optional)*
- Any other relevant details.

View File

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

5
.gitignore vendored
View File

@@ -1,2 +1,5 @@
__pycache__/
settings.json
settings.json
output/*
py/run_test.py
.vscode/

110
README.md
View File

@@ -1,48 +1,69 @@
# ComfyUI LoRA Manager
A web-based management interface designed to help you organize and manage your local LoRA models in ComfyUI. Access the interface at: `http://localhost:8188/loras`
> **Revolutionize your workflow with the ultimate LoRA companion for ComfyUI!**
![Interface Preview](https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/static/images/Screenshot%202025-01-27%20172349.png)
[![Discord](https://img.shields.io/discord/1346296675538571315?color=7289DA&label=Discord&logo=discord&logoColor=white)](https://discord.gg/vcqNrWVFvM)
[![Release](https://img.shields.io/github/v/release/willmiao/ComfyUI-Lora-Manager?include_prereleases&color=blue&logo=github)](https://github.com/willmiao/ComfyUI-Lora-Manager/releases)
[![Release Date](https://img.shields.io/github/release-date/willmiao/ComfyUI-Lora-Manager?color=green&logo=github)](https://github.com/willmiao/ComfyUI-Lora-Manager/releases)
A comprehensive toolset that streamlines organizing, downloading, and applying LoRA models in ComfyUI. With powerful features like recipe management, checkpoint organization, and one-click workflow integration, working with models becomes faster, smoother, and significantly easier. Access the interface at: `http://localhost:8188/loras`
![Interface Preview](https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/static/images/screenshot.png)
## 📺 Tutorial: One-Click LoRA Integration
Watch this quick tutorial to learn how to use the new one-click LoRA integration feature:
[![One-Click LoRA Integration Tutorial](https://img.youtube.com/vi/qS95OjX3e70/0.jpg)](https://youtu.be/qS95OjX3e70)
[![LoRA Manager v0.8.0 - New Recipe Feature & Bulk Operations](https://img.youtube.com/vi/noN7f_ER7yo/0.jpg)](https://youtu.be/noN7f_ER7yo)
---
## Release Notes
### v0.7.35-beta
* Added base model filtering
* Implemented bulk operations (copy syntax, move multiple LoRAs)
* Added ability to edit LoRA model names in details view
* Added update checker with notification system
* Added support modal for user feedback and community links
### v0.8.6 Major Update
* **Checkpoint Management** - Added comprehensive management for model checkpoints including scanning, searching, filtering, and deletion
* **Enhanced Metadata Support** - New capabilities for retrieving and managing checkpoint metadata with improved operations
* **Improved Initial Loading** - Optimized cache initialization with visual progress indicators for better user experience
### v0.7.33
* Enhanced LoRA Loader node with visual strength adjustment widgets
* Added toggle switches for LoRA enable/disable
* Implemented image tooltips for LoRA preview
* Added TriggerWord Toggle node with visual word selection
* Fixed various bugs and improved stability
### v0.8.5
* **Enhanced LoRA & Recipe Connectivity** - Added Recipes tab in LoRA details to see all recipes using a specific LoRA
* **Improved Navigation** - New shortcuts to jump between related LoRAs and Recipes with one-click navigation
* **Video Preview Controls** - Added "Autoplay Videos on Hover" setting to optimize performance and reduce resource usage
* **UI Experience Refinements** - Smoother transitions between related content pages
### v0.7.3
* Added "Lora Loader (LoraManager)" custom node for workflows
* Implemented one-click LoRA integration
* Added direct copying of LoRA syntax from manager interface
* Added automatic preset strength value application
* Added automatic trigger word loading
### v0.8.4
* **Node Layout Improvements** - Fixed layout issues with LoRA Loader and Trigger Words Toggle nodes in newer ComfyUI frontend versions
* **Recipe LoRA Reconnection** - Added ability to reconnect deleted LoRAs in recipes by clicking the "deleted" badge in recipe details
* **Bug Fixes & Stability** - Resolved various issues for improved reliability
### v0.7.0
* Added direct CivitAI integration for downloading LoRAs
* Implemented version selection for model downloads
* Added target folder selection for downloads
* Added context menu with quick actions
* Added force refresh for CivitAI data
* Implemented LoRA movement between folders
* Added personal usage tips and notes for LoRAs
* Improved performance for details window
### v0.8.3
* **Enhanced Workflow Parser** - Rebuilt workflow analysis engine with improved support for ComfyUI core nodes and easier extensibility
* **Improved Recipe System** - Refined the experimental Save Recipe functionality with better workflow integration
* **New Save Image Node** - Added experimental node with metadata support for perfect CivitAI compatibility
* Supports dynamic filename prefixes with variables [1](https://github.com/nkchocoai/ComfyUI-SaveImageWithMetaData?tab=readme-ov-file#filename_prefix)
* **Default LoRA Root Setting** - Added configuration option for setting your preferred LoRA directory
### v0.8.2
* **Faster Initialization for Forge Users** - Improved first-run efficiency by utilizing existing `.json` and `.civitai.info` files from Forges CivitAI helper extension, making migration smoother.
* **LoRA Filename Editing** - Added support for renaming LoRA files directly within LoRA Manager.
* **Recipe Editing** - Users can now edit recipe names and tags.
* **Retain Deleted LoRAs in Recipes** - Deleted LoRAs will remain listed in recipes, allowing future functionality to reconnect them once re-obtained.
* **Download Missing LoRAs from Recipes** - Easily fetch missing LoRAs associated with a recipe.
### v0.8.1
* **Base Model Correction** - Added support for modifying base model associations to fix incorrect metadata for non-CivitAI LoRAs
* **LoRA Loader Flexibility** - Made CLIP input optional for model-only workflows like Hunyuan video generation
* **Expanded Recipe Support** - Added compatibility with 3 additional recipe metadata formats
* **Enhanced Showcase Images** - Generation parameters now displayed alongside LoRA preview images
* **UI Improvements & Bug Fixes** - Various interface refinements and stability enhancements
### v0.8.0
* **Introduced LoRA Recipes** - Create, import, save, and share your favorite LoRA combinations
* **Recipe Management System** - Easily browse, search, and organize your LoRA recipes
* **Workflow Integration** - Save recipes directly from your workflow with generation parameters preserved
* **Simplified Workflow Application** - Quickly apply saved recipes to new projects
* **Enhanced UI & UX** - Improved interface design and user experience
* **Bug Fixes & Stability** - Resolved various issues and enhanced overall performance
[View Update History](./update_logs.md)
@@ -76,11 +97,24 @@ Watch this quick tutorial to learn how to use the new one-click LoRA integration
- Trigger words at a glance
- One-click workflow integration with preset values
- 🔄 **Checkpoint Management**
- Scan and organize checkpoint models
- Filter and search your collection
- View and edit metadata
- Clean up and manage disk space
- 🧩 **LoRA Recipes**
- Save and share favorite LoRA combinations
- Preserve generation parameters for future reference
- Quick application to workflows
- Import/export functionality for community sharing
- 💻 **User Friendly**
- One-click access from ComfyUI menu
- Context menu for quick actions
- Custom notes and usage tips
- Multi-folder support
- Visual progress indicators during initialization
---
@@ -121,6 +155,15 @@ pip install requirements.txt
---
## Credits
This project has been inspired by and benefited from other excellent ComfyUI extensions:
- [ComfyUI-SaveImageWithMetaData](https://github.com/Comfy-Community/ComfyUI-SaveImageWithMetaData) - For the image metadata functionality
- [rgthree-comfy](https://github.com/rgthree/rgthree-comfy) - For the lora loader functionality
---
## Contributing
If you have suggestions, bug reports, or improvements, feel free to open an issue or contribute directly to the codebase. Pull requests are always welcome!
@@ -139,12 +182,3 @@ Join our Discord community for support, discussions, and updates:
[Discord Server](https://discord.gg/vcqNrWVFvM)
---
## 🗺️ Roadmap
- ✅ One-click integration of LoRAs into ComfyUI workflows with preset strength values
- 🤝 Improved usage tips retrieval from CivitAI model pages
- 🔌 Integration with Power LoRA Loader and other management tools
- 🛡️ Configurable NSFW level settings for content filtering
---

View File

@@ -1,14 +1,18 @@
from .py.lora_manager import LoraManager
from .py.nodes.lora_loader import LoraManagerLoader
from .py.nodes.trigger_word_toggle import TriggerWordToggle
from .py.nodes.lora_stacker import LoraStacker
from .py.nodes.save_image import SaveImage
NODE_CLASS_MAPPINGS = {
LoraManagerLoader.NAME: LoraManagerLoader,
TriggerWordToggle.NAME: TriggerWordToggle
TriggerWordToggle.NAME: TriggerWordToggle,
LoraStacker.NAME: LoraStacker,
SaveImage.NAME: SaveImage
}
WEB_DIRECTORY = "./web/comfyui"
# Register routes on import
LoraManager.add_routes()
__all__ = ['NODE_CLASS_MAPPINGS', 'WEB_DIRECTORY']
__all__ = ['NODE_CLASS_MAPPINGS', 'WEB_DIRECTORY']

View File

@@ -17,6 +17,8 @@ class Config:
# 静态路由映射字典, target to route mapping
self._route_mappings = {}
self.loras_roots = self._init_lora_paths()
self.checkpoints_roots = self._init_checkpoint_paths()
self.temp_directory = folder_paths.get_temp_directory()
# 在初始化时扫描符号链接
self._scan_symbolic_links()
@@ -38,9 +40,12 @@ class Config:
return False
def _scan_symbolic_links(self):
"""扫描所有 LoRA 根目录中的符号链接"""
"""扫描所有 LoRA 和 Checkpoint 根目录中的符号链接"""
for root in self.loras_roots:
self._scan_directory_links(root)
for root in self.checkpoints_roots:
self._scan_directory_links(root)
def _scan_directory_links(self, root: str):
"""递归扫描目录中的符号链接"""
@@ -72,7 +77,7 @@ class Config:
"""添加静态路由映射"""
normalized_path = os.path.normpath(path).replace(os.sep, '/')
self._route_mappings[normalized_path] = route
logger.info(f"Added route mapping: {normalized_path} -> {route}")
# logger.info(f"Added route mapping: {normalized_path} -> {route}")
def map_path_to_link(self, path: str) -> str:
"""将目标路径映射回符号链接路径"""
@@ -84,12 +89,23 @@ class Config:
mapped_path = normalized_path.replace(target_path, link_path, 1)
return mapped_path
return path
def map_link_to_path(self, link_path: str) -> str:
"""将符号链接路径映射回实际路径"""
normalized_link = os.path.normpath(link_path).replace(os.sep, '/')
# 检查路径是否包含在任何映射的目标路径中
for target_path, link_path in self._path_mappings.items():
if normalized_link.startswith(target_path):
# 如果路径以目标路径开头,则替换为实际路径
mapped_path = normalized_link.replace(target_path, link_path, 1)
return mapped_path
return link_path
def _init_lora_paths(self) -> List[str]:
"""Initialize and validate LoRA paths from ComfyUI settings"""
paths = list(set(path.replace(os.sep, "/")
paths = sorted(set(path.replace(os.sep, "/")
for path in folder_paths.get_folder_paths("loras")
if os.path.exists(path)))
if os.path.exists(path)), key=lambda p: p.lower())
print("Found LoRA roots:", "\n - " + "\n - ".join(paths))
if not paths:
@@ -103,6 +119,35 @@ class Config:
return paths
def _init_checkpoint_paths(self) -> List[str]:
"""Initialize and validate checkpoint paths from ComfyUI settings"""
# Get checkpoint paths from folder_paths
checkpoint_paths = folder_paths.get_folder_paths("checkpoints")
diffusion_paths = folder_paths.get_folder_paths("diffusers")
unet_paths = folder_paths.get_folder_paths("unet")
# Combine all checkpoint-related paths
all_paths = checkpoint_paths + diffusion_paths + unet_paths
# Filter and normalize paths
paths = sorted(set(path.replace(os.sep, "/")
for path in all_paths
if os.path.exists(path)), key=lambda p: p.lower())
print("Found checkpoint roots:", paths)
if not paths:
logger.warning("No valid checkpoint folders found in ComfyUI configuration")
return []
# 初始化路径映射,与 LoRA 路径处理方式相同
for path in paths:
real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
if real_path != path:
self.add_path_mapping(path, real_path)
return paths
def get_preview_static_url(self, preview_path: str) -> str:
"""Convert local preview path to static URL"""
if not preview_path:

View File

@@ -1,12 +1,11 @@
import asyncio
import os
from server import PromptServer # type: ignore
from .config import config
from .routes.lora_routes import LoraRoutes
from .routes.api_routes import ApiRoutes
from .services.lora_scanner import LoraScanner
from .services.file_monitor import LoraFileMonitor
from .services.lora_cache import LoraCache
from .routes.recipe_routes import RecipeRoutes
from .routes.checkpoints_routes import CheckpointsRoutes
from .services.service_registry import ServiceRegistry
import logging
logger = logging.getLogger(__name__)
@@ -19,7 +18,7 @@ class LoraManager:
"""Initialize and register all routes"""
app = PromptServer.instance.app
added_targets = set() # 用于跟踪已添加的目标路径
added_targets = set() # Track already added target paths
# Add static routes for each lora root
for idx, root in enumerate(config.loras_roots, start=1):
@@ -31,77 +30,143 @@ class LoraManager:
if link == root:
real_root = target
break
# 为原始路径添加静态路由
# Add static route for original path
app.router.add_static(preview_path, real_root)
logger.info(f"Added static route {preview_path} -> {real_root}")
# 记录路由映射
# Record route mapping
config.add_route_mapping(real_root, preview_path)
added_targets.add(real_root)
# 为符号链接的目标路径添加额外的静态路由
link_idx = 1
# Add static routes for each checkpoint root
for idx, root in enumerate(config.checkpoints_roots, start=1):
preview_path = f'/checkpoints_static/root{idx}/preview'
real_root = root
if root in config._path_mappings.values():
for target, link in config._path_mappings.items():
if link == root:
real_root = target
break
# Add static route for original path
app.router.add_static(preview_path, real_root)
logger.info(f"Added static route {preview_path} -> {real_root}")
# Record route mapping
config.add_route_mapping(real_root, preview_path)
added_targets.add(real_root)
# Add static routes for symlink target paths
link_idx = {
'lora': 1,
'checkpoint': 1
}
for target_path, link_path in config._path_mappings.items():
if target_path not in added_targets:
route_path = f'/loras_static/link_{link_idx}/preview'
# Determine if this is a checkpoint or lora link based on path
is_checkpoint = any(cp_root in link_path for cp_root in config.checkpoints_roots)
is_checkpoint = is_checkpoint or any(cp_root in target_path for cp_root in config.checkpoints_roots)
if is_checkpoint:
route_path = f'/checkpoints_static/link_{link_idx["checkpoint"]}/preview'
link_idx["checkpoint"] += 1
else:
route_path = f'/loras_static/link_{link_idx["lora"]}/preview'
link_idx["lora"] += 1
app.router.add_static(route_path, target_path)
logger.info(f"Added static route for link target {route_path} -> {target_path}")
config.add_route_mapping(target_path, route_path)
added_targets.add(target_path)
link_idx += 1
# Add static route for plugin assets
app.router.add_static('/loras_static', config.static_path)
# Setup feature routes
routes = LoraRoutes()
lora_routes = LoraRoutes()
checkpoints_routes = CheckpointsRoutes()
# Setup file monitoring
monitor = LoraFileMonitor(routes.scanner, config.loras_roots)
monitor.start()
# Initialize routes
lora_routes.setup_routes(app)
checkpoints_routes.setup_routes(app)
ApiRoutes.setup_routes(app)
RecipeRoutes.setup_routes(app)
routes.setup_routes(app)
ApiRoutes.setup_routes(app, monitor)
# Store monitor in app for cleanup
app['lora_monitor'] = monitor
# Schedule cache initialization using the application's startup handler
app.on_startup.append(lambda app: cls._schedule_cache_init(routes.scanner))
# Schedule service initialization
app.on_startup.append(lambda app: cls._initialize_services())
# Add cleanup
app.on_shutdown.append(cls._cleanup)
app.on_shutdown.append(ApiRoutes.cleanup)
@classmethod
async def _schedule_cache_init(cls, scanner: LoraScanner):
"""Schedule cache initialization in the running event loop"""
async def _initialize_services(cls):
"""Initialize all services using the ServiceRegistry"""
try:
# 创建低优先级的初始化任务
asyncio.create_task(cls._initialize_cache(scanner), name='lora_cache_init')
except Exception as e:
print(f"LoRA Manager: Error scheduling cache initialization: {e}")
@classmethod
async def _initialize_cache(cls, scanner: LoraScanner):
"""Initialize cache in background"""
try:
# 设置初始缓存占位
scanner._cache = LoraCache(
raw_data=[],
sorted_by_name=[],
sorted_by_date=[],
folders=[]
)
logger.info("LoRA Manager: Initializing services via ServiceRegistry")
# 分阶段加载缓存
await scanner.get_cached_data(force_refresh=True)
# Initialize CivitaiClient first to ensure it's ready for other services
civitai_client = await ServiceRegistry.get_civitai_client()
# Get file monitors through ServiceRegistry
lora_monitor = await ServiceRegistry.get_lora_monitor()
checkpoint_monitor = await ServiceRegistry.get_checkpoint_monitor()
# Start monitors
lora_monitor.start()
logger.info("Lora monitor started")
# Make sure checkpoint monitor has paths before starting
await checkpoint_monitor.initialize_paths()
checkpoint_monitor.start()
logger.info("Checkpoint monitor started")
# Register DownloadManager with ServiceRegistry
download_manager = await ServiceRegistry.get_download_manager()
# Initialize WebSocket manager
ws_manager = await ServiceRegistry.get_websocket_manager()
# Initialize scanners in background
lora_scanner = await ServiceRegistry.get_lora_scanner()
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
# Initialize recipe scanner if needed
recipe_scanner = await ServiceRegistry.get_recipe_scanner()
# Create low-priority initialization tasks
asyncio.create_task(lora_scanner.initialize_in_background(), name='lora_cache_init')
asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init')
asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')
logger.info("LoRA Manager: All services initialized and background tasks scheduled")
except Exception as e:
print(f"LoRA Manager: Error initializing cache: {e}")
logger.error(f"LoRA Manager: Error initializing services: {e}", exc_info=True)
@classmethod
async def _cleanup(cls, app):
"""Cleanup resources"""
if 'lora_monitor' in app:
app['lora_monitor'].stop()
"""Cleanup resources using ServiceRegistry"""
try:
logger.info("LoRA Manager: Cleaning up services")
# Get monitors from ServiceRegistry
lora_monitor = await ServiceRegistry.get_service("lora_monitor")
if lora_monitor:
lora_monitor.stop()
logger.info("Stopped LoRA monitor")
checkpoint_monitor = await ServiceRegistry.get_service("checkpoint_monitor")
if checkpoint_monitor:
checkpoint_monitor.stop()
logger.info("Stopped checkpoint monitor")
# Close CivitaiClient gracefully
civitai_client = await ServiceRegistry.get_service("civitai_client")
if civitai_client:
await civitai_client.close()
logger.info("Closed CivitaiClient connection")
except Exception as e:
logger.error(f"Error during cleanup: {e}", exc_info=True)

View File

@@ -1,3 +1,4 @@
import logging
from nodes import LoraLoader
from comfy.comfy_types import IO # type: ignore
from ..services.lora_scanner import LoraScanner
@@ -6,16 +7,18 @@ import asyncio
import os
from .utils import FlexibleOptionalInputType, any_type
logger = logging.getLogger(__name__)
class LoraManagerLoader:
NAME = "Lora Loader (LoraManager)"
CATEGORY = "loaders"
CATEGORY = "Lora Manager/loaders"
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"model": ("MODEL",),
"clip": ("CLIP",),
# "clip": ("CLIP",),
"text": (IO.STRING, {
"multiline": True,
"dynamicPrompts": True,
@@ -26,8 +29,8 @@ class LoraManagerLoader:
"optional": FlexibleOptionalInputType(any_type),
}
RETURN_TYPES = ("MODEL", "CLIP", IO.STRING)
RETURN_NAMES = ("MODEL", "CLIP", "trigger_words")
RETURN_TYPES = ("MODEL", "CLIP", IO.STRING, IO.STRING)
RETURN_NAMES = ("MODEL", "CLIP", "trigger_words", "loaded_loras")
FUNCTION = "load_loras"
async def get_lora_info(self, lora_name):
@@ -49,29 +52,73 @@ class LoraManagerLoader:
return relative_path, trigger_words
return lora_name, [] # Fallback if not found
def load_loras(self, model, clip, text, **kwargs):
"""Loads multiple LoRAs based on the kwargs input."""
def extract_lora_name(self, lora_path):
"""Extract the lora name from a lora path (e.g., 'IL\\aorunIllstrious.safetensors' -> 'aorunIllstrious')"""
# Get the basename without extension
basename = os.path.basename(lora_path)
return os.path.splitext(basename)[0]
def _get_loras_list(self, kwargs):
"""Helper to extract loras list from either old or new kwargs format"""
if 'loras' not in kwargs:
return []
loras_data = kwargs['loras']
# Handle new format: {'loras': {'__value__': [...]}}
if isinstance(loras_data, dict) and '__value__' in loras_data:
return loras_data['__value__']
# Handle old format: {'loras': [...]}
elif isinstance(loras_data, list):
return loras_data
# Unexpected format
else:
logger.warning(f"Unexpected loras format: {type(loras_data)}")
return []
def load_loras(self, model, text, **kwargs):
"""Loads multiple LoRAs based on the kwargs input and lora_stack."""
loaded_loras = []
all_trigger_words = []
if 'loras' in kwargs:
for lora in kwargs['loras']:
if not lora.get('active', False):
continue
lora_name = lora['name']
strength = float(lora['strength'])
clip = kwargs.get('clip', None)
lora_stack = kwargs.get('lora_stack', None)
# First process lora_stack if available
if lora_stack:
for lora_path, model_strength, clip_strength in lora_stack:
# Apply the LoRA using the provided path and strengths
model, clip = LoraLoader().load_lora(model, clip, lora_path, model_strength, clip_strength)
# Get lora path and trigger words
lora_path, trigger_words = asyncio.run(self.get_lora_info(lora_name))
# Extract lora name for trigger words lookup
lora_name = self.extract_lora_name(lora_path)
_, trigger_words = asyncio.run(self.get_lora_info(lora_name))
# Apply the LoRA using the resolved path
model, clip = LoraLoader().load_lora(model, clip, lora_path, strength, strength)
loaded_loras.append(f"{lora_name}: {strength}")
# Add trigger words to collection
all_trigger_words.extend(trigger_words)
loaded_loras.append(f"{lora_name}: {model_strength}")
trigger_words_text = ", ".join(all_trigger_words) if all_trigger_words else ""
# Then process loras from kwargs with support for both old and new formats
loras_list = self._get_loras_list(kwargs)
for lora in loras_list:
if not lora.get('active', False):
continue
lora_name = lora['name']
strength = float(lora['strength'])
# Get lora path and trigger words
lora_path, trigger_words = asyncio.run(self.get_lora_info(lora_name))
# Apply the LoRA using the resolved path
model, clip = LoraLoader().load_lora(model, clip, lora_path, strength, strength)
loaded_loras.append(f"{lora_name}: {strength}")
# Add trigger words to collection
all_trigger_words.extend(trigger_words)
# use ',, ' to separate trigger words for group mode
trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""
# Format loaded_loras as <lora:lora_name:strength> separated by spaces
formatted_loras = " ".join([f"<lora:{name.split(':')[0].strip()}:{str(strength).strip()}>"
for name, strength in [item.split(':') for item in loaded_loras]])
return (model, clip, trigger_words_text)
return (model, clip, trigger_words_text, formatted_loras)

118
py/nodes/lora_stacker.py Normal file
View File

@@ -0,0 +1,118 @@
from comfy.comfy_types import IO # type: ignore
from ..services.lora_scanner import LoraScanner
from ..config import config
import asyncio
import os
from .utils import FlexibleOptionalInputType, any_type
import logging
logger = logging.getLogger(__name__)
class LoraStacker:
    """ComfyUI node that assembles a LORA_STACK (list of
    (path, model_strength, clip_strength) tuples) from the UI's lora widget
    data and an optional upstream stack, WITHOUT loading any weights.
    Also returns the combined trigger words and the active-lora syntax text."""

    NAME = "Lora Stacker (LoraManager)"
    CATEGORY = "Lora Manager/stackers"

    @classmethod
    def INPUT_TYPES(cls):
        # NOTE(review): `text` is not read by stack_loras below — presumably
        # the frontend widget parses it and injects the `loras` kwarg; confirm.
        return {
            "required": {
                "text": (IO.STRING, {
                    "multiline": True,
                    "dynamicPrompts": True,
                    "tooltip": "Format: <lora:lora_name:strength> separated by spaces or punctuation",
                    "placeholder": "LoRA syntax input: <lora:name:strength>"
                }),
            },
            # Accepts arbitrary extra inputs (e.g. `loras`, `lora_stack`)
            # injected by the frontend.
            "optional": FlexibleOptionalInputType(any_type),
        }

    RETURN_TYPES = ("LORA_STACK", IO.STRING, IO.STRING)
    RETURN_NAMES = ("LORA_STACK", "trigger_words", "active_loras")
    FUNCTION = "stack_loras"

    async def get_lora_info(self, lora_name):
        """Get the lora path and trigger words from cache.

        Returns (path relative to a configured loras root, trigger_words).
        Falls back to (lora_name, []) when the name is missing from the
        scanner cache or its file path lies outside every configured root.
        """
        scanner = await LoraScanner.get_instance()
        cache = await scanner.get_cached_data()

        for item in cache.raw_data:
            if item.get('file_name') == lora_name:
                file_path = item.get('file_path')
                if file_path:
                    for root in config.loras_roots:
                        # Normalize separators before the prefix match.
                        root = root.replace(os.sep, '/')
                        if file_path.startswith(root):
                            relative_path = os.path.relpath(file_path, root).replace(os.sep, '/')
                            # Get trigger words from civitai metadata
                            civitai = item.get('civitai', {})
                            trigger_words = civitai.get('trainedWords', []) if civitai else []
                            return relative_path, trigger_words

        return lora_name, []  # Fallback if not found

    def extract_lora_name(self, lora_path):
        """Extract the lora name from a lora path (e.g., 'IL\\aorunIllstrious.safetensors' -> 'aorunIllstrious')"""
        # Get the basename without extension
        basename = os.path.basename(lora_path)
        return os.path.splitext(basename)[0]

    def _get_loras_list(self, kwargs):
        """Helper to extract loras list from either old or new kwargs format"""
        if 'loras' not in kwargs:
            return []

        loras_data = kwargs['loras']
        # Handle new format: {'loras': {'__value__': [...]}}
        if isinstance(loras_data, dict) and '__value__' in loras_data:
            return loras_data['__value__']
        # Handle old format: {'loras': [...]}
        elif isinstance(loras_data, list):
            return loras_data
        # Unexpected format
        else:
            logger.warning(f"Unexpected loras format: {type(loras_data)}")
            return []

    def stack_loras(self, text, **kwargs):
        """Stacks multiple LoRAs based on the kwargs input without loading them.

        Returns (stack, trigger_words_text, active_loras_text).
        NOTE(review): each cache lookup uses asyncio.run() from this
        synchronous entry point, which assumes no event loop is running on
        the calling thread — confirm against ComfyUI's execution model.
        """
        stack = []
        active_loras = []
        all_trigger_words = []

        # Process existing lora_stack if available
        lora_stack = kwargs.get('lora_stack', None)
        if lora_stack:
            stack.extend(lora_stack)
            # Get trigger words from existing stack entries
            for lora_path, _, _ in lora_stack:
                lora_name = self.extract_lora_name(lora_path)
                _, trigger_words = asyncio.run(self.get_lora_info(lora_name))
                all_trigger_words.extend(trigger_words)

        # Process loras from kwargs with support for both old and new formats
        loras_list = self._get_loras_list(kwargs)

        for lora in loras_list:
            # Skip entries the user has toggled off in the widget.
            if not lora.get('active', False):
                continue

            lora_name = lora['name']
            model_strength = float(lora['strength'])
            clip_strength = model_strength  # Using same strength for both as in the original loader

            # Get lora path and trigger words
            lora_path, trigger_words = asyncio.run(self.get_lora_info(lora_name))

            # Add to stack without loading
            # replace '/' with os.sep to avoid different OS path format
            stack.append((lora_path.replace('/', os.sep), model_strength, clip_strength))
            active_loras.append((lora_name, model_strength))

            # Add trigger words to collection
            all_trigger_words.extend(trigger_words)

        # use ',, ' to separate trigger words for group mode
        trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""

        # Format active_loras as <lora:lora_name:strength> separated by spaces
        active_loras_text = " ".join([f"<lora:{name}:{str(strength).strip()}>"
                                      for name, strength in active_loras])

        return (stack, trigger_words_text, active_loras_text)

375
py/nodes/save_image.py Normal file
View File

@@ -0,0 +1,375 @@
import json
import os
import asyncio
import re
import numpy as np
import folder_paths # type: ignore
from ..services.lora_scanner import LoraScanner
from ..workflow.parser import WorkflowParser
from PIL import Image, PngImagePlugin
import piexif
from io import BytesIO
class SaveImage:
    """ComfyUI output node that saves images with A1111-style generation
    metadata (prompt, negative prompt, sampler, seed, LoRA hashes, ...)
    embedded as PNG tEXt ("parameters") or as an EXIF UserComment for
    JPEG/WebP, so external tools can read the parameters back."""

    NAME = "Save Image (LoraManager)"
    CATEGORY = "Lora Manager/utils"
    DESCRIPTION = "Save images with embedded generation metadata in compatible format"

    def __init__(self):
        # Base output directory provided by ComfyUI.
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""
        # zlib compression level used for PNG output (0-9).
        self.compress_level = 4
        self.counter = 0

    # Add pattern format regex for filename substitution: matches
    # %key% / %key:arg% placeholders consumed by format_filename.
    # NOTE(review): treated as a class attribute here because
    # format_filename reads self.pattern_format — confirm placement.
    pattern_format = re.compile(r"(%[^%]+%)")

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "images": ("IMAGE",),
                "filename_prefix": ("STRING", {"default": "ComfyUI"}),
                "file_format": (["png", "jpeg", "webp"],),
            },
            "optional": {
                # Overrides the parsed positive prompt in the metadata.
                "custom_prompt": ("STRING", {"default": "", "forceInput": True}),
                "lossless_webp": ("BOOLEAN", {"default": True}),
                "quality": ("INT", {"default": 100, "min": 1, "max": 100}),
                "embed_workflow": ("BOOLEAN", {"default": False}),
                "add_counter_to_filename": ("BOOLEAN", {"default": True}),
            },
            "hidden": {
                "prompt": "PROMPT",
                "extra_pnginfo": "EXTRA_PNGINFO",
            },
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("images",)
    FUNCTION = "process_image"
    OUTPUT_NODE = True

    async def get_lora_hash(self, lora_name):
        """Get the lora hash from cache.

        Returns the sha256 string for the named lora, or None if the name
        is not present in the scanner cache.
        """
        scanner = await LoraScanner.get_instance()
        cache = await scanner.get_cached_data()

        for item in cache.raw_data:
            if item.get('file_name') == lora_name:
                return item.get('sha256')

        return None

    async def format_metadata(self, parsed_workflow, custom_prompt=None):
        """Format metadata in the requested format similar to userComment example.

        Builds the A1111-style parameters text:
        line 1..n: prompt (plus lora syntax), optional "Negative prompt: ...",
        final line: comma-separated generation parameters.
        Returns "" when parsed_workflow is empty.
        """
        if not parsed_workflow:
            return ""

        # Extract the prompt and negative prompt
        prompt = parsed_workflow.get('prompt', '')
        negative_prompt = parsed_workflow.get('negative_prompt', '')

        # Override prompt with custom_prompt if provided
        if custom_prompt:
            prompt = custom_prompt

        # Extract loras from the prompt if present
        loras_text = parsed_workflow.get('loras', '')
        lora_hashes = {}

        # If loras are found, add them on a new line after the prompt
        if loras_text:
            prompt_with_loras = f"{prompt}\n{loras_text}"

            # Extract lora names from the format <lora:name:strength>
            lora_matches = re.findall(r'<lora:([^:]+):([^>]+)>', loras_text)

            # Get hash for each lora
            for lora_name, strength in lora_matches:
                hash_value = await self.get_lora_hash(lora_name)
                if hash_value:
                    lora_hashes[lora_name] = hash_value
        else:
            prompt_with_loras = prompt

        # Format the first part (prompt and loras)
        metadata_parts = [prompt_with_loras]

        # Add negative prompt
        if negative_prompt:
            metadata_parts.append(f"Negative prompt: {negative_prompt}")

        # Format the second part (generation parameters)
        params = []

        # Add standard parameters in the correct order
        if 'steps' in parsed_workflow:
            params.append(f"Steps: {parsed_workflow.get('steps')}")

        if 'sampler' in parsed_workflow:
            sampler = parsed_workflow.get('sampler')
            # Convert ComfyUI sampler names to user-friendly names
            sampler_mapping = {
                'euler': 'Euler',
                'euler_ancestral': 'Euler a',
                'dpm_2': 'DPM2',
                'dpm_2_ancestral': 'DPM2 a',
                'heun': 'Heun',
                'dpm_fast': 'DPM fast',
                'dpm_adaptive': 'DPM adaptive',
                'lms': 'LMS',
                'dpmpp_2s_ancestral': 'DPM++ 2S a',
                'dpmpp_sde': 'DPM++ SDE',
                'dpmpp_sde_gpu': 'DPM++ SDE',
                'dpmpp_2m': 'DPM++ 2M',
                'dpmpp_2m_sde': 'DPM++ 2M SDE',
                'dpmpp_2m_sde_gpu': 'DPM++ 2M SDE',
                'ddim': 'DDIM'
            }
            # Unknown samplers pass through unchanged.
            sampler_name = sampler_mapping.get(sampler, sampler)
            params.append(f"Sampler: {sampler_name}")

        if 'scheduler' in parsed_workflow:
            scheduler = parsed_workflow.get('scheduler')
            scheduler_mapping = {
                'normal': 'Simple',
                'karras': 'Karras',
                'exponential': 'Exponential',
                'sgm_uniform': 'SGM Uniform',
                'sgm_quadratic': 'SGM Quadratic'
            }
            scheduler_name = scheduler_mapping.get(scheduler, scheduler)
            params.append(f"Schedule type: {scheduler_name}")

        # CFG scale (cfg in parsed_workflow)
        if 'cfg_scale' in parsed_workflow:
            params.append(f"CFG scale: {parsed_workflow.get('cfg_scale')}")
        elif 'cfg' in parsed_workflow:
            params.append(f"CFG scale: {parsed_workflow.get('cfg')}")

        # Seed
        if 'seed' in parsed_workflow:
            params.append(f"Seed: {parsed_workflow.get('seed')}")

        # Size
        if 'size' in parsed_workflow:
            params.append(f"Size: {parsed_workflow.get('size')}")

        # Model info
        if 'checkpoint' in parsed_workflow:
            # Extract basename without path
            checkpoint = os.path.basename(parsed_workflow.get('checkpoint', ''))
            # Remove extension if present
            checkpoint = os.path.splitext(checkpoint)[0]
            params.append(f"Model: {checkpoint}")

        # Add LoRA hashes if available
        if lora_hashes:
            lora_hash_parts = []
            for lora_name, hash_value in lora_hashes.items():
                lora_hash_parts.append(f"{lora_name}: {hash_value}")
            if lora_hash_parts:
                params.append(f"Lora hashes: \"{', '.join(lora_hash_parts)}\"")

        # Combine all parameters with commas
        metadata_parts.append(", ".join(params))

        # Join all parts with a new line
        return "\n".join(metadata_parts)

    # credit to nkchocoai
    # Add format_filename method to handle pattern substitution
    def format_filename(self, filename, parsed_workflow):
        """Format filename with metadata values.

        Supported placeholders: %seed%, %width%, %height%, %pprompt[:len]%,
        %nprompt[:len]%, %model[:len]%, %date[:format]% (format uses
        yyyy/MM/dd/hh/mm/ss tokens).  Unknown placeholders are left as-is.
        """
        if not parsed_workflow:
            return filename

        result = re.findall(self.pattern_format, filename)
        for segment in result:
            # "%key:arg%" -> ["key", "arg"]
            parts = segment.replace("%", "").split(":")
            key = parts[0]

            if key == "seed" and 'seed' in parsed_workflow:
                filename = filename.replace(segment, str(parsed_workflow.get('seed', '')))
            elif key == "width" and 'size' in parsed_workflow:
                # size may be a "WxH" string or a (W, H) sequence.
                size = parsed_workflow.get('size', 'x')
                w = size.split('x')[0] if isinstance(size, str) else size[0]
                filename = filename.replace(segment, str(w))
            elif key == "height" and 'size' in parsed_workflow:
                size = parsed_workflow.get('size', 'x')
                h = size.split('x')[1] if isinstance(size, str) else size[1]
                filename = filename.replace(segment, str(h))
            elif key == "pprompt" and 'prompt' in parsed_workflow:
                prompt = parsed_workflow.get('prompt', '').replace("\n", " ")
                if len(parts) >= 2:
                    length = int(parts[1])
                    prompt = prompt[:length]
                filename = filename.replace(segment, prompt.strip())
            elif key == "nprompt" and 'negative_prompt' in parsed_workflow:
                prompt = parsed_workflow.get('negative_prompt', '').replace("\n", " ")
                if len(parts) >= 2:
                    length = int(parts[1])
                    prompt = prompt[:length]
                filename = filename.replace(segment, prompt.strip())
            elif key == "model" and 'checkpoint' in parsed_workflow:
                model = parsed_workflow.get('checkpoint', '')
                model = os.path.splitext(os.path.basename(model))[0]
                if len(parts) >= 2:
                    length = int(parts[1])
                    model = model[:length]
                filename = filename.replace(segment, model)
            elif key == "date":
                from datetime import datetime
                now = datetime.now()
                date_table = {
                    "yyyy": str(now.year),
                    "MM": str(now.month).zfill(2),
                    "dd": str(now.day).zfill(2),
                    "hh": str(now.hour).zfill(2),
                    "mm": str(now.minute).zfill(2),
                    "ss": str(now.second).zfill(2),
                }
                if len(parts) >= 2:
                    date_format = parts[1]
                    for k, v in date_table.items():
                        date_format = date_format.replace(k, v)
                    filename = filename.replace(segment, date_format)
                else:
                    # No explicit format: fall back to full timestamp.
                    date_format = "yyyyMMddhhmmss"
                    for k, v in date_table.items():
                        date_format = date_format.replace(k, v)
                    filename = filename.replace(segment, date_format)

        return filename

    def save_images(self, images, filename_prefix, file_format, prompt=None, extra_pnginfo=None,
                    lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True,
                    custom_prompt=None):
        """Save images with metadata.

        Parses the workflow, formats the metadata text, then writes each
        image of the batch (with an incrementing counter suffix when
        enabled).  Returns a list of {filename, subfolder, type} dicts for
        successfully saved files.
        """
        results = []

        # Parse the workflow using the WorkflowParser
        parser = WorkflowParser()
        if prompt:
            parsed_workflow = parser.parse_workflow(prompt)
        else:
            parsed_workflow = {}

        # Get or create metadata asynchronously
        # NOTE(review): asyncio.run assumes no running event loop on this
        # thread — confirm against the execution environment.
        metadata = asyncio.run(self.format_metadata(parsed_workflow, custom_prompt))

        # Process filename_prefix with pattern substitution
        filename_prefix = self.format_filename(filename_prefix, parsed_workflow)

        # Get initial save path info once for the batch
        full_output_folder, filename, counter, subfolder, processed_prefix = folder_paths.get_save_image_path(
            filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]
        )

        # Create directory if it doesn't exist
        if not os.path.exists(full_output_folder):
            os.makedirs(full_output_folder, exist_ok=True)

        # Process each image with incrementing counter
        for i, image in enumerate(images):
            # Convert the tensor image to numpy array
            img = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(img, 0, 255).astype(np.uint8))

            # Generate filename with counter if needed
            base_filename = filename
            if add_counter_to_filename:
                # Use counter + i to ensure unique filenames for all images in batch
                current_counter = counter + i
                base_filename += f"_{current_counter:05}"

            # Set file extension and prepare saving parameters
            # NOTE(review): no else branch — an unexpected file_format would
            # leave `file`/`save_kwargs` undefined; INPUT_TYPES restricts the
            # value to png/jpeg/webp. `file_extension` is assigned but unused.
            if file_format == "png":
                file = base_filename + ".png"
                file_extension = ".png"
                save_kwargs = {"optimize": True, "compress_level": self.compress_level}
                pnginfo = PngImagePlugin.PngInfo()
            elif file_format == "jpeg":
                file = base_filename + ".jpg"
                file_extension = ".jpg"
                save_kwargs = {"quality": quality, "optimize": True}
            elif file_format == "webp":
                file = base_filename + ".webp"
                file_extension = ".webp"
                save_kwargs = {"quality": quality, "lossless": lossless_webp}

            # Full save path
            file_path = os.path.join(full_output_folder, file)

            # Save the image with metadata
            try:
                if file_format == "png":
                    if metadata:
                        pnginfo.add_text("parameters", metadata)
                    if embed_workflow and extra_pnginfo is not None:
                        workflow_json = json.dumps(extra_pnginfo["workflow"])
                        pnginfo.add_text("workflow", workflow_json)
                    save_kwargs["pnginfo"] = pnginfo
                    img.save(file_path, format="PNG", **save_kwargs)
                elif file_format == "jpeg":
                    # For JPEG, use piexif
                    if metadata:
                        try:
                            exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
                            exif_bytes = piexif.dump(exif_dict)
                            save_kwargs["exif"] = exif_bytes
                        except Exception as e:
                            # Best-effort: save the image even if EXIF fails.
                            print(f"Error adding EXIF data: {e}")
                    img.save(file_path, format="JPEG", **save_kwargs)
                elif file_format == "webp":
                    # For WebP, also use piexif for metadata
                    if metadata:
                        try:
                            exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
                            exif_bytes = piexif.dump(exif_dict)
                            save_kwargs["exif"] = exif_bytes
                        except Exception as e:
                            print(f"Error adding EXIF data: {e}")
                    img.save(file_path, format="WEBP", **save_kwargs)

                results.append({
                    "filename": file,
                    "subfolder": subfolder,
                    "type": self.type
                })
            except Exception as e:
                # A failed save is logged and skipped; remaining images still save.
                print(f"Error saving image: {e}")

        return results

    def process_image(self, images, filename_prefix="ComfyUI", file_format="png", prompt=None, extra_pnginfo=None,
                      lossless_webp=True, quality=100, embed_workflow=False, add_counter_to_filename=True,
                      custom_prompt=""):
        """Process and save image with metadata.

        Node entry point: normalizes the batch, delegates to save_images,
        and passes the images through unchanged.
        NOTE(review): save_images' result list is discarded, so no UI
        preview dict is returned — confirm this is intentional.
        """
        # Make sure the output directory exists
        os.makedirs(self.output_dir, exist_ok=True)

        # Ensure images is always a list of images
        if len(images.shape) == 3:  # Single image (height, width, channels)
            images = [images]
        else:  # Multiple images (batch, height, width, channels)
            images = [img for img in images]

        # Save all images
        results = self.save_images(
            images,
            filename_prefix,
            file_format,
            prompt,
            extra_pnginfo,
            lossless_webp,
            quality,
            embed_workflow,
            add_counter_to_filename,
            custom_prompt if custom_prompt.strip() else None
        )

        return (images,)

View File

@@ -1,17 +1,22 @@
import json
import re
from server import PromptServer # type: ignore
from .utils import FlexibleOptionalInputType, any_type
import logging
logger = logging.getLogger(__name__)
class TriggerWordToggle:
NAME = "TriggerWord Toggle (LoraManager)"
CATEGORY = "lora manager"
CATEGORY = "Lora Manager/utils"
DESCRIPTION = "Toggle trigger words on/off"
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"trigger_words": ("STRING", {"defaultInput": True, "forceInput": True}),
"group_mode": ("BOOLEAN", {"default": True}),
},
"optional": FlexibleOptionalInputType(any_type),
"hidden": {
@@ -23,7 +28,24 @@ class TriggerWordToggle:
RETURN_NAMES = ("filtered_trigger_words",)
FUNCTION = "process_trigger_words"
def process_trigger_words(self, trigger_words, id, **kwargs):
def _get_toggle_data(self, kwargs, key='toggle_trigger_words'):
"""Helper to extract data from either old or new kwargs format"""
if key not in kwargs:
return None
data = kwargs[key]
# Handle new format: {'key': {'__value__': ...}}
if isinstance(data, dict) and '__value__' in data:
return data['__value__']
# Handle old format: {'key': ...}
else:
return data
def process_trigger_words(self, id, group_mode, **kwargs):
# Handle both old and new formats for trigger_words
trigger_words_data = self._get_toggle_data(kwargs, 'trigger_words')
trigger_words = trigger_words_data if isinstance(trigger_words_data, str) else ""
# Send trigger words to frontend
PromptServer.instance.send_sync("trigger_word_update", {
"id": id,
@@ -32,31 +54,43 @@ class TriggerWordToggle:
filtered_triggers = trigger_words
if 'toggle_trigger_words' in kwargs:
# Get toggle data with support for both formats
trigger_data = self._get_toggle_data(kwargs, 'toggle_trigger_words')
if trigger_data:
try:
# Get trigger word toggle data
trigger_data = kwargs['toggle_trigger_words']
# Convert to list if it's a JSON string
if isinstance(trigger_data, str):
trigger_data = json.loads(trigger_data)
# Create dictionaries to track active state of words
# Create dictionaries to track active state of words or groups
active_state = {item['text']: item.get('active', False) for item in trigger_data}
# Split original trigger words
original_words = [word.strip() for word in trigger_words.split(',')]
# Filter words: keep those not in toggle_trigger_words or those that are active
filtered_words = [word for word in original_words if word not in active_state or active_state[word]]
# Join them in the same format as input
if filtered_words:
filtered_triggers = ', '.join(filtered_words)
if group_mode:
# Split by two or more consecutive commas to get groups
groups = re.split(r',{2,}', trigger_words)
# Remove leading/trailing whitespace from each group
groups = [group.strip() for group in groups]
# Filter groups: keep those not in toggle_trigger_words or those that are active
filtered_groups = [group for group in groups if group not in active_state or active_state[group]]
if filtered_groups:
filtered_triggers = ', '.join(filtered_groups)
else:
filtered_triggers = ""
else:
filtered_triggers = ""
# Original behavior for individual words mode
original_words = [word.strip() for word in trigger_words.split(',')]
# Filter out empty strings
original_words = [word for word in original_words if word]
filtered_words = [word for word in original_words if word not in active_state or active_state[word]]
if filtered_words:
filtered_triggers = ', '.join(filtered_words)
else:
filtered_triggers = ""
except Exception as e:
print(f"Error processing trigger words: {e}")
logger.error(f"Error processing trigger words: {e}")
return (filtered_triggers,)

View File

@@ -4,6 +4,7 @@ class AnyType(str):
def __ne__(self, __value: object) -> bool:
return False
# Credit to Regis Gaughan, III (rgthree)
class FlexibleOptionalInputType(dict):
"""A special class to make flexible nodes that pass data to our python handlers.

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,678 @@
import os
import json
import jinja2
from aiohttp import web
import logging
import asyncio
from ..utils.routes_common import ModelRouteUtils
from ..utils.constants import NSFW_LEVELS
from ..services.websocket_manager import ws_manager
from ..services.service_registry import ServiceRegistry
from ..config import config
from ..services.settings_manager import settings
from ..utils.utils import fuzzy_match
logger = logging.getLogger(__name__)
class CheckpointsRoutes:
"""API routes for checkpoint management"""
def __init__(self):
    """Set up route state; services are resolved lazily at app startup."""
    # Filled in by initialize_services once the ServiceRegistry is ready.
    self.scanner = None
    self.download_manager = None
    # Serializes concurrent checkpoint download requests.
    self._download_lock = asyncio.Lock()
    # Jinja environment for rendering the checkpoints page.
    template_loader = jinja2.FileSystemLoader(config.templates_path)
    self.template_env = jinja2.Environment(loader=template_loader, autoescape=True)
async def initialize_services(self):
    """Initialize services from ServiceRegistry.

    Runs as an aiohttp on_startup hook (see setup_routes): the registry is
    only populated once the app starts, so this cannot happen in __init__.
    """
    self.scanner = await ServiceRegistry.get_checkpoint_scanner()
    self.download_manager = await ServiceRegistry.get_download_manager()
def setup_routes(self, app):
    """Register the checkpoints page, REST API and WebSocket routes on `app`."""
    # Resolve scanner/download manager from the registry at app startup.
    app.on_startup.append(lambda _: self.initialize_services())

    # HTML page.
    app.router.add_get('/checkpoints', self.handle_checkpoints_page)

    # Read-only API endpoints.
    for path, handler in (
        ('/api/checkpoints', self.get_checkpoints),
        ('/api/checkpoints/base-models', self.get_base_models),
        ('/api/checkpoints/top-tags', self.get_top_tags),
        ('/api/checkpoints/scan', self.scan_checkpoints),
        ('/api/checkpoints/info/{name}', self.get_checkpoint_info),
        ('/api/checkpoints/roots', self.get_checkpoint_roots),
        ('/api/checkpoints/civitai/versions/{model_id}', self.get_civitai_versions),
    ):
        app.router.add_get(path, handler)

    # Mutating API endpoints (model management mirrors the LoRA routes).
    for path, handler in (
        ('/api/checkpoints/fetch-all-civitai', self.fetch_all_civitai),
        ('/api/checkpoints/delete', self.delete_model),
        ('/api/checkpoints/fetch-civitai', self.fetch_civitai),
        ('/api/checkpoints/replace-preview', self.replace_preview),
        ('/api/checkpoints/download', self.download_checkpoint),
        ('/api/checkpoints/save-metadata', self.save_metadata),
    ):
        app.router.add_post(path, handler)

    # WebSocket pushing checkpoint download progress to the frontend.
    app.router.add_get('/ws/checkpoint-progress', ws_manager.handle_checkpoint_connection)
async def get_checkpoints(self, request):
    """Get paginated checkpoint data.

    Query parameters: page / page_size (integers), sort_by, folder, search,
    fuzzy_search, repeated base_model / tag values, per-field search_*
    toggles, and an optional hash or hashes (JSON list) filter.

    Returns a JSON payload with `items` plus pagination metadata;
    400 on malformed numeric parameters, 500 on unexpected errors.
    """
    try:
        # Validate paging params explicitly: previously a non-numeric value
        # raised ValueError into the generic handler (500), and page_size=0
        # caused a ZeroDivisionError in the pagination math.
        try:
            page = max(1, int(request.query.get('page', '1')))
            page_size = min(max(int(request.query.get('page_size', '20')), 1), 100)
        except ValueError:
            return web.json_response(
                {"error": "'page' and 'page_size' must be integers"}, status=400)

        sort_by = request.query.get('sort_by', 'name')
        folder = request.query.get('folder', None)
        search = request.query.get('search', None)
        fuzzy_search = request.query.get('fuzzy_search', 'false').lower() == 'true'
        base_models = request.query.getall('base_model', [])
        tags = request.query.getall('tag', [])

        # Process search options
        search_options = {
            'filename': request.query.get('search_filename', 'true').lower() == 'true',
            'modelname': request.query.get('search_modelname', 'true').lower() == 'true',
            'tags': request.query.get('search_tags', 'false').lower() == 'true',
            'recursive': request.query.get('recursive', 'false').lower() == 'true',
        }

        # Process hash filters if provided
        hash_filters = {}
        if 'hash' in request.query:
            hash_filters['single_hash'] = request.query['hash']
        elif 'hashes' in request.query:
            try:
                hash_list = json.loads(request.query['hashes'])
                if isinstance(hash_list, list):
                    hash_filters['multiple_hashes'] = hash_list
            except (json.JSONDecodeError, TypeError):
                # Malformed hash list: ignore the filter rather than fail.
                pass

        # Get data from scanner
        result = await self.get_paginated_data(
            page=page,
            page_size=page_size,
            sort_by=sort_by,
            folder=folder,
            search=search,
            fuzzy_search=fuzzy_search,
            base_models=base_models,
            tags=tags,
            search_options=search_options,
            hash_filters=hash_filters
        )

        # Format response items
        formatted_result = {
            'items': [self._format_checkpoint_response(cp) for cp in result['items']],
            'total': result['total'],
            'page': result['page'],
            'page_size': result['page_size'],
            'total_pages': result['total_pages']
        }

        # Return as JSON
        return web.json_response(formatted_result)
    except Exception as e:
        logger.error(f"Error in get_checkpoints: {e}", exc_info=True)
        return web.json_response({"error": str(e)}, status=500)
async def get_paginated_data(self, page, page_size, sort_by='name',
                             folder=None, search=None, fuzzy_search=False,
                             base_models=None, tags=None,
                             search_options=None, hash_filters=None):
    """Get paginated and filtered checkpoint data.

    Filter priority: hash filters short-circuit everything else; otherwise
    SFW, folder, base-model, tag and text-search filters are applied in
    sequence before pagination.

    Args:
        page: 1-based page number.
        page_size: items per page (caller is expected to clamp it > 0).
        sort_by: 'date' for date order, anything else for name order.
        folder: exact folder, or prefix match when search_options['recursive'].
        search / fuzzy_search: text query and matching mode.
        base_models / tags: whitelists; empty or None means no filtering.
        search_options: dict of filename/modelname/tags/recursive flags.
        hash_filters: {'single_hash': str} or {'multiple_hashes': [str]}.

    Returns:
        Dict with items, total, page, page_size, total_pages.
    """
    def paginate(data):
        # Shared pagination tail (was duplicated inline for the hash
        # short-circuit and the normal filter pipeline).
        total_items = len(data)
        start_idx = (page - 1) * page_size
        end_idx = min(start_idx + page_size, total_items)
        return {
            'items': data[start_idx:end_idx],
            'total': total_items,
            'page': page,
            'page_size': page_size,
            'total_pages': (total_items + page_size - 1) // page_size
        }

    cache = await self.scanner.get_cached_data()

    # Get default search options if not provided
    if search_options is None:
        search_options = {
            'filename': True,
            'modelname': True,
            'tags': False,
            'recursive': False,
        }

    # Get the base data set (pre-sorted views kept by the cache)
    filtered_data = cache.sorted_by_date if sort_by == 'date' else cache.sorted_by_name

    # Apply hash filtering if provided (highest priority; bypasses every
    # other filter and jumps straight to pagination)
    if hash_filters:
        single_hash = hash_filters.get('single_hash')
        multiple_hashes = hash_filters.get('multiple_hashes')

        if single_hash:
            single_hash = single_hash.lower()  # Ensure lowercase for matching
            filtered_data = [
                cp for cp in filtered_data
                if cp.get('sha256', '').lower() == single_hash
            ]
        elif multiple_hashes:
            # Set for O(1) membership checks.
            hash_set = set(h.lower() for h in multiple_hashes)
            filtered_data = [
                cp for cp in filtered_data
                if cp.get('sha256', '').lower() in hash_set
            ]
        return paginate(filtered_data)

    # Apply SFW filtering if enabled in settings
    if settings.get('show_only_sfw', False):
        filtered_data = [
            cp for cp in filtered_data
            if not cp.get('preview_nsfw_level') or cp.get('preview_nsfw_level') < NSFW_LEVELS['R']
        ]

    # Apply folder filtering
    if folder is not None:
        if search_options.get('recursive', False):
            # Recursive folder filtering - include all subfolders
            filtered_data = [
                cp for cp in filtered_data
                if cp['folder'].startswith(folder)
            ]
        else:
            # Exact folder filtering
            filtered_data = [
                cp for cp in filtered_data
                if cp['folder'] == folder
            ]

    # Apply base model filtering
    if base_models:
        filtered_data = [
            cp for cp in filtered_data
            if cp.get('base_model') in base_models
        ]

    # Apply tag filtering
    if tags:
        filtered_data = [
            cp for cp in filtered_data
            if any(tag in cp.get('tags', []) for tag in tags)
        ]

    # Apply search filtering: a checkpoint matches if ANY enabled field matches.
    if search:
        search_results = []
        for cp in filtered_data:
            # Search by file name
            if search_options.get('filename', True):
                if fuzzy_search:
                    if fuzzy_match(cp.get('file_name', ''), search):
                        search_results.append(cp)
                        continue
                elif search.lower() in cp.get('file_name', '').lower():
                    search_results.append(cp)
                    continue

            # Search by model name
            if search_options.get('modelname', True):
                if fuzzy_search:
                    if fuzzy_match(cp.get('model_name', ''), search):
                        search_results.append(cp)
                        continue
                elif search.lower() in cp.get('model_name', '').lower():
                    search_results.append(cp)
                    continue

            # Search by tags
            if search_options.get('tags', False) and 'tags' in cp:
                if any((fuzzy_match(tag, search) if fuzzy_search else search.lower() in tag.lower()) for tag in cp['tags']):
                    search_results.append(cp)
                    continue

        filtered_data = search_results

    return paginate(filtered_data)
def _format_checkpoint_response(self, checkpoint):
    """Shape a cached checkpoint record into the public API payload."""
    # Resolve derived fields first, then assemble the response dict.
    preview = config.get_preview_static_url(checkpoint.get("preview_url", ""))
    portable_path = checkpoint["file_path"].replace(os.sep, "/")
    civitai_payload = ModelRouteUtils.filter_civitai_data(checkpoint.get("civitai", {}))
    return {
        "model_name": checkpoint["model_name"],
        "file_name": checkpoint["file_name"],
        "preview_url": preview,
        "preview_nsfw_level": checkpoint.get("preview_nsfw_level", 0),
        "base_model": checkpoint.get("base_model", ""),
        "folder": checkpoint["folder"],
        "sha256": checkpoint.get("sha256", ""),
        "file_path": portable_path,
        "file_size": checkpoint.get("size", 0),
        "modified": checkpoint.get("modified", ""),
        "tags": checkpoint.get("tags", []),
        "modelDescription": checkpoint.get("modelDescription", ""),
        "from_civitai": checkpoint.get("from_civitai", True),
        "notes": checkpoint.get("notes", ""),
        "model_type": checkpoint.get("model_type", "checkpoint"),
        "civitai": civitai_payload
    }
async def fetch_all_civitai(self, request: web.Request) -> web.Response:
    """Fetch CivitAI metadata for all checkpoints in the background.

    Selects every cached checkpoint that has a sha256, no CivitAI id yet,
    and is believed to come from CivitAI, fetches metadata for each, and
    broadcasts started/processing/completed progress frames over the
    WebSocket manager.  Returns a summary JSON response when done.
    """
    try:
        cache = await self.scanner.get_cached_data()
        total = len(cache.raw_data)
        processed = 0
        success = 0
        needs_resort = False

        # Prepare checkpoints to process: hash known, CivitAI metadata
        # missing or incomplete, and not flagged as non-CivitAI.
        to_process = [
            cp for cp in cache.raw_data
            if cp.get('sha256') and (not cp.get('civitai') or 'id' not in cp.get('civitai')) and cp.get('from_civitai', True)
        ]
        total_to_process = len(to_process)

        # Send initial progress
        await ws_manager.broadcast({
            'status': 'started',
            'total': total_to_process,
            'processed': 0,
            'success': 0
        })

        # Process each checkpoint
        for cp in to_process:
            try:
                original_name = cp.get('model_name')
                if await ModelRouteUtils.fetch_and_update_model(
                    sha256=cp['sha256'],
                    file_path=cp['file_path'],
                    model_data=cp,
                    update_cache_func=self.scanner.update_single_model_cache
                ):
                    success += 1
                    # A changed model name invalidates the name-sorted view.
                    if original_name != cp.get('model_name'):
                        needs_resort = True
                processed += 1

                # Send progress update
                await ws_manager.broadcast({
                    'status': 'processing',
                    'total': total_to_process,
                    'processed': processed,
                    'success': success,
                    'current_name': cp.get('model_name', 'Unknown')
                })
            except Exception as e:
                # Per-item failures are logged and skipped so one bad model
                # does not abort the batch (processed is not incremented here).
                logger.error(f"Error fetching CivitAI data for {cp['file_path']}: {e}")

        if needs_resort:
            await cache.resort(name_only=True)

        # Send completion message
        await ws_manager.broadcast({
            'status': 'completed',
            'total': total_to_process,
            'processed': processed,
            'success': success
        })

        return web.json_response({
            "success": True,
            "message": f"Successfully updated {success} of {processed} processed checkpoints (total: {total})"
        })
    except Exception as e:
        # Send error message
        await ws_manager.broadcast({
            'status': 'error',
            'error': str(e)
        })
        logger.error(f"Error in fetch_all_civitai for checkpoints: {e}")
        return web.Response(text=str(e), status=500)
async def get_top_tags(self, request: web.Request) -> web.Response:
    """Handle request for top tags sorted by frequency.

    Query params:
        limit: max number of tags; out-of-range values reset to 20,
               non-integer values yield 400 (previously a generic 500).
    """
    try:
        # Reject non-numeric limits explicitly instead of letting the
        # ValueError fall through to the catch-all 500 handler.
        try:
            limit = int(request.query.get('limit', '20'))
        except ValueError:
            return web.json_response({
                'success': False,
                'error': "'limit' must be an integer"
            }, status=400)

        # Validate limit
        if limit < 1 or limit > 100:
            limit = 20  # Default to a reasonable limit

        # Get top tags
        top_tags = await self.scanner.get_top_tags(limit)

        return web.json_response({
            'success': True,
            'tags': top_tags
        })
    except Exception as e:
        logger.error(f"Error getting top tags: {str(e)}", exc_info=True)
        return web.json_response({
            'success': False,
            'error': 'Internal server error'
        }, status=500)
async def get_base_models(self, request: web.Request) -> web.Response:
    """Get base models used in checkpoints.

    (Docstring previously said "loras" — copy-paste slip; this route serves
    the checkpoint scanner.)

    Query params:
        limit: max number of base models; out-of-range values reset to 20,
               non-integer values yield 400 (previously a generic 500).
    """
    try:
        # Mirror get_top_tags: explicit 400 for non-numeric limits.
        try:
            limit = int(request.query.get('limit', '20'))
        except ValueError:
            return web.json_response({
                'success': False,
                'error': "'limit' must be an integer"
            }, status=400)

        # Validate limit
        if limit < 1 or limit > 100:
            limit = 20  # Default to a reasonable limit

        # Get base models
        base_models = await self.scanner.get_base_models(limit)

        return web.json_response({
            'success': True,
            'base_models': base_models
        })
    except Exception as e:
        logger.error(f"Error retrieving base models: {e}")
        return web.json_response({
            'success': False,
            'error': str(e)
        }, status=500)
async def scan_checkpoints(self, request):
    """Trigger a full rescan of the checkpoint folders."""
    try:
        # force_refresh discards the cached scan and re-reads from disk.
        await self.scanner.get_cached_data(force_refresh=True)
    except Exception as e:
        logger.error(f"Error in scan_checkpoints: {e}", exc_info=True)
        return web.json_response({"error": str(e)}, status=500)
    return web.json_response({"status": "success", "message": "Checkpoint scan completed"})
async def get_checkpoint_info(self, request):
    """Get detailed information for a specific checkpoint by name."""
    try:
        requested_name = request.match_info.get('name', '')
        info = await self.scanner.get_checkpoint_info_by_name(requested_name)
        # Guard clause: unknown names produce a 404.
        if not info:
            return web.json_response({"error": "Checkpoint not found"}, status=404)
        return web.json_response(info)
    except Exception as e:
        logger.error(f"Error in get_checkpoint_info: {e}", exc_info=True)
        return web.json_response({"error": str(e)}, status=500)
async def handle_checkpoints_page(self, request: web.Request) -> web.Response:
    """Handle GET /checkpoints request.

    Serves the checkpoints page; while the scanner is still initializing
    (or on cache failure) the same template is rendered in loading mode.
    """
    try:
        def render(folders, is_initializing):
            # Loading page and normal page share a single template; only
            # the folders list and the is_initializing flag differ.
            template = self.template_env.get_template('checkpoints.html')
            return template.render(
                folders=folders,
                is_initializing=is_initializing,
                settings=settings,   # Pass settings to template
                request=request      # Pass the request object to the template
            )

        # The page is "initializing" when:
        #  - services have not been resolved yet (request raced the app's
        #    on_startup hook; previously this dereferenced None.scanner._cache
        #    and crashed), or
        #  - the scanner cache object does not exist yet, or
        #  - the scanner reports a background initialization task running.
        is_initializing = (
            self.scanner is None or
            self.scanner._cache is None or
            (hasattr(self.scanner, '_is_initializing') and self.scanner._is_initializing)
        )

        if is_initializing:
            rendered = render([], True)
            logger.info("Checkpoints page is initializing, returning loading page")
        else:
            # Normal flow: render with the already-initialized cache data.
            try:
                cache = await self.scanner.get_cached_data(force_refresh=False)
                rendered = render(cache.folders, False)
            except Exception as cache_error:
                logger.error(f"Error loading checkpoints cache data: {cache_error}")
                # Fall back to the loading page if the cache read fails.
                rendered = render([], True)
                logger.info("Checkpoints cache error, returning initialization page")

        return web.Response(
            text=rendered,
            content_type='text/html'
        )
    except Exception as e:
        logger.error(f"Error handling checkpoints request: {e}", exc_info=True)
        return web.Response(
            text="Error loading checkpoints page",
            status=500
        )
async def delete_model(self, request: web.Request) -> web.Response:
"""Handle checkpoint model deletion request"""
return await ModelRouteUtils.handle_delete_model(request, self.scanner)
async def fetch_civitai(self, request: web.Request) -> web.Response:
    """Handle CivitAI metadata fetch request for checkpoints.

    Thin delegate: forwards the request and this route's scanner to the
    shared ModelRouteUtils implementation.
    """
    return await ModelRouteUtils.handle_fetch_civitai(request, self.scanner)
async def replace_preview(self, request: web.Request) -> web.Response:
    """Handle preview image replacement for checkpoints.

    Thin delegate: forwards the request and this route's scanner to the
    shared ModelRouteUtils implementation.
    """
    return await ModelRouteUtils.handle_replace_preview(request, self.scanner)
async def download_checkpoint(self, request: web.Request) -> web.Response:
    """Handle checkpoint download request.

    Expects a JSON body with one of 'download_url', 'model_hash' or
    'model_version_id', plus optional 'checkpoint_root' (save directory)
    and 'relative_path'. Download progress is broadcast to clients over
    the checkpoint-specific WebSocket.
    """
    async with self._download_lock:
        # Lazily resolve the download manager from the service registry.
        if self.download_manager is None:
            self.download_manager = await ServiceRegistry.get_download_manager()

        try:
            data = await request.json()

            # Progress callback that broadcasts over the checkpoint-specific WebSocket.
            async def progress_callback(progress):
                await ws_manager.broadcast_checkpoint_progress({
                    'status': 'progress',
                    'progress': progress
                })

            # Check which identifier is provided
            download_url = data.get('download_url')
            model_hash = data.get('model_hash')
            model_version_id = data.get('model_version_id')

            # Validate that at least one identifier is provided.
            # BUGFIX: the message previously named 'hash' / 'modelVersionId',
            # which are not the JSON keys this handler actually reads.
            if not any([download_url, model_hash, model_version_id]):
                return web.Response(
                    status=400,
                    text="Missing required parameter: Please provide either 'download_url', 'model_hash', or 'model_version_id'"
                )

            result = await self.download_manager.download_from_civitai(
                download_url=download_url,
                model_hash=model_hash,
                model_version_id=model_version_id,
                save_dir=data.get('checkpoint_root'),
                relative_path=data.get('relative_path', ''),
                progress_callback=progress_callback,
                model_type="checkpoint"
            )

            if not result.get('success', False):
                error_message = result.get('error', 'Unknown error')
                # Return 401 for early access errors so the frontend can
                # prompt the user to log in / purchase access.
                if 'early access' in error_message.lower():
                    logger.warning(f"Early access download failed: {error_message}")
                    return web.Response(
                        status=401,
                        text=f"Early Access Restriction: {error_message}"
                    )
                return web.Response(status=500, text=error_message)

            return web.json_response(result)
        except Exception as e:
            error_message = str(e)
            # A 401 from Civitai usually indicates an early-access (paid) model.
            if '401' in error_message:
                logger.warning(f"Early access error (401): {error_message}")
                return web.Response(
                    status=401,
                    text="Early Access Restriction: This model requires purchase. Please ensure you have purchased early access and are logged in to Civitai."
                )
            logger.error(f"Error downloading checkpoint: {error_message}")
            return web.Response(status=500, text=error_message)
async def get_checkpoint_roots(self, request):
    """Return the configured checkpoint root directories as JSON."""
    try:
        # Lazily resolve the scanner on first use.
        if self.scanner is None:
            self.scanner = await ServiceRegistry.get_checkpoint_scanner()
        return web.json_response({
            "success": True,
            "roots": self.scanner.get_model_roots()
        })
    except Exception as e:
        logger.error(f"Error getting checkpoint roots: {e}", exc_info=True)
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=500)
async def save_metadata(self, request: web.Request) -> web.Response:
    """Handle saving metadata updates for checkpoints.

    Merges the posted fields into the model's sidecar .metadata.json file,
    refreshes the scanner cache, and re-sorts the cache when the model
    name changed.
    """
    try:
        if self.scanner is None:
            self.scanner = await ServiceRegistry.get_checkpoint_scanner()

        data = await request.json()
        file_path = data.get('file_path')
        if not file_path:
            return web.Response(text='File path is required', status=400)

        # Everything except the file path itself is treated as an update.
        metadata_updates = {key: value for key, value in data.items() if key != 'file_path'}

        # The sidecar metadata file lives next to the model file.
        metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'

        # Merge the updates into whatever metadata already exists on disk.
        metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
        metadata.update(metadata_updates)

        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(metadata, f, indent=2, ensure_ascii=False)

        # Keep the in-memory cache in sync with the file on disk.
        await self.scanner.update_single_model_cache(file_path, file_path, metadata)

        # Renaming the model requires re-sorting the cached list.
        if 'model_name' in metadata_updates:
            cache = await self.scanner.get_cached_data()
            await cache.resort(name_only=True)

        return web.json_response({'success': True})
    except Exception as e:
        logger.error(f"Error saving checkpoint metadata: {e}", exc_info=True)
        return web.Response(text=str(e), status=500)
async def get_civitai_versions(self, request: web.Request) -> web.Response:
    """Get available versions for a Civitai checkpoint model with local availability info.

    For each version returned by Civitai, annotates:
      - existsLocally / localPath (matched via the file's SHA256 against the scanner's hash index)
      - modelSizeKB (size of the version's model file)
    Returns 404 when the model is unknown and 400 when the Civitai model
    is not of type 'Checkpoint'.
    """
    try:
        if self.scanner is None:
            self.scanner = await ServiceRegistry.get_checkpoint_scanner()

        # Get the civitai client from service registry
        civitai_client = await ServiceRegistry.get_civitai_client()

        model_id = request.match_info['model_id']
        response = await civitai_client.get_model_versions(model_id)
        if not response or not response.get('modelVersions'):
            return web.Response(status=404, text="Model not found")

        versions = response.get('modelVersions', [])
        model_type = response.get('type', '')

        # This endpoint only serves Checkpoint models.
        if model_type.lower() != 'checkpoint':
            return web.json_response({
                'error': f"Model type mismatch. Expected Checkpoint, got {model_type}"
            }, status=400)

        # Check local availability for each version
        for version in versions:
            files = version.get('files', [])
            # Prefer the primary model file; fall back to any file of type "Model".
            # (Idiom fix: truthiness check instead of `== True`.)
            model_file = next(
                (f for f in files if f.get('type') == 'Model' and f.get('primary')),
                None
            )
            if model_file is None:
                model_file = next((f for f in files if f.get('type') == 'Model'), None)

            if model_file:
                sha256 = model_file.get('hashes', {}).get('SHA256')
                if sha256:
                    # Set existsLocally and localPath at the version level
                    version['existsLocally'] = self.scanner.has_hash(sha256)
                    if version['existsLocally']:
                        version['localPath'] = self.scanner.get_path_by_hash(sha256)
                # Also expose the model file size at the version level for easier access.
                version['modelSizeKB'] = model_file.get('sizeKB')
            else:
                # No model file found in this version
                version['existsLocally'] = False

        return web.json_response(versions)
    except Exception as e:
        logger.error(f"Error fetching checkpoint model versions: {e}")
        return web.Response(status=500, text=str(e))

View File

@@ -1,11 +1,11 @@
import os
from aiohttp import web
import jinja2
from typing import Dict, List
from typing import Dict
import logging
from ..services.lora_scanner import LoraScanner
from ..config import config
from ..services.settings_manager import settings # Add this import
from ..services.settings_manager import settings
from ..services.service_registry import ServiceRegistry # Add ServiceRegistry import
logger = logging.getLogger(__name__)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
@@ -14,22 +14,35 @@ class LoraRoutes:
"""Route handlers for LoRA management endpoints"""
def __init__(self):
self.scanner = LoraScanner()
# Initialize service references as None, will be set during async init
self.scanner = None
self.recipe_scanner = None
self.template_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(config.templates_path),
autoescape=True
)
async def init_services(self):
    """Initialize services from ServiceRegistry.

    Resolves the LoRA scanner and recipe scanner singletons; safe to call
    more than once (the registry returns the same instances).
    """
    self.scanner = await ServiceRegistry.get_lora_scanner()
    self.recipe_scanner = await ServiceRegistry.get_recipe_scanner()
def format_lora_data(self, lora: Dict) -> Dict:
"""Format LoRA data for template rendering"""
return {
"model_name": lora["model_name"],
"file_name": lora["file_name"],
"preview_url": config.get_preview_static_url(lora["preview_url"]),
"preview_nsfw_level": lora.get("preview_nsfw_level", 0),
"base_model": lora["base_model"],
"folder": lora["folder"],
"sha256": lora["sha256"],
"file_path": lora["file_path"].replace(os.sep, "/"),
"size": lora["size"],
"tags": lora["tags"],
"modelDescription": lora["modelDescription"],
"usage_tips": lora["usage_tips"],
"notes": lora["notes"],
"modified": lora["modified"],
"from_civitai": lora.get("from_civitai", True),
"civitai": self._filter_civitai_data(lora.get("civitai", {}))
@@ -50,30 +63,49 @@ class LoraRoutes:
async def handle_loras_page(self, request: web.Request) -> web.Response:
"""Handle GET /loras request"""
try:
# 不等待缓存数据,直接检查缓存状态
# Ensure services are initialized
await self.init_services()
# Check if the LoraScanner is initializing
# It's initializing if the cache object doesn't exist yet,
# OR if the scanner explicitly says it's initializing (background task running).
is_initializing = (
self.scanner._cache is None and
(self.scanner._initialization_task is not None and
not self.scanner._initialization_task.done())
self.scanner._cache is None or
(hasattr(self.scanner, '_is_initializing') and self.scanner._is_initializing)
)
if is_initializing:
# 如果正在初始化,返回一个只包含加载提示的页面
# If still initializing, return loading page
template = self.template_env.get_template('loras.html')
rendered = template.render(
folders=[], # 空文件夹列表
is_initializing=True, # 新增标志
settings=settings # Pass settings to template
folders=[],
is_initializing=True,
settings=settings,
request=request
)
logger.info("Loras page is initializing, returning loading page")
else:
# 正常流程
cache = await self.scanner.get_cached_data()
template = self.template_env.get_template('loras.html')
rendered = template.render(
folders=cache.folders,
is_initializing=False,
settings=settings # Pass settings to template
)
# Normal flow - get data from initialized cache
try:
cache = await self.scanner.get_cached_data(force_refresh=False)
template = self.template_env.get_template('loras.html')
rendered = template.render(
folders=cache.folders,
is_initializing=False,
settings=settings,
request=request
)
except Exception as cache_error:
logger.error(f"Error loading cache data: {cache_error}")
template = self.template_env.get_template('loras.html')
rendered = template.render(
folders=[],
is_initializing=True,
settings=settings,
request=request
)
logger.info("Cache error, returning initialization page")
return web.Response(
text=rendered,
@@ -87,6 +119,71 @@ class LoraRoutes:
status=500
)
async def handle_recipes_page(self, request: web.Request) -> web.Response:
    """Handle GET /loras/recipes request.

    The recipe list itself is loaded by the frontend via the API; this
    handler only decides between the normal page and the initializing page.
    """
    try:
        # Make sure the scanners have been resolved from the registry.
        await self.init_services()

        # No explicit initialization check: asking the recipe scanner for
        # cached data triggers cache initialization when needed.
        try:
            await self.recipe_scanner.get_cached_data(force_refresh=False)
            template = self.template_env.get_template('recipes.html')
            rendered = template.render(
                recipes=[],  # Frontend will load recipes via API
                is_initializing=False,
                settings=settings,
                request=request
            )
        except Exception as cache_error:
            # On any cache failure, fall back to the initializing page.
            logger.error(f"Error loading recipe cache data: {cache_error}")
            template = self.template_env.get_template('recipes.html')
            rendered = template.render(
                is_initializing=True,
                settings=settings,
                request=request
            )
            logger.info("Recipe cache error, returning initialization page")

        return web.Response(
            text=rendered,
            content_type='text/html'
        )
    except Exception as e:
        logger.error(f"Error handling recipes request: {e}", exc_info=True)
        return web.Response(
            text="Error loading recipes page",
            status=500
        )
def _format_recipe_file_url(self, file_path: str) -> str:
    """Format file path for recipe image as a URL - same as in recipe_routes.

    Paths under the first lora root's "recipes" directory are mapped to
    their static preview URL; anything else gets a URL built from just the
    file name. On any failure a default placeholder image URL is returned.
    """
    try:
        root = config.loras_roots[0]
        recipes_dir = os.path.join(root, "recipes").replace(os.sep, '/')
        if file_path.replace(os.sep, '/').startswith(recipes_dir):
            relative_path = os.path.relpath(file_path, root).replace(os.sep, '/')
            return f"/loras_static/root1/preview/{relative_path}"
        # Outside the recipes dir: build a URL from the bare file name.
        return f"/loras_static/root1/preview/recipes/{os.path.basename(file_path)}"
    except Exception as e:
        logger.error(f"Error formatting recipe file URL: {e}", exc_info=True)
        return '/loras_static/images/no-preview.png'  # Return default image on error
def setup_routes(self, app: web.Application):
    """Register routes with the application.

    Also appends a startup handler so ServiceRegistry-backed services are
    resolved once the aiohttp app starts.
    """
    # Add an app startup handler to initialize services
    app.on_startup.append(self._on_startup)

    # Register page routes
    app.router.add_get('/loras', self.handle_loras_page)
    app.router.add_get('/loras/recipes', self.handle_recipes_page)
async def _on_startup(self, app):
    """Initialize services when the app starts (aiohttp on_startup hook)."""
    await self.init_services()

1233
py/routes/recipe_routes.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -24,11 +24,9 @@ class UpdateRoutes:
try:
# Read local version from pyproject.toml
local_version = UpdateRoutes._get_local_version()
logger.info(f"Local version: {local_version}")
# Fetch remote version from GitHub
remote_version, changelog = await UpdateRoutes._get_remote_version()
logger.info(f"Remote version: {remote_version}")
# Compare versions
update_available = UpdateRoutes._compare_versions(
@@ -36,8 +34,6 @@ class UpdateRoutes:
remote_version.replace('v', '')
)
logger.info(f"Update available: {update_available}")
return web.json_response({
'success': True,
'current_version': local_version,

View File

@@ -0,0 +1,131 @@
import os
import logging
import asyncio
from typing import List, Dict, Optional, Set
import folder_paths # type: ignore
from ..utils.models import CheckpointMetadata
from ..config import config
from .model_scanner import ModelScanner
from .model_hash_index import ModelHashIndex
from .service_registry import ServiceRegistry
logger = logging.getLogger(__name__)
class CheckpointScanner(ModelScanner):
    """Service for scanning and managing checkpoint files.

    Process-wide singleton built on ModelScanner; discovers checkpoint
    files under the ComfyUI 'checkpoints' and 'diffusion_models' folders.
    """

    # Singleton instance and the lock guarding its async creation.
    _instance = None
    _lock = asyncio.Lock()

    def __new__(cls):
        # Synchronous singleton: always hand back the same instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # Guard against re-running __init__ on the shared singleton instance.
        if not hasattr(self, '_initialized'):
            # Define supported file extensions
            file_extensions = {'.safetensors', '.ckpt', '.pt', '.pth', '.sft', '.gguf'}
            super().__init__(
                model_type="checkpoint",
                model_class=CheckpointMetadata,
                file_extensions=file_extensions,
                hash_index=ModelHashIndex()
            )
            self._checkpoint_roots = self._init_checkpoint_roots()
            self._initialized = True

    @classmethod
    async def get_instance(cls):
        """Get singleton instance with async support.

        Serializes first construction behind the class lock so concurrent
        callers cannot create two instances.
        """
        async with cls._lock:
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance

    def _init_checkpoint_roots(self) -> List[str]:
        """Initialize checkpoint roots from ComfyUI settings.

        Returns a sorted, deduplicated list of existing directories drawn
        from both the 'checkpoints' and 'diffusion_models' folder settings,
        with path separators normalized to '/'.
        """
        # Get both checkpoint and diffusion_models paths
        checkpoint_paths = folder_paths.get_folder_paths("checkpoints")
        diffusion_paths = folder_paths.get_folder_paths("diffusion_models")

        # Combine, normalize and deduplicate paths
        all_paths = set()
        for path in checkpoint_paths + diffusion_paths:
            if os.path.exists(path):
                norm_path = path.replace(os.sep, "/")
                all_paths.add(norm_path)

        # Sort (case-insensitively) for consistent order
        sorted_paths = sorted(all_paths, key=lambda p: p.lower())
        return sorted_paths

    def get_model_roots(self) -> List[str]:
        """Get checkpoint root directories."""
        return self._checkpoint_roots

    async def scan_all_models(self) -> List[Dict]:
        """Scan all checkpoint directories and return metadata.

        Roots are scanned concurrently; a failure in one root is logged and
        does not abort the others.
        """
        all_checkpoints = []

        # Create scan tasks for each directory so roots scan concurrently.
        scan_tasks = []
        for root in self._checkpoint_roots:
            task = asyncio.create_task(self._scan_directory(root))
            scan_tasks.append(task)

        # Wait for all tasks to complete, logging per-root failures.
        for task in scan_tasks:
            try:
                checkpoints = await task
                all_checkpoints.extend(checkpoints)
            except Exception as e:
                logger.error(f"Error scanning checkpoint directory: {e}")

        return all_checkpoints

    async def _scan_directory(self, root_path: str) -> List[Dict]:
        """Scan a directory tree for checkpoint files.

        Follows symlinks but tracks realpaths in a visited set so the same
        physical directory is never scanned twice (protects against cycles).
        """
        checkpoints = []
        original_root = root_path  # keep the root for relative-path computation downstream

        async def scan_recursive(path: str, visited_paths: set):
            try:
                # Resolve symlinks so cycles / duplicate mounts are skipped.
                real_path = os.path.realpath(path)
                if real_path in visited_paths:
                    logger.debug(f"Skipping already visited path: {path}")
                    return
                visited_paths.add(real_path)

                with os.scandir(path) as it:
                    entries = list(it)
                    for entry in entries:
                        try:
                            if entry.is_file(follow_symlinks=True):
                                # Check if file has a supported extension
                                ext = os.path.splitext(entry.name)[1].lower()
                                if ext in self.file_extensions:
                                    file_path = entry.path.replace(os.sep, "/")
                                    await self._process_single_file(file_path, original_root, checkpoints)
                                    # Yield to the event loop between files.
                                    await asyncio.sleep(0)
                            elif entry.is_dir(follow_symlinks=True):
                                # For directories, continue scanning with the original (non-resolved) path
                                await scan_recursive(entry.path, visited_paths)
                        except Exception as e:
                            logger.error(f"Error processing entry {entry.path}: {e}")
            except Exception as e:
                logger.error(f"Error scanning {path}: {e}")

        await scan_recursive(root_path, set())
        return checkpoints

    async def _process_single_file(self, file_path: str, root_path: str, checkpoints: list):
        """Process a single checkpoint file and append its metadata to `checkpoints`.

        Errors are logged and swallowed so one bad file does not stop the scan.
        """
        try:
            result = await self._process_model_file(file_path, root_path)
            if result:
                checkpoints.append(result)
        except Exception as e:
            logger.error(f"Error processing {file_path}: {e}")

View File

@@ -3,6 +3,7 @@ import aiohttp
import os
import json
import logging
import asyncio
from email.parser import Parser
from typing import Optional, Dict, Tuple, List
from urllib.parse import unquote
@@ -11,20 +12,51 @@ from ..utils.models import LoraMetadata
logger = logging.getLogger(__name__)
class CivitaiClient:
_instance = None
_lock = asyncio.Lock()
@classmethod
async def get_instance(cls):
"""Get singleton instance of CivitaiClient"""
async with cls._lock:
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
# Check if already initialized for singleton pattern
if hasattr(self, '_initialized'):
return
self._initialized = True
self.base_url = "https://civitai.com/api/v1"
self.headers = {
'User-Agent': 'ComfyUI-LoRA-Manager/1.0'
}
self._session = None
# Set default buffer size to 1MB for higher throughput
self.chunk_size = 1024 * 1024
@property
async def session(self) -> aiohttp.ClientSession:
"""Lazy initialize the session"""
if self._session is None:
connector = aiohttp.TCPConnector(ssl=True)
trust_env = True # 允许使用系统环境变量中的代理设置
self._session = aiohttp.ClientSession(connector=connector, trust_env=trust_env)
# Optimize TCP connection parameters
connector = aiohttp.TCPConnector(
ssl=True,
limit=10, # Increase parallel connections
ttl_dns_cache=300, # DNS cache time
force_close=False, # Keep connections for reuse
enable_cleanup_closed=True
)
trust_env = True # Allow using system environment proxy settings
# Configure timeout parameters
timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=60)
self._session = aiohttp.ClientSession(
connector=connector,
trust_env=trust_env,
timeout=timeout
)
return self._session
def _parse_content_disposition(self, header: str) -> str:
@@ -74,8 +106,24 @@ class CivitaiClient:
session = await self.session
try:
headers = self._get_request_headers()
# Add Range header to allow resumable downloads
headers['Accept-Encoding'] = 'identity' # Disable compression for better chunked downloads
async with session.get(url, headers=headers, allow_redirects=True) as response:
if response.status != 200:
# Handle 401 unauthorized responses
if response.status == 401:
logger.warning(f"Unauthorized access to resource: {url} (Status 401)")
return False, "Invalid or missing CivitAI API key, or early access restriction."
# Handle other client errors that might be permission-related
if response.status == 403:
logger.warning(f"Forbidden access to resource: {url} (Status 403)")
return False, "Access forbidden: You don't have permission to download this file."
# Generic error response for other status codes
return False, f"Download failed with status {response.status}"
# Get filename from content-disposition header
@@ -89,16 +137,23 @@ class CivitaiClient:
# Get total file size for progress calculation
total_size = int(response.headers.get('content-length', 0))
current_size = 0
last_progress_report_time = datetime.now()
# Stream download to file with progress updates
# Stream download to file with progress updates using larger buffer
with open(save_path, 'wb') as f:
async for chunk in response.content.iter_chunked(8192):
async for chunk in response.content.iter_chunked(self.chunk_size):
if chunk:
f.write(chunk)
current_size += len(chunk)
if progress_callback and total_size:
# Limit progress update frequency to reduce overhead
now = datetime.now()
time_diff = (now - last_progress_report_time).total_seconds()
if progress_callback and total_size and time_diff >= 0.5:
progress = (current_size / total_size) * 100
await progress_callback(progress)
last_progress_report_time = now
# Ensure 100% progress is reported
if progress_callback:
@@ -106,6 +161,9 @@ class CivitaiClient:
return True, save_path
except aiohttp.ClientError as e:
logger.error(f"Network error during download: {e}")
return False, f"Network error: {str(e)}"
except Exception as e:
logger.error(f"Download error: {e}")
return False, str(e)
@@ -143,7 +201,11 @@ class CivitaiClient:
if response.status != 200:
return None
data = await response.json()
return data.get('modelVersions', [])
# Also return model type along with versions
return {
'modelVersions': data.get('modelVersions', []),
'type': data.get('type', '')
}
except Exception as e:
logger.error(f"Error fetching model versions: {e}")
return None
@@ -163,8 +225,78 @@ class CivitaiClient:
logger.error(f"Error fetching model version info: {e}")
return None
async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
"""Fetch model metadata (description and tags) from Civitai API
Args:
model_id: The Civitai model ID
Returns:
Tuple[Optional[Dict], int]: A tuple containing:
- A dictionary with model metadata or None if not found
- The HTTP status code from the request
"""
try:
session = await self.session
headers = self._get_request_headers()
url = f"{self.base_url}/models/{model_id}"
async with session.get(url, headers=headers) as response:
status_code = response.status
if status_code != 200:
logger.warning(f"Failed to fetch model metadata: Status {status_code}")
return None, status_code
data = await response.json()
# Extract relevant metadata
metadata = {
"description": data.get("description") or "No model description available",
"tags": data.get("tags", [])
}
if metadata["description"] or metadata["tags"]:
return metadata, status_code
else:
logger.warning(f"No metadata found for model {model_id}")
return None, status_code
except Exception as e:
logger.error(f"Error fetching model metadata: {e}", exc_info=True)
return None, 0
# Keep old method for backward compatibility, delegating to the new one
async def get_model_description(self, model_id: str) -> Optional[str]:
"""Fetch the model description from Civitai API (Legacy method)"""
metadata, _ = await self.get_model_metadata(model_id)
return metadata.get("description") if metadata else None
async def close(self):
"""Close the session if it exists"""
if self._session is not None:
await self._session.close()
self._session = None
self._session = None
async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]:
"""Get hash from Civitai API"""
try:
if not self._session:
return None
version_info = await self._session.get(f"{self.base_url}/model-versions/{model_version_id}")
if not version_info or not version_info.json().get('files'):
return None
# Get hash from the first file
for file_info in version_info.json().get('files', []):
if file_info.get('hashes', {}).get('SHA256'):
# Convert hash to lowercase to standardize
hash_value = file_info['hashes']['SHA256'].lower()
return hash_value
return None
except Exception as e:
logger.error(f"Error getting hash from Civitai: {e}")
return None

View File

@@ -1,20 +1,79 @@
import logging
import os
import json
from typing import Optional, Dict
import asyncio
from typing import Optional, Dict, Any
from .civitai_client import CivitaiClient
from .file_monitor import LoraFileMonitor
from ..utils.models import LoraMetadata
from ..utils.models import LoraMetadata, CheckpointMetadata
from ..utils.constants import CARD_PREVIEW_WIDTH
from ..utils.exif_utils import ExifUtils
from .service_registry import ServiceRegistry
# Download to temporary file first
import tempfile
logger = logging.getLogger(__name__)
class DownloadManager:
def __init__(self, file_monitor: Optional[LoraFileMonitor] = None):
self.civitai_client = CivitaiClient()
self.file_monitor = file_monitor
_instance = None
_lock = asyncio.Lock()
@classmethod
async def get_instance(cls):
"""Get singleton instance of DownloadManager"""
async with cls._lock:
if cls._instance is None:
cls._instance = cls()
return cls._instance
async def download_from_civitai(self, download_url: str, save_dir: str, relative_path: str = '',
progress_callback=None) -> Dict:
def __init__(self):
# Check if already initialized for singleton pattern
if hasattr(self, '_initialized'):
return
self._initialized = True
self._civitai_client = None # Will be lazily initialized
async def _get_civitai_client(self):
"""Lazily initialize CivitaiClient from registry"""
if self._civitai_client is None:
self._civitai_client = await ServiceRegistry.get_civitai_client()
return self._civitai_client
async def _get_lora_monitor(self):
"""Get the lora file monitor from registry"""
return await ServiceRegistry.get_lora_monitor()
async def _get_checkpoint_monitor(self):
"""Get the checkpoint file monitor from registry"""
return await ServiceRegistry.get_checkpoint_monitor()
async def _get_lora_scanner(self):
"""Get the lora scanner from registry"""
return await ServiceRegistry.get_lora_scanner()
async def _get_checkpoint_scanner(self):
"""Get the checkpoint scanner from registry"""
return await ServiceRegistry.get_checkpoint_scanner()
async def download_from_civitai(self, download_url: str = None, model_hash: str = None,
model_version_id: str = None, save_dir: str = None,
relative_path: str = '', progress_callback=None,
model_type: str = "lora") -> Dict:
"""Download model from Civitai
Args:
download_url: Direct download URL for the model
model_hash: SHA256 hash of the model
model_version_id: Civitai model version ID
save_dir: Directory to save the model to
relative_path: Relative path within save_dir
progress_callback: Callback function for progress updates
model_type: Type of model ('lora' or 'checkpoint')
Returns:
Dict with download result
"""
try:
# Update save directory with relative path if provided
if relative_path:
@@ -22,56 +81,114 @@ class DownloadManager:
# Create directory if it doesn't exist
os.makedirs(save_dir, exist_ok=True)
# Get version info
version_id = download_url.split('/')[-1]
version_info = await self.civitai_client.get_model_version_info(version_id)
# Get civitai client
civitai_client = await self._get_civitai_client()
# Get version info based on the provided identifier
version_info = None
if download_url:
# Extract version ID from download URL
version_id = download_url.split('/')[-1]
version_info = await civitai_client.get_model_version_info(version_id)
elif model_version_id:
# Use model version ID directly
version_info = await civitai_client.get_model_version_info(model_version_id)
elif model_hash:
# Get model by hash
version_info = await civitai_client.get_model_by_hash(model_hash)
if not version_info:
return {'success': False, 'error': 'Failed to fetch model metadata'}
# Check if this is an early access model
if version_info.get('earlyAccessEndsAt'):
early_access_date = version_info.get('earlyAccessEndsAt', '')
# Convert to a readable date if possible
try:
from datetime import datetime
date_obj = datetime.fromisoformat(early_access_date.replace('Z', '+00:00'))
formatted_date = date_obj.strftime('%Y-%m-%d')
early_access_msg = f"This model requires early access payment (until {formatted_date}). "
except:
early_access_msg = "This model requires early access payment. "
early_access_msg += "Please ensure you have purchased early access and are logged in to Civitai."
logger.warning(f"Early access model detected: {version_info.get('name', 'Unknown')}")
# We'll still try to download, but log a warning and prepare for potential failure
if progress_callback:
await progress_callback(1) # Show minimal progress to indicate we're trying
# Report initial progress
if progress_callback:
await progress_callback(0)
# 2. 获取文件信息
# 2. Get file information
file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
if not file_info:
return {'success': False, 'error': 'No primary file found in metadata'}
# 3. 准备下载
# 3. Prepare download
file_name = file_info['name']
save_path = os.path.join(save_dir, file_name)
file_size = file_info.get('sizeKB', 0) * 1024
# 4. 通知文件监控系统
self.file_monitor.handler.add_ignore_path(
save_path.replace(os.sep, '/'),
file_size
)
# 4. Notify file monitor - use normalized path and file size
file_monitor = await self._get_lora_monitor() if model_type == "lora" else await self._get_checkpoint_monitor()
if file_monitor and file_monitor.handler:
file_monitor.handler.add_ignore_path(
save_path.replace(os.sep, '/'),
file_size
)
# 5. 准备元数据
metadata = LoraMetadata.from_civitai_info(version_info, file_info, save_path)
# 5. Prepare metadata based on model type
if model_type == "checkpoint":
metadata = CheckpointMetadata.from_civitai_info(version_info, file_info, save_path)
logger.info(f"Creating CheckpointMetadata for {file_name}")
else:
metadata = LoraMetadata.from_civitai_info(version_info, file_info, save_path)
logger.info(f"Creating LoraMetadata for {file_name}")
# 6. 开始下载流程
# 5.1 Get and update model tags and description
model_id = version_info.get('modelId')
if model_id:
model_metadata, _ = await civitai_client.get_model_metadata(str(model_id))
if model_metadata:
if model_metadata.get("tags"):
metadata.tags = model_metadata.get("tags", [])
if model_metadata.get("description"):
metadata.modelDescription = model_metadata.get("description", "")
# 6. Start download process
result = await self._execute_download(
download_url=download_url,
download_url=file_info.get('downloadUrl', ''),
save_dir=save_dir,
metadata=metadata,
version_info=version_info,
relative_path=relative_path,
progress_callback=progress_callback
progress_callback=progress_callback,
model_type=model_type
)
return result
except Exception as e:
logger.error(f"Error in download_from_civitai: {e}", exc_info=True)
# Check if this might be an early access error
error_str = str(e).lower()
if "403" in error_str or "401" in error_str or "unauthorized" in error_str or "early access" in error_str:
return {'success': False, 'error': f"Early access restriction: {str(e)}. Please ensure you have purchased early access and are logged in to Civitai."}
return {'success': False, 'error': str(e)}
async def _execute_download(self, download_url: str, save_dir: str,
metadata: LoraMetadata, version_info: Dict,
relative_path: str, progress_callback=None) -> Dict:
metadata, version_info: Dict,
relative_path: str, progress_callback=None,
model_type: str = "lora") -> Dict:
"""Execute the actual download process including preview images and model files"""
try:
civitai_client = await self._get_civitai_client()
save_path = metadata.file_path
metadata_path = os.path.splitext(save_path)[0] + '.metadata.json'
@@ -82,19 +199,61 @@ class DownloadManager:
if progress_callback:
await progress_callback(1) # 1% progress for starting preview download
preview_ext = '.mp4' if images[0].get('type') == 'video' else '.png'
preview_path = os.path.splitext(save_path)[0] + '.preview' + preview_ext
if await self.civitai_client.download_preview_image(images[0]['url'], preview_path):
metadata.preview_url = preview_path.replace(os.sep, '/')
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
# Check if it's a video or an image
is_video = images[0].get('type') == 'video'
if is_video:
# For videos, use .mp4 extension
preview_ext = '.mp4'
preview_path = os.path.splitext(save_path)[0] + preview_ext
# Download video directly
if await civitai_client.download_preview_image(images[0]['url'], preview_path):
metadata.preview_url = preview_path.replace(os.sep, '/')
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
else:
# For images, use WebP format for better performance
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
temp_path = temp_file.name
# Download the original image to temp path
if await civitai_client.download_preview_image(images[0]['url'], temp_path):
# Optimize and convert to WebP
preview_path = os.path.splitext(save_path)[0] + '.webp'
# Use ExifUtils to optimize and convert the image
optimized_data, _ = ExifUtils.optimize_image(
image_data=temp_path,
target_width=CARD_PREVIEW_WIDTH,
format='webp',
quality=85,
preserve_metadata=True
)
# Save the optimized image
with open(preview_path, 'wb') as f:
f.write(optimized_data)
# Update metadata
metadata.preview_url = preview_path.replace(os.sep, '/')
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
# Remove temporary file
try:
os.unlink(temp_path)
except Exception as e:
logger.warning(f"Failed to delete temp file: {e}")
# Report preview download completion
if progress_callback:
await progress_callback(3) # 3% progress after preview download
# Download model file with progress tracking
success, result = await self.civitai_client._download_file(
success, result = await civitai_client._download_file(
download_url,
save_dir,
os.path.basename(save_path),
@@ -108,15 +267,22 @@ class DownloadManager:
os.remove(path)
return {'success': False, 'error': result}
# 4. 更新文件信息(大小和修改时间)
# 4. Update file information (size and modified time)
metadata.update_file_info(save_path)
# 5. 最终更新元数据
# 5. Final metadata update
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata.to_dict(), f, indent=2, ensure_ascii=False)
# 6. update lora cache
cache = await self.file_monitor.scanner.get_cached_data()
# 6. Update cache based on model type
if model_type == "checkpoint":
scanner = await self._get_checkpoint_scanner()
logger.info(f"Updating checkpoint cache for {save_path}")
else:
scanner = await self._get_lora_scanner()
logger.info(f"Updating lora cache for {save_path}")
cache = await scanner.get_cached_data()
metadata_dict = metadata.to_dict()
metadata_dict['folder'] = relative_path
cache.raw_data.append(metadata_dict)
@@ -124,6 +290,9 @@ class DownloadManager:
all_folders = set(cache.folders)
all_folders.add(relative_path)
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
# Update the hash index with the new model entry
scanner._hash_index.add_entry(metadata_dict['sha256'], metadata_dict['file_path'])
# Report 100% completion
if progress_callback:

View File

@@ -1,28 +1,42 @@
from operator import itemgetter
import os
import logging
import asyncio
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileCreatedEvent, FileDeletedEvent
from typing import List
from watchdog.events import FileSystemEventHandler
from typing import List, Dict, Set, Optional
from threading import Lock
from .lora_scanner import LoraScanner
from ..config import config
from .service_registry import ServiceRegistry
logger = logging.getLogger(__name__)
class LoraFileHandler(FileSystemEventHandler):
"""Handler for LoRA file system events"""
# Configuration constant to control file monitoring functionality
ENABLE_FILE_MONITORING = False
class BaseFileHandler(FileSystemEventHandler):
"""Base handler for file system events"""
def __init__(self, scanner: LoraScanner, loop: asyncio.AbstractEventLoop):
self.scanner = scanner
self.loop = loop # 存储事件循环引用
self.pending_changes = set() # 待处理的变更
self.lock = Lock() # 线程安全锁
self.update_task = None # 异步更新任务
self._ignore_paths = set() # Add ignore paths set
self._min_ignore_timeout = 5 # minimum timeout in seconds
self._download_speed = 1024 * 1024 # assume 1MB/s as base speed
def __init__(self, loop: asyncio.AbstractEventLoop):
self.loop = loop # Store event loop reference
self.pending_changes = set() # Pending changes
self.lock = Lock() # Thread-safe lock
self.update_task = None # Async update task
self._ignore_paths = set() # Paths to ignore
self._min_ignore_timeout = 5 # Minimum timeout in seconds
self._download_speed = 1024 * 1024 # Assume 1MB/s as base speed
# Track modified files with timestamps for debouncing
self.modified_files: Dict[str, float] = {}
self.debounce_timer = None
self.debounce_delay = 3.0 # Seconds to wait after last modification
# Track files already scheduled for processing
self.scheduled_files: Set[str] = set()
# File extensions to monitor - should be overridden by subclasses
self.file_extensions = set()
def _should_ignore(self, path: str) -> bool:
"""Check if path should be ignored"""
@@ -37,32 +51,152 @@ class LoraFileHandler(FileSystemEventHandler):
# Short timeout (e.g. 5 seconds) is sufficient to ignore the CREATE event
timeout = 5
asyncio.get_event_loop().call_later(
self.loop.call_later(
timeout,
self._ignore_paths.discard,
real_path.replace(os.sep, '/')
)
def on_created(self, event):
if event.is_directory or not event.src_path.endswith('.safetensors'):
if event.is_directory:
return
if self._should_ignore(event.src_path):
# Handle appropriate files based on extensions
file_ext = os.path.splitext(event.src_path)[1].lower()
if file_ext in self.file_extensions:
if self._should_ignore(event.src_path):
return
# Process this file directly and ignore subsequent modifications
normalized_path = os.path.realpath(event.src_path).replace(os.sep, '/')
if normalized_path not in self.scheduled_files:
logger.info(f"File created: {event.src_path}")
self.scheduled_files.add(normalized_path)
self._schedule_update('add', event.src_path)
# Ignore modifications for a short period after creation
self.loop.call_later(
self.debounce_delay * 2,
self.scheduled_files.discard,
normalized_path
)
def on_modified(self, event):
if event.is_directory:
return
logger.info(f"LoRA file created: {event.src_path}")
self._schedule_update('add', event.src_path)
# Only process files with supported extensions
file_ext = os.path.splitext(event.src_path)[1].lower()
if file_ext in self.file_extensions:
if self._should_ignore(event.src_path):
return
normalized_path = os.path.realpath(event.src_path).replace(os.sep, '/')
# Skip if this file is already scheduled for processing
if normalized_path in self.scheduled_files:
return
# Update the timestamp for this file
self.modified_files[normalized_path] = time.time()
# Cancel any existing timer
if self.debounce_timer:
self.debounce_timer.cancel()
# Set a new timer to process modified files after debounce period
self.debounce_timer = self.loop.call_later(
self.debounce_delay,
self.loop.call_soon_threadsafe,
self._process_modified_files
)
def _process_modified_files(self):
"""Process files that have been modified after debounce period"""
current_time = time.time()
files_to_process = []
# Find files that haven't been modified for debounce_delay seconds
for file_path, last_modified in list(self.modified_files.items()):
if current_time - last_modified >= self.debounce_delay:
# Only process if not already scheduled
if file_path not in self.scheduled_files:
files_to_process.append(file_path)
self.scheduled_files.add(file_path)
# Auto-remove from scheduled list after reasonable time
self.loop.call_later(
self.debounce_delay * 2,
self.scheduled_files.discard,
file_path
)
del self.modified_files[file_path]
# Process stable files
for file_path in files_to_process:
logger.info(f"Processing modified file: {file_path}")
self._schedule_update('add', file_path)
def on_deleted(self, event):
if event.is_directory or not event.src_path.endswith('.safetensors'):
if event.is_directory:
return
file_ext = os.path.splitext(event.src_path)[1].lower()
if file_ext not in self.file_extensions:
return
if self._should_ignore(event.src_path):
return
logger.info(f"LoRA file deleted: {event.src_path}")
# Remove from scheduled files if present
normalized_path = os.path.realpath(event.src_path).replace(os.sep, '/')
self.scheduled_files.discard(normalized_path)
logger.info(f"File deleted: {event.src_path}")
self._schedule_update('remove', event.src_path)
def _schedule_update(self, action: str, file_path: str): #file_path is a real path
def on_moved(self, event):
"""Handle file move/rename events"""
src_ext = os.path.splitext(event.src_path)[1].lower()
dest_ext = os.path.splitext(event.dest_path)[1].lower()
# If destination has supported extension, treat as new file
if dest_ext in self.file_extensions:
if self._should_ignore(event.dest_path):
return
normalized_path = os.path.realpath(event.dest_path).replace(os.sep, '/')
# Only process if not already scheduled
if normalized_path not in self.scheduled_files:
logger.info(f"File renamed/moved to: {event.dest_path}")
self.scheduled_files.add(normalized_path)
self._schedule_update('add', event.dest_path)
# Auto-remove from scheduled list after reasonable time
self.loop.call_later(
self.debounce_delay * 2,
self.scheduled_files.discard,
normalized_path
)
# If source was a supported file, treat it as deleted
if src_ext in self.file_extensions:
if self._should_ignore(event.src_path):
return
normalized_path = os.path.realpath(event.src_path).replace(os.sep, '/')
self.scheduled_files.discard(normalized_path)
logger.info(f"File moved/renamed from: {event.src_path}")
self._schedule_update('remove', event.src_path)
def _schedule_update(self, action: str, file_path: str):
"""Schedule a cache update"""
with self.lock:
# 使用 config 中的方法映射路径
# Use config method to map path
mapped_path = config.map_path_to_link(file_path)
normalized_path = mapped_path.replace(os.sep, '/')
self.pending_changes.add((action, normalized_path))
@@ -73,7 +207,20 @@ class LoraFileHandler(FileSystemEventHandler):
"""Create update task in the event loop"""
if self.update_task is None or self.update_task.done():
self.update_task = asyncio.create_task(self._process_changes())
async def _process_changes(self, delay: float = 2.0):
"""Process pending changes with debouncing - should be implemented by subclasses"""
raise NotImplementedError("Subclasses must implement _process_changes")
class LoraFileHandler(BaseFileHandler):
"""Handler for LoRA file system events"""
def __init__(self, loop: asyncio.AbstractEventLoop):
super().__init__(loop)
# Set supported file extensions for LoRAs
self.file_extensions = {'.safetensors'}
async def _process_changes(self, delay: float = 2.0):
"""Process pending changes with debouncing"""
await asyncio.sleep(delay)
@@ -86,32 +233,54 @@ class LoraFileHandler(FileSystemEventHandler):
if not changes:
return
logger.info(f"Processing {len(changes)} file changes")
logger.info(f"Processing {len(changes)} LoRA file changes")
cache = await self.scanner.get_cached_data()
# Get scanner through ServiceRegistry
scanner = await ServiceRegistry.get_lora_scanner()
cache = await scanner.get_cached_data()
needs_resort = False
new_folders = set()
for action, file_path in changes:
try:
if action == 'add':
# Check if file already exists in cache
existing = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
if existing:
logger.info(f"File {file_path} already in cache, skipping")
continue
# Scan new file
lora_data = await self.scanner.scan_single_lora(file_path)
if lora_data:
cache.raw_data.append(lora_data)
new_folders.add(lora_data['folder'])
model_data = await scanner.scan_single_model(file_path)
if model_data:
# Update tags count
for tag in model_data.get('tags', []):
scanner._tags_count[tag] = scanner._tags_count.get(tag, 0) + 1
cache.raw_data.append(model_data)
new_folders.add(model_data['folder'])
# Update hash index
if 'sha256' in lora_data:
self.scanner._hash_index.add_entry(
lora_data['sha256'],
lora_data['file_path']
if 'sha256' in model_data:
scanner._hash_index.add_entry(
model_data['sha256'],
model_data['file_path']
)
needs_resort = True
elif action == 'remove':
# Find the model to remove so we can update tags count
model_to_remove = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
if model_to_remove:
# Update tags count by reducing counts
for tag in model_to_remove.get('tags', []):
if tag in scanner._tags_count:
scanner._tags_count[tag] = max(0, scanner._tags_count[tag] - 1)
if scanner._tags_count[tag] == 0:
del scanner._tags_count[tag]
# Remove from cache and hash index
logger.info(f"Removing {file_path} from cache")
self.scanner._hash_index.remove_by_path(file_path)
scanner._hash_index.remove_by_path(file_path)
cache.raw_data = [
item for item in cache.raw_data
if item['file_path'] != file_path
@@ -129,62 +298,245 @@ class LoraFileHandler(FileSystemEventHandler):
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
except Exception as e:
logger.error(f"Error in process_changes: {e}")
logger.error(f"Error in process_changes for LoRA: {e}")
class LoraFileMonitor:
"""Monitor for LoRA file changes"""
class CheckpointFileHandler(BaseFileHandler):
"""Handler for checkpoint file system events"""
def __init__(self, scanner: LoraScanner, roots: List[str]):
self.scanner = scanner
scanner.set_file_monitor(self)
def __init__(self, loop: asyncio.AbstractEventLoop):
super().__init__(loop)
# Set supported file extensions for checkpoints
self.file_extensions = {'.safetensors', '.ckpt', '.pt', '.pth', '.sft', '.gguf'}
async def _process_changes(self, delay: float = 2.0):
"""Process pending changes with debouncing for checkpoint files"""
await asyncio.sleep(delay)
try:
with self.lock:
changes = self.pending_changes.copy()
self.pending_changes.clear()
if not changes:
return
logger.info(f"Processing {len(changes)} checkpoint file changes")
# Get scanner through ServiceRegistry
scanner = await ServiceRegistry.get_checkpoint_scanner()
cache = await scanner.get_cached_data()
needs_resort = False
new_folders = set()
for action, file_path in changes:
try:
if action == 'add':
# Check if file already exists in cache
existing = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
if existing:
logger.info(f"File {file_path} already in cache, skipping")
continue
# Scan new file
model_data = await scanner.scan_single_model(file_path)
if model_data:
# Update tags count if applicable
for tag in model_data.get('tags', []):
scanner._tags_count[tag] = scanner._tags_count.get(tag, 0) + 1
cache.raw_data.append(model_data)
new_folders.add(model_data['folder'])
# Update hash index
if 'sha256' in model_data:
scanner._hash_index.add_entry(
model_data['sha256'],
model_data['file_path']
)
needs_resort = True
elif action == 'remove':
# Find the model to remove so we can update tags count
model_to_remove = next((item for item in cache.raw_data if item['file_path'] == file_path), None)
if model_to_remove:
# Update tags count by reducing counts
for tag in model_to_remove.get('tags', []):
if tag in scanner._tags_count:
scanner._tags_count[tag] = max(0, scanner._tags_count[tag] - 1)
if scanner._tags_count[tag] == 0:
del scanner._tags_count[tag]
# Remove from cache and hash index
logger.info(f"Removing {file_path} from checkpoint cache")
scanner._hash_index.remove_by_path(file_path)
cache.raw_data = [
item for item in cache.raw_data
if item['file_path'] != file_path
]
needs_resort = True
except Exception as e:
logger.error(f"Error processing checkpoint {action} for {file_path}: {e}")
if needs_resort:
await cache.resort()
# Update folder list
all_folders = set(cache.folders) | new_folders
cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
except Exception as e:
logger.error(f"Error in process_changes for checkpoint: {e}")
class BaseFileMonitor:
"""Base class for file monitoring"""
def __init__(self, monitor_paths: List[str]):
self.observer = Observer()
self.loop = asyncio.get_event_loop()
self.handler = LoraFileHandler(scanner, self.loop)
# 使用已存在的路径映射
self.monitor_paths = set()
for root in roots:
self.monitor_paths.add(os.path.realpath(root).replace(os.sep, '/'))
# Process monitor paths
for path in monitor_paths:
self.monitor_paths.add(os.path.realpath(path).replace(os.sep, '/'))
# 添加所有已映射的目标路径
# Add mapped paths from config
for target_path in config._path_mappings.keys():
self.monitor_paths.add(target_path)
def start(self):
"""Start monitoring"""
for path_info in self.monitor_paths:
"""Start file monitoring"""
if not ENABLE_FILE_MONITORING:
logger.info("File monitoring is disabled via ENABLE_FILE_MONITORING setting")
return
for path in self.monitor_paths:
try:
if isinstance(path_info, tuple):
# 对于链接,监控目标路径
_, target_path = path_info
self.observer.schedule(self.handler, target_path, recursive=True)
logger.info(f"Started monitoring target path: {target_path}")
else:
# 对于普通路径,直接监控
self.observer.schedule(self.handler, path_info, recursive=True)
logger.info(f"Started monitoring: {path_info}")
self.observer.schedule(self.handler, path, recursive=True)
logger.info(f"Started monitoring: {path}")
except Exception as e:
logger.error(f"Error monitoring {path_info}: {e}")
logger.error(f"Error monitoring {path}: {e}")
self.observer.start()
def stop(self):
"""Stop monitoring"""
"""Stop file monitoring"""
if not ENABLE_FILE_MONITORING:
return
self.observer.stop()
self.observer.join()
def rescan_links(self):
"""重新扫描链接(当添加新的链接时调用)"""
"""Rescan links when new ones are added"""
if not ENABLE_FILE_MONITORING:
return
# Find new paths not yet being monitored
new_paths = set()
for path in self.monitor_paths.copy():
self._add_link_targets(path)
for path in config._path_mappings.keys():
real_path = os.path.realpath(path).replace(os.sep, '/')
if real_path not in self.monitor_paths:
new_paths.add(real_path)
self.monitor_paths.add(real_path)
# 添加新发现的路径到监控
new_paths = self.monitor_paths - set(self.observer.watches.keys())
# Add new paths to monitoring
for path in new_paths:
try:
self.observer.schedule(self.handler, path, recursive=True)
logger.info(f"Added new monitoring path: {path}")
except Exception as e:
logger.error(f"Error adding new monitor for {path}: {e}")
logger.error(f"Error adding new monitor for {path}: {e}")
class LoraFileMonitor(BaseFileMonitor):
"""Monitor for LoRA file changes"""
_instance = None
_lock = asyncio.Lock()
def __new__(cls, monitor_paths=None):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def __init__(self, monitor_paths=None):
if not hasattr(self, '_initialized'):
if monitor_paths is None:
from ..config import config
monitor_paths = config.loras_roots
super().__init__(monitor_paths)
self.handler = LoraFileHandler(self.loop)
self._initialized = True
@classmethod
async def get_instance(cls):
"""Get singleton instance with async support"""
async with cls._lock:
if cls._instance is None:
from ..config import config
cls._instance = cls(config.loras_roots)
return cls._instance
class CheckpointFileMonitor(BaseFileMonitor):
"""Monitor for checkpoint file changes"""
_instance = None
_lock = asyncio.Lock()
def __new__(cls, monitor_paths=None):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def __init__(self, monitor_paths=None):
if not hasattr(self, '_initialized'):
if monitor_paths is None:
# Get checkpoint roots from scanner
monitor_paths = []
# We'll initialize monitor paths later when scanner is available
super().__init__(monitor_paths or [])
self.handler = CheckpointFileHandler(self.loop)
self._initialized = True
@classmethod
async def get_instance(cls):
"""Get singleton instance with async support"""
async with cls._lock:
if cls._instance is None:
cls._instance = cls([])
# Now get checkpoint roots from scanner
from .checkpoint_scanner import CheckpointScanner
scanner = await CheckpointScanner.get_instance()
monitor_paths = scanner.get_model_roots()
# Update monitor paths - but don't actually monitor them
for path in monitor_paths:
real_path = os.path.realpath(path).replace(os.sep, '/')
cls._instance.monitor_paths.add(real_path)
return cls._instance
def start(self):
"""Override start to check global enable flag"""
if not ENABLE_FILE_MONITORING:
logger.info("Checkpoint file monitoring is disabled via ENABLE_FILE_MONITORING setting")
return
logger.info("Checkpoint file monitoring is temporarily disabled")
# Skip the actual monitoring setup
pass
async def initialize_paths(self):
"""Initialize monitor paths from scanner - currently disabled"""
if not ENABLE_FILE_MONITORING:
logger.info("Checkpoint path initialization skipped (monitoring disabled)")
return
logger.info("Checkpoint file path initialization skipped (monitoring disabled)")
pass

View File

@@ -15,11 +15,13 @@ class LoraHashIndex:
"""Add or update a hash -> path mapping"""
if not sha256 or not file_path:
return
self._hash_to_path[sha256] = file_path
# Always store lowercase hashes for consistency
self._hash_to_path[sha256.lower()] = file_path
def remove_entry(self, sha256: str) -> None:
"""Remove a hash entry"""
self._hash_to_path.pop(sha256, None)
if sha256:
self._hash_to_path.pop(sha256.lower(), None)
def remove_by_path(self, file_path: str) -> None:
"""Remove entry by file path"""
@@ -30,7 +32,9 @@ class LoraHashIndex:
def get_path(self, sha256: str) -> Optional[str]:
"""Get file path for a given hash"""
return self._hash_to_path.get(sha256)
if not sha256:
return None
return self._hash_to_path.get(sha256.lower())
def get_hash(self, file_path: str) -> Optional[str]:
"""Get hash for a given file path"""
@@ -41,7 +45,9 @@ class LoraHashIndex:
def has_hash(self, sha256: str) -> bool:
"""Check if hash exists in index"""
return sha256 in self._hash_to_path
if not sha256:
return False
return sha256.lower() in self._hash_to_path
def clear(self) -> None:
"""Clear all entries"""

View File

@@ -3,18 +3,22 @@ import os
import logging
import asyncio
import shutil
from typing import List, Dict, Optional
from dataclasses import dataclass
from operator import itemgetter
import time
from typing import List, Dict, Optional, Set
from ..utils.models import LoraMetadata
from ..config import config
from ..utils.file_utils import load_metadata, get_file_info
from .lora_cache import LoraCache
from difflib import SequenceMatcher
from .model_scanner import ModelScanner
from .lora_hash_index import LoraHashIndex
from .settings_manager import settings
from ..utils.constants import NSFW_LEVELS
from ..utils.utils import fuzzy_match
from .service_registry import ServiceRegistry
import sys
logger = logging.getLogger(__name__)
class LoraScanner:
class LoraScanner(ModelScanner):
"""Service for scanning and managing LoRA files"""
_instance = None
@@ -26,19 +30,20 @@ class LoraScanner:
return cls._instance
def __init__(self):
# 确保初始化只执行一次
# Ensure initialization happens only once
if not hasattr(self, '_initialized'):
self._cache: Optional[LoraCache] = None
self._hash_index = LoraHashIndex()
self._initialization_lock = asyncio.Lock()
self._initialization_task: Optional[asyncio.Task] = None
# Define supported file extensions
file_extensions = {'.safetensors'}
# Initialize parent class
super().__init__(
model_type="lora",
model_class=LoraMetadata,
file_extensions=file_extensions,
hash_index=LoraHashIndex()
)
self._initialized = True
self.file_monitor = None # Add this line
def set_file_monitor(self, monitor):
"""Set file monitor instance"""
self.file_monitor = monitor
@classmethod
async def get_instance(cls):
"""Get singleton instance with async support"""
@@ -46,119 +51,78 @@ class LoraScanner:
if cls._instance is None:
cls._instance = cls()
return cls._instance
async def get_cached_data(self, force_refresh: bool = False) -> LoraCache:
"""Get cached LoRA data, refresh if needed"""
async with self._initialization_lock:
def get_model_roots(self) -> List[str]:
"""Get lora root directories"""
return config.loras_roots
async def scan_all_models(self) -> List[Dict]:
"""Scan all LoRA directories and return metadata"""
all_loras = []
# Create scan tasks for each directory
scan_tasks = []
for lora_root in self.get_model_roots():
task = asyncio.create_task(self._scan_directory(lora_root))
scan_tasks.append(task)
# 如果缓存未初始化但需要响应请求,返回空缓存
if self._cache is None and not force_refresh:
return LoraCache(
raw_data=[],
sorted_by_name=[],
sorted_by_date=[],
folders=[]
)
# 如果正在初始化,等待完成
if self._initialization_task and not self._initialization_task.done():
try:
await self._initialization_task
except Exception as e:
logger.error(f"Cache initialization failed: {e}")
self._initialization_task = None
if (self._cache is None or force_refresh):
# Wait for all tasks to complete
for task in scan_tasks:
try:
loras = await task
all_loras.extend(loras)
except Exception as e:
logger.error(f"Error scanning directory: {e}")
# 创建新的初始化任务
if not self._initialization_task or self._initialization_task.done():
self._initialization_task = asyncio.create_task(self._initialize_cache())
return all_loras
async def _scan_directory(self, root_path: str) -> List[Dict]:
"""Scan a single directory for LoRA files"""
loras = []
original_root = root_path # Save original root path
async def scan_recursive(path: str, visited_paths: set):
"""Recursively scan directory, avoiding circular symlinks"""
try:
real_path = os.path.realpath(path)
if real_path in visited_paths:
logger.debug(f"Skipping already visited path: {path}")
return
visited_paths.add(real_path)
try:
await self._initialization_task
except Exception as e:
logger.error(f"Cache initialization failed: {e}")
# 如果缓存已存在,继续使用旧缓存
if self._cache is None:
raise # 如果没有缓存,则抛出异常
return self._cache
with os.scandir(path) as it:
entries = list(it)
for entry in entries:
try:
if entry.is_file(follow_symlinks=True) and any(entry.name.endswith(ext) for ext in self.file_extensions):
# Use original path instead of real path
file_path = entry.path.replace(os.sep, "/")
await self._process_single_file(file_path, original_root, loras)
await asyncio.sleep(0)
elif entry.is_dir(follow_symlinks=True):
# For directories, continue scanning with original path
await scan_recursive(entry.path, visited_paths)
except Exception as e:
logger.error(f"Error processing entry {entry.path}: {e}")
except Exception as e:
logger.error(f"Error scanning {path}: {e}")
async def _initialize_cache(self) -> None:
"""Initialize or refresh the cache"""
await scan_recursive(root_path, set())
return loras
async def _process_single_file(self, file_path: str, root_path: str, loras: list):
"""Process a single file and add to results list"""
try:
# Clear existing hash index
self._hash_index.clear()
# Scan for new data
raw_data = await self.scan_all_loras()
# Build hash index
for lora_data in raw_data:
if 'sha256' in lora_data and 'file_path' in lora_data:
self._hash_index.add_entry(lora_data['sha256'], lora_data['file_path'])
# Update cache
self._cache = LoraCache(
raw_data=raw_data,
sorted_by_name=[],
sorted_by_date=[],
folders=[]
)
# Call resort_cache to create sorted views
await self._cache.resort()
self._initialization_task = None
logger.info("LoRA Manager: Cache initialization completed")
result = await self._process_model_file(file_path, root_path)
if result:
loras.append(result)
except Exception as e:
logger.error(f"LoRA Manager: Error initializing cache: {e}")
self._cache = LoraCache(
raw_data=[],
sorted_by_name=[],
sorted_by_date=[],
folders=[]
)
def fuzzy_match(self, text: str, pattern: str, threshold: float = 0.7) -> bool:
"""
Check if text matches pattern using fuzzy matching.
Returns True if similarity ratio is above threshold.
"""
if not pattern or not text:
return False
# Convert both to lowercase for case-insensitive matching
text = text.lower()
pattern = pattern.lower()
# Split pattern into words
search_words = pattern.split()
# Check each word
for word in search_words:
# First check if word is a substring (faster)
if word in text:
continue
# If not found as substring, try fuzzy matching
# Check if any part of the text matches this word
found_match = False
for text_part in text.split():
ratio = SequenceMatcher(None, text_part, word).ratio()
if ratio >= threshold:
found_match = True
break
if not found_match:
return False
# All words found either as substrings or fuzzy matches
return True
logger.error(f"Error processing {file_path}: {e}")
async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'name',
folder: str = None, search: str = None, fuzzy: bool = False,
recursive: bool = False, base_models: list = None):
folder: str = None, search: str = None, fuzzy_search: bool = False,
base_models: list = None, tags: list = None,
search_options: dict = None, hash_filters: dict = None) -> Dict:
"""Get paginated and filtered lora data
Args:
@@ -167,59 +131,125 @@ class LoraScanner:
sort_by: Sort method ('name' or 'date')
folder: Filter by folder path
search: Search term
fuzzy: Use fuzzy matching for search
recursive: Include subfolders when folder filter is applied
fuzzy_search: Use fuzzy matching for search
base_models: List of base models to filter by
tags: List of tags to filter by
search_options: Dictionary with search options (filename, modelname, tags, recursive)
hash_filters: Dictionary with hash filtering options (single_hash or multiple_hashes)
"""
cache = await self.get_cached_data()
# 先获取基础数据集
# Get default search options if not provided
if search_options is None:
search_options = {
'filename': True,
'modelname': True,
'tags': False,
'recursive': False,
}
# Get the base data set
filtered_data = cache.sorted_by_date if sort_by == 'date' else cache.sorted_by_name
# 应用文件夹过滤
if folder is not None:
if recursive:
# 递归模式:匹配所有以该文件夹开头的路径
# Apply hash filtering if provided (highest priority)
if hash_filters:
single_hash = hash_filters.get('single_hash')
multiple_hashes = hash_filters.get('multiple_hashes')
if single_hash:
# Filter by single hash
single_hash = single_hash.lower() # Ensure lowercase for matching
filtered_data = [
item for item in filtered_data
if item['folder'].startswith(folder + '/') or item['folder'] == folder
lora for lora in filtered_data
if lora.get('sha256', '').lower() == single_hash
]
elif multiple_hashes:
# Filter by multiple hashes
hash_set = set(hash.lower() for hash in multiple_hashes) # Convert to set for faster lookup
filtered_data = [
lora for lora in filtered_data
if lora.get('sha256', '').lower() in hash_set
]
# Jump to pagination
total_items = len(filtered_data)
start_idx = (page - 1) * page_size
end_idx = min(start_idx + page_size, total_items)
result = {
'items': filtered_data[start_idx:end_idx],
'total': total_items,
'page': page,
'page_size': page_size,
'total_pages': (total_items + page_size - 1) // page_size
}
return result
# Apply SFW filtering if enabled
if settings.get('show_only_sfw', False):
filtered_data = [
lora for lora in filtered_data
if not lora.get('preview_nsfw_level') or lora.get('preview_nsfw_level') < NSFW_LEVELS['R']
]
# Apply folder filtering
if folder is not None:
if search_options.get('recursive', False):
# Recursive folder filtering - include all subfolders
filtered_data = [
lora for lora in filtered_data
if lora['folder'].startswith(folder)
]
else:
# 非递归模式:只匹配确切的文件夹
# Exact folder filtering
filtered_data = [
item for item in filtered_data
if item['folder'] == folder
lora for lora in filtered_data
if lora['folder'] == folder
]
# Apply base model filtering
if base_models and len(base_models) > 0:
filtered_data = [
item for item in filtered_data
if item.get('base_model') in base_models
lora for lora in filtered_data
if lora.get('base_model') in base_models
]
# 应用搜索过滤
# Apply tag filtering
if tags and len(tags) > 0:
filtered_data = [
lora for lora in filtered_data
if any(tag in lora.get('tags', []) for tag in tags)
]
# Apply search filtering
if search:
if fuzzy:
filtered_data = [
item for item in filtered_data
if any(
self.fuzzy_match(str(value), search)
for value in [
item.get('model_name', ''),
item.get('base_model', '')
]
if value
)
]
else:
# Original exact search logic
filtered_data = [
item for item in filtered_data
if search in str(item.get('model_name', '')).lower()
]
search_results = []
search_opts = search_options or {}
for lora in filtered_data:
# Search by file name
if search_opts.get('filename', True):
if fuzzy_match(lora.get('file_name', ''), search):
search_results.append(lora)
continue
# Search by model name
if search_opts.get('modelname', True):
if fuzzy_match(lora.get('model_name', ''), search):
search_results.append(lora)
continue
# Search by tags
if search_opts.get('tags', False) and 'tags' in lora:
if any(fuzzy_match(tag, search) for tag in lora['tags']):
search_results.append(lora)
continue
filtered_data = search_results
# 计算分页
# Calculate pagination
total_items = len(filtered_data)
start_idx = (page - 1) * page_size
end_idx = min(start_idx + page_size, total_items)
@@ -234,238 +264,6 @@ class LoraScanner:
return result
def invalidate_cache(self):
    """Drop the in-memory cache so the next access rebuilds it from disk."""
    self._cache = None
async def scan_all_loras(self) -> List[Dict]:
    """Scan all LoRA directories and return metadata"""
    # Kick off one scan task per configured root so the directories
    # are walked concurrently on the event loop.
    tasks = [
        asyncio.create_task(self._scan_directory(root))
        for root in config.loras_roots
    ]

    all_loras: List[Dict] = []
    # Collect results in root order; a failing root is logged and skipped
    # rather than aborting the whole scan.
    for task in tasks:
        try:
            all_loras.extend(await task)
        except Exception as e:
            logger.error(f"Error scanning directory: {e}")
    return all_loras
async def _scan_directory(self, root_path: str) -> List[Dict]:
    """Scan a single directory tree for .safetensors LoRA files.

    Args:
        root_path: Root directory to scan; also used as the base for
            relative folder calculation in the produced metadata.

    Returns:
        List[Dict]: Metadata dicts for every LoRA file found.
    """
    loras = []
    original_root = root_path  # Keep the original (unresolved) root for relative-path math

    async def scan_recursive(path: str, visited_paths: set):
        """Recursively scan a directory, avoiding symlink loops"""
        try:
            # Resolve symlinks so the same physical directory is visited once
            real_path = os.path.realpath(path)
            if real_path in visited_paths:
                logger.debug(f"Skipping already visited path: {path}")
                return
            visited_paths.add(real_path)

            with os.scandir(path) as it:
                entries = list(it)
                for entry in entries:
                    try:
                        if entry.is_file(follow_symlinks=True) and entry.name.endswith('.safetensors'):
                            # Use the original path rather than the resolved real path
                            file_path = entry.path.replace(os.sep, "/")
                            await self._process_single_file(file_path, original_root, loras)
                            # Yield control so the event loop stays responsive
                            await asyncio.sleep(0)
                        elif entry.is_dir(follow_symlinks=True):
                            # For directories, continue scanning with the original path
                            await scan_recursive(entry.path, visited_paths)
                    except Exception as e:
                        logger.error(f"Error processing entry {entry.path}: {e}")
        except Exception as e:
            logger.error(f"Error scanning {path}: {e}")

    await scan_recursive(root_path, set())
    return loras
async def _process_single_file(self, file_path: str, root_path: str, loras: list):
"""处理单个文件并添加到结果列表"""
try:
result = await self._process_lora_file(file_path, root_path)
if result:
loras.append(result)
except Exception as e:
logger.error(f"Error processing {file_path}: {e}")
async def _process_lora_file(self, file_path: str, root_path: str) -> Dict:
    """Build the metadata dict for one LoRA file, including its folder.

    Prefers persisted metadata on disk; falls back to probing the file.
    """
    metadata = await load_metadata(file_path)
    if metadata is None:
        # No stored metadata — derive fresh info from the file itself
        metadata = await get_file_info(file_path)

    lora_data = metadata.to_dict()
    # Folder is stored relative to the scan root, with forward slashes
    relative = os.path.relpath(file_path, root_path)
    lora_data['folder'] = os.path.dirname(relative).replace(os.path.sep, '/')
    return lora_data
async def update_preview_in_cache(self, file_path: str, preview_url: str) -> bool:
    """Update the preview URL for one cached LoRA.

    Args:
        file_path: The file path of the lora to update
        preview_url: The new preview URL

    Returns:
        bool: True if the update was successful, False if cache doesn't
        exist or the lora wasn't found.
    """
    cache = self._cache
    if cache is None:
        return False
    return await cache.update_preview_url(file_path, preview_url)
async def scan_single_lora(self, file_path: str) -> Optional[Dict]:
    """Scan one LoRA file; return its metadata dict, or None on failure."""
    try:
        # Resolve symlinks before the existence check
        if not os.path.exists(os.path.realpath(file_path)):
            return None

        # Basic file info (size, hash, timestamps, ...)
        metadata = await get_file_info(file_path)
        if not metadata:
            return None

        result = metadata.to_dict()
        # Guarantee the folder field is always present
        result['folder'] = self._calculate_folder(file_path) or ''
        return result
    except Exception as e:
        logger.error(f"Error scanning {file_path}: {e}")
        return None
def _calculate_folder(self, file_path: str) -> str:
    """Return ``file_path``'s folder relative to its LoRA root ('' if no root matches)."""
    # First configured root that prefixes the (unresolved) path wins
    matching_root = next(
        (root for root in config.loras_roots if file_path.startswith(root)),
        None,
    )
    if matching_root is None:
        return ''
    rel_path = os.path.relpath(file_path, matching_root)
    return os.path.dirname(rel_path).replace(os.path.sep, '/')
async def move_model(self, source_path: str, target_path: str) -> bool:
    """Move a model and its associated files to a new location.

    Args:
        source_path: Current path of the .safetensors file.
        target_path: Destination directory (created if missing).

    Returns:
        bool: True on success, False if any step failed (error is logged).
    """
    try:
        # Normalize to forward slashes to match the cache path format
        source_path = source_path.replace(os.sep, '/')
        target_path = target_path.replace(os.sep, '/')

        base_name = os.path.splitext(os.path.basename(source_path))[0]
        source_dir = os.path.dirname(source_path)

        os.makedirs(target_path, exist_ok=True)
        target_lora = os.path.join(target_path, f"{base_name}.safetensors").replace(os.sep, '/')

        # Use real (resolved) paths for the actual file operations
        real_source = os.path.realpath(source_path)
        real_target = os.path.realpath(target_lora)

        file_size = os.path.getsize(real_source)
        if self.file_monitor:
            # Tell the file monitor to ignore the events this move generates
            self.file_monitor.handler.add_ignore_path(
                real_source,
                file_size
            )
            self.file_monitor.handler.add_ignore_path(
                real_target,
                file_size
            )

        shutil.move(real_source, real_target)

        # Move associated metadata file, if present.
        # BUGFIX: metadata must be initialized even when no .metadata.json
        # exists; previously it was unbound and the cache update below
        # raised NameError (caught by the broad except, returning False).
        metadata = None
        source_metadata = os.path.join(source_dir, f"{base_name}.metadata.json")
        if os.path.exists(source_metadata):
            target_metadata = os.path.join(target_path, f"{base_name}.metadata.json")
            shutil.move(source_metadata, target_metadata)
            metadata = await self._update_metadata_paths(target_metadata, target_lora)

        # Move the first preview file found, if any
        preview_extensions = ['.preview.png', '.preview.jpeg', '.preview.jpg', '.preview.mp4',
                              '.png', '.jpeg', '.jpg', '.mp4']
        for ext in preview_extensions:
            source_preview = os.path.join(source_dir, f"{base_name}{ext}")
            if os.path.exists(source_preview):
                target_preview = os.path.join(target_path, f"{base_name}{ext}")
                shutil.move(source_preview, target_preview)
                break

        # Update cache
        await self.update_single_lora_cache(source_path, target_lora, metadata)

        return True
    except Exception as e:
        logger.error(f"Error moving model: {e}", exc_info=True)
        return False
async def update_single_lora_cache(self, original_path: str, new_path: str, metadata: Dict) -> bool:
    """Replace a single LoRA's cache entry (used after moves/updates).

    Args:
        original_path: Path currently stored in the cache.
        new_path: Path after the move (equals original_path for in-place updates).
        metadata: New metadata dict to insert, or falsy to just drop the entry.

    Returns:
        bool: Always True.
    """
    cache = await self.get_cached_data()

    # BUGFIX: capture the existing entry BEFORE removing it from raw_data.
    # The previous code looked it up after removal, so the stored folder
    # was never found and was always recalculated.
    existing_entry = next(
        (item for item in cache.raw_data if item['file_path'] == original_path),
        None,
    )

    # Remove old path from hash index if exists
    self._hash_index.remove_by_path(original_path)

    # Remove the old entry from raw_data
    cache.raw_data = [
        item for item in cache.raw_data
        if item['file_path'] != original_path
    ]

    if metadata:
        if original_path == new_path:
            # In-place update (not a move): preserve the known folder
            if existing_entry and existing_entry.get('folder'):
                metadata['folder'] = existing_entry['folder']
            else:
                metadata['folder'] = self._calculate_folder(new_path)
        else:
            # For moved files, recalculate the folder
            metadata['folder'] = self._calculate_folder(new_path)

        # Add the updated metadata to raw_data
        cache.raw_data.append(metadata)

        # Update hash index with new path
        if 'sha256' in metadata:
            self._hash_index.add_entry(metadata['sha256'], new_path)

    # Update folders list
    all_folders = set(item['folder'] for item in cache.raw_data)
    cache.folders = sorted(list(all_folders), key=lambda x: x.lower())

    # Resort cache
    await cache.resort()

    return True
async def _update_metadata_paths(self, metadata_path: str, lora_path: str) -> Dict:
"""Update file paths in metadata file"""
try:
@@ -492,16 +290,101 @@ class LoraScanner:
except Exception as e:
logger.error(f"Error updating metadata paths: {e}", exc_info=True)
# Lora-specific hash index functionality
def has_lora_hash(self, sha256: str) -> bool:
    """Check if a LoRA with given hash exists"""
    # BUGFIX: a diff-merge artifact left a second, unreachable `return`
    # statement here; removed. Delegate to the hash index.
    return self._hash_index.has_hash(sha256)
def get_lora_path_by_hash(self, sha256: str) -> Optional[str]:
    """Get file path for a LoRA by its hash"""
    # BUGFIX: removed the unreachable duplicate `return` left behind by a
    # merge; delegate to the hash index.
    return self._hash_index.get_path(sha256)
def get_lora_hash_by_path(self, file_path: str) -> Optional[str]:
    """Get hash for a LoRA by its file path"""
    # BUGFIX: removed the unreachable duplicate `return` left behind by a
    # merge; delegate to the hash index.
    return self._hash_index.get_hash(file_path)
async def get_top_tags(self, limit: int = 20) -> List[Dict[str, any]]:
    """Return up to ``limit`` tags with usage counts, most frequent first."""
    # Ensure the cache (and with it self._tags_count) is populated
    await self.get_cached_data()

    ranked = sorted(self._tags_count.items(), key=lambda kv: kv[1], reverse=True)
    return [{"tag": tag, "count": count} for tag, count in ranked[:limit]]
async def get_base_models(self, limit: int = 20) -> List[Dict[str, any]]:
    """Return up to ``limit`` base models with usage counts, most used first."""
    cache = await self.get_cached_data()

    # Tally how many loras use each base model (missing/empty values skipped)
    counts = {}
    for lora in cache.raw_data:
        base = lora.get('base_model')
        if base:
            counts[base] = counts.get(base, 0) + 1

    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    return [{'name': name, 'count': count} for name, count in ranked[:limit]]
async def diagnose_hash_index(self):
    """Diagnostic method to verify hash index functionality"""
    print("\n\n*** DIAGNOSING LORA HASH INDEX ***\n\n", file=sys.stderr)

    # Report how many entries the index holds (if it exists at all)
    if hasattr(self, '_hash_index'):
        entries = self._hash_index._hash_to_path
        index_entries = len(entries)
        print(f"Hash index has {index_entries} entries", file=sys.stderr)

        if index_entries > 0:
            print("\nSample hash index entries:", file=sys.stderr)
            # Show at most the first 5 entries
            for hash_val, path in list(entries.items())[:5]:
                print(f"Hash: {hash_val[:8]}... -> Path: {path}", file=sys.stderr)
    else:
        print("Hash index not initialized", file=sys.stderr)

    # Exercise a round-trip lookup using any known hash
    if not hasattr(self, '_hash_index') or not self._hash_index._hash_to_path:
        print("No hash entries to test lookup with", file=sys.stderr)
        return

    test_hash = next(iter(self._hash_index._hash_to_path.keys()))
    test_path = self._hash_index.get_path(test_hash)
    print(f"\nTest lookup by hash: {test_hash[:8]}... -> {test_path}", file=sys.stderr)

    # Also test reverse lookup
    test_hash_result = self._hash_index.get_hash(test_path)
    print(f"Test reverse lookup: {test_path} -> {test_hash_result[:8]}...\n\n", file=sys.stderr)
async def get_lora_info_by_name(self, name):
    """Get LoRA information by name"""
    try:
        cache = await self.get_cached_data()
        # Linear search of the cached entries for a matching file name
        return next(
            (lora for lora in cache.raw_data if lora.get("file_name") == name),
            None,
        )
    except Exception as e:
        logger.error(f"Error getting LoRA info by name: {e}", exc_info=True)
        return None

View File

@@ -0,0 +1,64 @@
import asyncio
from typing import List, Dict
from dataclasses import dataclass
from operator import itemgetter
@dataclass
class ModelCache:
    """Cache structure for model data"""
    raw_data: List[Dict]
    sorted_by_name: List[Dict]
    sorted_by_date: List[Dict]
    folders: List[str]

    def __post_init__(self):
        # Serializes concurrent mutation of the cached views
        self._lock = asyncio.Lock()

    async def resort(self, name_only: bool = False):
        """Rebuild the sorted views (and folder list) from raw_data.

        Args:
            name_only: When True, skip rebuilding the date-sorted view.
        """
        async with self._lock:
            # Case-insensitive sort by model name
            self.sorted_by_name = sorted(
                self.raw_data,
                key=lambda item: item['model_name'].lower()
            )
            if not name_only:
                # Newest first
                self.sorted_by_date = sorted(
                    self.raw_data,
                    key=itemgetter('modified'),
                    reverse=True
                )
            # Refresh the folder list (case-insensitive order)
            folder_set = {entry['folder'] for entry in self.raw_data}
            self.folders = sorted(folder_set, key=lambda f: f.lower())

    async def update_preview_url(self, file_path: str, preview_url: str) -> bool:
        """Update preview_url for a specific model in all cached data

        Args:
            file_path: The file path of the model to update
            preview_url: The new preview URL

        Returns:
            bool: True if the update was successful, False if the model wasn't found
        """
        async with self._lock:
            target = next(
                (item for item in self.raw_data if item['file_path'] == file_path),
                None,
            )
            if target is None:
                return False  # Model not found
            target['preview_url'] = preview_url

            # The sorted views normally reference the same dicts, but update
            # them explicitly in case they were built from different objects.
            for view in (self.sorted_by_name, self.sorted_by_date):
                for item in view:
                    if item['file_path'] == file_path:
                        item['preview_url'] = preview_url
                        break
            return True

View File

@@ -0,0 +1,78 @@
from typing import Dict, Optional, Set
class ModelHashIndex:
    """Index for looking up models by hash or path"""

    def __init__(self):
        # Bidirectional mapping: sha256 (stored lowercase) <-> file path
        self._hash_to_path: Dict[str, str] = {}
        self._path_to_hash: Dict[str, str] = {}

    def add_entry(self, sha256: str, file_path: str) -> None:
        """Add or update hash index entry"""
        if not sha256 or not file_path:
            return
        # Ensure hash is lowercase for consistency
        sha256 = sha256.lower()

        # Drop any stale mappings so both directions stay consistent
        stale_path = self._hash_to_path.get(sha256)
        if stale_path is not None:
            self._path_to_hash.pop(stale_path, None)
        stale_hash = self._path_to_hash.get(file_path)
        if stale_hash is not None:
            self._hash_to_path.pop(stale_hash, None)

        # Add new mappings
        self._hash_to_path[sha256] = file_path
        self._path_to_hash[file_path] = sha256

    def remove_by_path(self, file_path: str) -> None:
        """Remove entry by file path"""
        hash_val = self._path_to_hash.pop(file_path, None)
        if hash_val is not None:
            self._hash_to_path.pop(hash_val, None)

    def remove_by_hash(self, sha256: str) -> None:
        """Remove entry by hash"""
        path = self._hash_to_path.pop(sha256.lower(), None)
        if path is not None:
            self._path_to_hash.pop(path, None)

    def has_hash(self, sha256: str) -> bool:
        """Check if hash exists in index"""
        return sha256.lower() in self._hash_to_path

    def get_path(self, sha256: str) -> Optional[str]:
        """Get file path for a hash"""
        return self._hash_to_path.get(sha256.lower())

    def get_hash(self, file_path: str) -> Optional[str]:
        """Get hash for a file path"""
        return self._path_to_hash.get(file_path)

    def clear(self) -> None:
        """Clear all entries"""
        self._hash_to_path.clear()
        self._path_to_hash.clear()

    def get_all_hashes(self) -> Set[str]:
        """Get all hashes in the index"""
        return set(self._hash_to_path)

    def get_all_paths(self) -> Set[str]:
        """Get all file paths in the index"""
        return set(self._path_to_hash)

    def __len__(self) -> int:
        """Get number of entries"""
        return len(self._hash_to_path)

View File

@@ -0,0 +1,879 @@
import json
import os
import logging
import asyncio
import time
import shutil
from typing import List, Dict, Optional, Type, Set
from ..utils.models import BaseModelMetadata
from ..config import config
from ..utils.file_utils import load_metadata, get_file_info, find_preview_file, save_metadata
from .model_cache import ModelCache
from .model_hash_index import ModelHashIndex
from ..utils.constants import PREVIEW_EXTENSIONS
from .service_registry import ServiceRegistry
from .websocket_manager import ws_manager
logger = logging.getLogger(__name__)
class ModelScanner:
"""Base service for scanning and managing model files"""
_lock = asyncio.Lock()
def __init__(self, model_type: str, model_class: Type[BaseModelMetadata], file_extensions: Set[str], hash_index: Optional[ModelHashIndex] = None):
    """Initialize the scanner

    Args:
        model_type: Type of model (lora, checkpoint, etc.)
        model_class: Class used to create metadata instances
        file_extensions: Set of supported file extensions including the dot (e.g. {'.safetensors'})
        hash_index: Hash index instance (optional)
    """
    self.model_type = model_type
    self.model_class = model_class
    self.file_extensions = file_extensions
    self._cache = None  # ModelCache; populated later by initialize_in_background()
    self._hash_index = hash_index or ModelHashIndex()
    self._tags_count = {}  # Dictionary to store tag counts
    self._is_initializing = False  # Flag to track initialization state
    # Register this service
    # NOTE(review): asyncio.create_task requires a running event loop, so this
    # constructor is presumably only called from within one — confirm at call sites.
    asyncio.create_task(self._register_service())
async def _register_service(self):
    """Register this instance with the ServiceRegistry"""
    # Service key is derived from the model type, e.g. "lora_scanner"
    await ServiceRegistry.register_service(f"{self.model_type}_scanner", self)
async def initialize_in_background(self) -> None:
    """Initialize cache in background using thread pool.

    Broadcasts progress updates through ws_manager while the heavy file
    counting and scanning run in the default executor, keeping the event
    loop responsive during startup.
    """
    try:
        # Set initial empty cache to avoid None reference errors
        if self._cache is None:
            self._cache = ModelCache(
                raw_data=[],
                sorted_by_name=[],
                sorted_by_date=[],
                folders=[]
            )

        # Set initializing flag to true
        self._is_initializing = True

        # Determine the page type based on model type
        page_type = 'loras' if self.model_type == 'lora' else 'checkpoints'

        # First, count all model files to track progress
        await ws_manager.broadcast_init_progress({
            'stage': 'scan_folders',
            'progress': 0,
            'details': f"Scanning {self.model_type} folders...",
            'scanner_type': self.model_type,
            'pageType': page_type
        })

        # Count files in a separate thread to avoid blocking
        loop = asyncio.get_event_loop()
        total_files = await loop.run_in_executor(
            None,  # Use default thread pool
            self._count_model_files  # Run file counting in thread
        )

        await ws_manager.broadcast_init_progress({
            'stage': 'count_models',
            'progress': 1,  # Changed from 10 to 1
            'details': f"Found {total_files} {self.model_type} files",
            'scanner_type': self.model_type,
            'pageType': page_type
        })

        start_time = time.time()

        # Use thread pool to execute CPU-intensive operations with progress reporting
        await loop.run_in_executor(
            None,  # Use default thread pool
            self._initialize_cache_sync,  # Run synchronous version in thread
            total_files,  # Pass the total file count for progress reporting
            page_type  # Pass the page type for progress reporting
        )

        # Send final progress update
        await ws_manager.broadcast_init_progress({
            'stage': 'finalizing',
            'progress': 99,  # Changed from 95 to 99
            'details': f"Finalizing {self.model_type} cache...",
            'scanner_type': self.model_type,
            'pageType': page_type
        })

        logger.info(f"{self.model_type.capitalize()} cache initialized in {time.time() - start_time:.2f} seconds. Found {len(self._cache.raw_data)} models")

        # Send completion message
        await asyncio.sleep(0.5)  # Small delay to ensure final progress message is sent
        await ws_manager.broadcast_init_progress({
            'stage': 'finalizing',
            'progress': 100,
            'status': 'complete',
            'details': f"Completed! Found {len(self._cache.raw_data)} {self.model_type} files.",
            'scanner_type': self.model_type,
            'pageType': page_type
        })
    except Exception as e:
        logger.error(f"{self.model_type.capitalize()} Scanner: Error initializing cache in background: {e}")
    finally:
        # Always clear the initializing flag when done
        self._is_initializing = False
def _count_model_files(self) -> int:
"""Count all model files with supported extensions in all roots
Returns:
int: Total number of model files found
"""
total_files = 0
visited_real_paths = set()
for root_path in self.get_model_roots():
if not os.path.exists(root_path):
continue
def count_recursive(path):
nonlocal total_files
try:
real_path = os.path.realpath(path)
if real_path in visited_real_paths:
return
visited_real_paths.add(real_path)
with os.scandir(path) as it:
for entry in it:
try:
if entry.is_file(follow_symlinks=True):
ext = os.path.splitext(entry.name)[1].lower()
if ext in self.file_extensions:
total_files += 1
elif entry.is_dir(follow_symlinks=True):
count_recursive(entry.path)
except Exception as e:
logger.error(f"Error counting files in entry {entry.path}: {e}")
except Exception as e:
logger.error(f"Error counting files in {path}: {e}")
count_recursive(root_path)
return total_files
def _initialize_cache_sync(self, total_files=0, page_type='loras'):
    """Synchronous version of cache initialization for thread pool execution.

    Runs in a worker thread, so it builds a private event loop to drive the
    async scanning helpers, and broadcasts progress via ws_manager.

    Args:
        total_files: Pre-counted number of model files, used for progress %.
        page_type: Frontend page identifier included in progress messages.
    """
    try:
        # Create a new event loop for this thread (worker threads have none)
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        # Create a synchronous method to bypass the async lock
        def sync_initialize_cache():
            # Track progress
            processed_files = 0
            last_progress_time = time.time()
            last_progress_percent = 0

            # We need a wrapper around scan_all_models to track progress
            # This is a local function that will run in our thread's event loop
            async def scan_with_progress():
                nonlocal processed_files, last_progress_time, last_progress_percent

                # For storing raw model data
                all_models = []

                # Process each model root
                for root_path in self.get_model_roots():
                    if not os.path.exists(root_path):
                        continue

                    # Track visited paths to avoid symlink loops
                    visited_paths = set()

                    # Recursively process directory
                    async def scan_dir_with_progress(path):
                        nonlocal processed_files, last_progress_time, last_progress_percent
                        try:
                            real_path = os.path.realpath(path)
                            if real_path in visited_paths:
                                return
                            visited_paths.add(real_path)

                            with os.scandir(path) as it:
                                entries = list(it)
                                for entry in entries:
                                    try:
                                        if entry.is_file(follow_symlinks=True):
                                            ext = os.path.splitext(entry.name)[1].lower()
                                            if ext in self.file_extensions:
                                                file_path = entry.path.replace(os.sep, "/")
                                                result = await self._process_model_file(file_path, root_path)
                                                if result:
                                                    all_models.append(result)

                                                # Update progress counter
                                                processed_files += 1

                                                # Update progress periodically (not every file to avoid excessive updates)
                                                current_time = time.time()
                                                if total_files > 0 and (current_time - last_progress_time > 0.5 or processed_files == total_files):
                                                    # Adjusted progress calculation: map completion onto 1..99%
                                                    progress_percent = min(99, int(1 + (processed_files / total_files) * 98))

                                                    if progress_percent > last_progress_percent:
                                                        last_progress_percent = progress_percent
                                                        last_progress_time = current_time

                                                        # Send progress update through websocket
                                                        await ws_manager.broadcast_init_progress({
                                                            'stage': 'process_models',
                                                            'progress': progress_percent,
                                                            'details': f"Processing {self.model_type} files: {processed_files}/{total_files}",
                                                            'scanner_type': self.model_type,
                                                            'pageType': page_type
                                                        })
                                        elif entry.is_dir(follow_symlinks=True):
                                            await scan_dir_with_progress(entry.path)
                                    except Exception as e:
                                        logger.error(f"Error processing entry {entry.path}: {e}")
                        except Exception as e:
                            logger.error(f"Error scanning {path}: {e}")

                    # Process the root path
                    await scan_dir_with_progress(root_path)

                return all_models

            # Run the progress-tracking scan function
            raw_data = loop.run_until_complete(scan_with_progress())

            # Update hash index and tags count
            for model_data in raw_data:
                if 'sha256' in model_data and 'file_path' in model_data:
                    self._hash_index.add_entry(model_data['sha256'].lower(), model_data['file_path'])

                # Count tags
                if 'tags' in model_data and model_data['tags']:
                    for tag in model_data['tags']:
                        self._tags_count[tag] = self._tags_count.get(tag, 0) + 1

            # Update cache
            self._cache.raw_data = raw_data
            loop.run_until_complete(self._cache.resort())

            return self._cache

        # Run our sync initialization that avoids lock conflicts
        return sync_initialize_cache()
    except Exception as e:
        logger.error(f"Error in thread-based {self.model_type} cache initialization: {e}")
    finally:
        # Clean up the event loop
        # NOTE(review): if asyncio.new_event_loop() itself raised, `loop` is
        # unbound here and this line would raise NameError — confirm intended.
        loop.close()
async def get_cached_data(self, force_refresh: bool = False) -> ModelCache:
"""Get cached model data, refresh if needed"""
# If cache is not initialized, return an empty cache
# Actual initialization should be done via initialize_in_background
if self._cache is None and not force_refresh:
return ModelCache(
raw_data=[],
sorted_by_name=[],
sorted_by_date=[],
folders=[]
)
# If force refresh is requested, initialize the cache directly
if force_refresh:
if self._cache is None:
# For initial creation, do a full initialization
await self._initialize_cache()
else:
# For subsequent refreshes, use fast reconciliation
await self._reconcile_cache()
return self._cache
async def _initialize_cache(self) -> None:
    """Initialize or refresh the cache.

    Performs a full scan, rebuilds the hash index and tag counts from the
    results, and replaces self._cache. On failure the cache is guaranteed
    to at least be an empty structure.
    """
    self._is_initializing = True  # Set flag
    try:
        start_time = time.time()

        # Clear existing hash index
        self._hash_index.clear()

        # Clear existing tags count
        self._tags_count = {}

        # Determine the page type based on model type
        # NOTE(review): page_type is not used anywhere below in this method —
        # possibly leftover from the progress-reporting variant.
        page_type = 'loras' if self.model_type == 'lora' else 'checkpoints'

        # Scan for new data
        raw_data = await self.scan_all_models()

        # Build hash index and tags count
        for model_data in raw_data:
            if 'sha256' in model_data and 'file_path' in model_data:
                self._hash_index.add_entry(model_data['sha256'].lower(), model_data['file_path'])

            # Count tags
            if 'tags' in model_data and model_data['tags']:
                for tag in model_data['tags']:
                    self._tags_count[tag] = self._tags_count.get(tag, 0) + 1

        # Update cache
        self._cache = ModelCache(
            raw_data=raw_data,
            sorted_by_name=[],
            sorted_by_date=[],
            folders=[]
        )

        # Resort cache (also populates the sorted views and folder list)
        await self._cache.resort()

        logger.info(f"{self.model_type.capitalize()} Scanner: Cache initialization completed in {time.time() - start_time:.2f} seconds, found {len(raw_data)} models")
    except Exception as e:
        logger.error(f"{self.model_type.capitalize()} Scanner: Error initializing cache: {e}")
        # Ensure cache is at least an empty structure on error
        if self._cache is None:
            self._cache = ModelCache(
                raw_data=[],
                sorted_by_name=[],
                sorted_by_date=[],
                folders=[]
            )
    finally:
        self._is_initializing = False  # Unset flag
async def _reconcile_cache(self) -> None:
    """Fast cache reconciliation - only process differences between cache and filesystem.

    Walks all model roots, adds files missing from the cache, and removes
    cache entries whose files no longer exist, instead of rescanning
    everything from scratch.
    """
    self._is_initializing = True  # Set flag for reconciliation duration
    try:
        start_time = time.time()
        logger.info(f"{self.model_type.capitalize()} Scanner: Starting fast cache reconciliation...")

        # Get current cached file paths
        cached_paths = {item['file_path'] for item in self._cache.raw_data}
        path_to_item = {item['file_path']: item for item in self._cache.raw_data}

        # Track found files and new files
        found_paths = set()
        new_files = []

        # Scan all model roots
        for root_path in self.get_model_roots():
            if not os.path.exists(root_path):
                continue

            # Track visited real paths to avoid symlink loops
            visited_real_paths = set()

            # Recursively scan directory
            for root, _, files in os.walk(root_path, followlinks=True):
                real_root = os.path.realpath(root)
                if real_root in visited_real_paths:
                    continue
                visited_real_paths.add(real_root)

                for file in files:
                    ext = os.path.splitext(file)[1].lower()
                    if ext in self.file_extensions:
                        # Construct paths exactly as they would be in cache
                        file_path = os.path.join(root, file).replace(os.sep, '/')

                        # Check if this file is already in cache
                        if file_path in cached_paths:
                            found_paths.add(file_path)
                            continue

                        # Try case-insensitive match on Windows
                        # NOTE(review): this is O(len(cache)) per new file; fine
                        # for typical sizes, but could use a lowered-path set.
                        if os.name == 'nt':
                            lower_path = file_path.lower()
                            matched = False
                            for cached_path in cached_paths:
                                if cached_path.lower() == lower_path:
                                    found_paths.add(cached_path)
                                    matched = True
                                    break
                            if matched:
                                continue

                        # This is a new file to process
                        new_files.append(file_path)

                # Yield control periodically
                await asyncio.sleep(0)

        # Process new files in batches
        total_added = 0
        if new_files:
            logger.info(f"{self.model_type.capitalize()} Scanner: Found {len(new_files)} new files to process")
            batch_size = 50
            for i in range(0, len(new_files), batch_size):
                batch = new_files[i:i+batch_size]
                for path in batch:
                    try:
                        model_data = await self.scan_single_model(path)
                        if model_data:
                            # Add to cache
                            self._cache.raw_data.append(model_data)

                            # Update hash index if available
                            if 'sha256' in model_data and 'file_path' in model_data:
                                self._hash_index.add_entry(model_data['sha256'].lower(), model_data['file_path'])

                            # Update tags count
                            if 'tags' in model_data and model_data['tags']:
                                for tag in model_data['tags']:
                                    self._tags_count[tag] = self._tags_count.get(tag, 0) + 1

                            total_added += 1
                    except Exception as e:
                        logger.error(f"Error adding {path} to cache: {e}")

                # Yield control after each batch
                await asyncio.sleep(0)

        # Find missing files (in cache but not in filesystem)
        missing_files = cached_paths - found_paths
        total_removed = 0
        if missing_files:
            logger.info(f"{self.model_type.capitalize()} Scanner: Found {len(missing_files)} files to remove from cache")

            # Process files to remove
            for path in missing_files:
                try:
                    model_to_remove = path_to_item[path]

                    # Update tags count (never below zero; drop empty tags)
                    for tag in model_to_remove.get('tags', []):
                        if tag in self._tags_count:
                            self._tags_count[tag] = max(0, self._tags_count[tag] - 1)
                            if self._tags_count[tag] == 0:
                                del self._tags_count[tag]

                    # Remove from hash index
                    self._hash_index.remove_by_path(path)

                    total_removed += 1
                except Exception as e:
                    logger.error(f"Error removing {path} from cache: {e}")

            # Update cache data
            self._cache.raw_data = [item for item in self._cache.raw_data if item['file_path'] not in missing_files]

        # Resort cache if changes were made
        if total_added > 0 or total_removed > 0:
            # Update folders list
            all_folders = set(item.get('folder', '') for item in self._cache.raw_data)
            self._cache.folders = sorted(list(all_folders), key=lambda x: x.lower())

            # Resort cache
            await self._cache.resort()

        logger.info(f"{self.model_type.capitalize()} Scanner: Cache reconciliation completed in {time.time() - start_time:.2f} seconds. Added {total_added}, removed {total_removed} models.")
    except Exception as e:
        logger.error(f"{self.model_type.capitalize()} Scanner: Error reconciling cache: {e}", exc_info=True)
    finally:
        self._is_initializing = False  # Unset flag
# These methods should be implemented in child classes
async def scan_all_models(self) -> List[Dict]:
    """Scan all model directories and return metadata"""
    # Abstract: concrete scanners (lora, checkpoint, ...) must override this.
    raise NotImplementedError("Subclasses must implement scan_all_models")
def get_model_roots(self) -> List[str]:
    """Get model root directories"""
    # Abstract: concrete scanners must supply their configured roots.
    raise NotImplementedError("Subclasses must implement get_model_roots")
async def scan_single_model(self, file_path: str) -> Optional[Dict]:
    """Scan a single model file and return its metadata (None on failure)."""
    try:
        # Resolve symlinks before the existence check
        if not os.path.exists(os.path.realpath(file_path)):
            return None

        # Basic file info via the model-type-specific helper
        info = await self._get_file_info(file_path)
        if not info:
            return None

        data = info.to_dict()
        # Ensure the folder field is always present
        data['folder'] = self._calculate_folder(file_path) or ''
        return data
    except Exception as e:
        logger.error(f"Error scanning {file_path}: {e}")
        return None
async def _get_file_info(self, file_path: str) -> Optional[BaseModelMetadata]:
    """Probe a model file, building metadata with this scanner's model class.

    Kept as a small hook so model-type-specific subclasses can override it.
    """
    return await get_file_info(file_path, self.model_class)
def _calculate_folder(self, file_path: str) -> str:
"""Calculate the folder path for a model file"""
for root in self.get_model_roots():
if file_path.startswith(root):
rel_path = os.path.relpath(file_path, root)
return os.path.dirname(rel_path).replace(os.path.sep, '/')
return ''
# Common methods shared between scanners
async def _process_model_file(self, file_path: str, root_path: str) -> Dict:
    """Process a single model file and return its metadata.

    Resolution order: persisted .metadata.json -> sibling .civitai.info
    file -> probing the file itself. Also backfills missing tags and
    description from Civitai, and records the root-relative folder.
    """
    # Try persisted metadata first
    metadata = await load_metadata(file_path, self.model_class)

    if metadata is None:
        # Fall back to a Civitai version-info file next to the model
        civitai_info_path = f"{os.path.splitext(file_path)[0]}.civitai.info"
        if os.path.exists(civitai_info_path):
            try:
                with open(civitai_info_path, 'r', encoding='utf-8') as f:
                    version_info = json.load(f)
                # Use the primary file record from the version info
                # (note: this generator's `f` shadows the file handle above)
                file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
                if file_info:
                    file_name = os.path.splitext(os.path.basename(file_path))[0]
                    file_info['name'] = file_name
                    metadata = self.model_class.from_civitai_info(version_info, file_info, file_path)
                    metadata.preview_url = find_preview_file(file_name, os.path.dirname(file_path))
                    # Persist the reconstructed metadata for next time
                    await save_metadata(file_path, metadata)
                    logger.debug(f"Created metadata from .civitai.info for {file_path}")
            except Exception as e:
                logger.error(f"Error creating metadata from .civitai.info for {file_path}: {e}")

    if metadata is None:
        # Last resort: probe the file itself
        metadata = await self._get_file_info(file_path)

    model_data = metadata.to_dict()

    # Backfill tags/description from Civitai when missing
    await self._fetch_missing_metadata(file_path, model_data)

    rel_path = os.path.relpath(file_path, root_path)
    folder = os.path.dirname(rel_path)
    model_data['folder'] = folder.replace(os.path.sep, '/')

    return model_data
async def _fetch_missing_metadata(self, file_path: str, model_data: Dict) -> None:
    """Fetch missing description and tags from Civitai if needed.

    Mutates ``model_data`` in place and persists the result to the
    sibling .metadata.json file. Models previously marked deleted on
    Civitai are skipped; a fresh 404 marks them deleted.
    """
    try:
        if model_data.get('civitai_deleted', False):
            logger.debug(f"Skipping metadata fetch for {file_path}: marked as deleted on Civitai")
            return

        needs_metadata_update = False
        model_id = None

        # Only update when we have a Civitai model ID to query with
        if model_data.get('civitai'):
            model_id = model_data['civitai'].get('modelId')
            if model_id:
                model_id = str(model_id)

            tags_missing = not model_data.get('tags') or len(model_data.get('tags', [])) == 0
            desc_missing = not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")
            needs_metadata_update = tags_missing or desc_missing

        if needs_metadata_update and model_id:
            logger.debug(f"Fetching missing metadata for {file_path} with model ID {model_id}")
            # Imported here to avoid a module-level import cycle — presumably;
            # confirm before hoisting.
            from ..services.civitai_client import CivitaiClient
            client = CivitaiClient()

            model_metadata, status_code = await client.get_model_metadata(model_id)
            await client.close()

            if status_code == 404:
                # Remember the deletion so we don't re-query every scan
                logger.warning(f"Model {model_id} appears to be deleted from Civitai (404 response)")
                model_data['civitai_deleted'] = True

                metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
                with open(metadata_path, 'w', encoding='utf-8') as f:
                    json.dump(model_data, f, indent=2, ensure_ascii=False)
            elif model_metadata:
                logger.debug(f"Updating metadata for {file_path} with model ID {model_id}")

                # Fill each field only if it is still missing locally
                if model_metadata.get('tags') and (not model_data.get('tags') or len(model_data.get('tags', [])) == 0):
                    model_data['tags'] = model_metadata['tags']

                if model_metadata.get('description') and (not model_data.get('modelDescription') or model_data.get('modelDescription') in (None, "")):
                    model_data['modelDescription'] = model_metadata['description']

                metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
                with open(metadata_path, 'w', encoding='utf-8') as f:
                    json.dump(model_data, f, indent=2, ensure_ascii=False)
    except Exception as e:
        logger.error(f"Failed to update metadata from Civitai for {file_path}: {e}")
async def _scan_directory(self, root_path: str) -> List[Dict]:
"""Base implementation for directory scanning"""
models = []
original_root = root_path
async def scan_recursive(path: str, visited_paths: set):
try:
real_path = os.path.realpath(path)
if real_path in visited_paths:
logger.debug(f"Skipping already visited path: {path}")
return
visited_paths.add(real_path)
with os.scandir(path) as it:
entries = list(it)
for entry in entries:
try:
if entry.is_file(follow_symlinks=True):
ext = os.path.splitext(entry.name)[1].lower()
if ext in self.file_extensions:
file_path = entry.path.replace(os.sep, "/")
await self._process_single_file(file_path, original_root, models)
await asyncio.sleep(0)
elif entry.is_dir(follow_symlinks=True):
await scan_recursive(entry.path, visited_paths)
except Exception as e:
logger.error(f"Error processing entry {entry.path}: {e}")
except Exception as e:
logger.error(f"Error scanning {path}: {e}")
await scan_recursive(root_path, set())
return models
async def _process_single_file(self, file_path: str, root_path: str, models_list: list):
"""Process a single file and add to results list"""
try:
result = await self._process_model_file(file_path, root_path)
if result:
models_list.append(result)
except Exception as e:
logger.error(f"Error processing {file_path}: {e}")
async def move_model(self, source_path: str, target_path: str) -> bool:
    """Move a model and its associated files to a new location

    Moves the model file itself plus its sibling .metadata.json (with
    internal paths rewritten) and the first matching preview image, then
    updates the in-memory cache entry.

    Args:
        source_path: Full path of the model file to move
        target_path: Destination directory (created if missing)

    Returns:
        bool: True on success, False on any failure (logged)
    """
    try:
        source_path = source_path.replace(os.sep, '/')
        target_path = target_path.replace(os.sep, '/')
        file_ext = os.path.splitext(source_path)[1]
        if not file_ext or file_ext.lower() not in self.file_extensions:
            logger.error(f"Invalid file extension for model: {file_ext}")
            return False
        base_name = os.path.splitext(os.path.basename(source_path))[0]
        source_dir = os.path.dirname(source_path)
        os.makedirs(target_path, exist_ok=True)
        target_file = os.path.join(target_path, f"{base_name}{file_ext}").replace(os.sep, '/')
        # Resolve symlinks so the move and the monitor-ignore entries
        # refer to the same on-disk paths.
        real_source = os.path.realpath(source_path)
        real_target = os.path.realpath(target_file)
        file_size = os.path.getsize(real_source)
        # Get the appropriate file monitor through ServiceRegistry
        if self.model_type == "lora":
            monitor = await ServiceRegistry.get_lora_monitor()
        elif self.model_type == "checkpoint":
            monitor = await ServiceRegistry.get_checkpoint_monitor()
        else:
            monitor = None
        if monitor:
            # Suppress filesystem-watcher events for both endpoints of the
            # move so the watcher does not rescan a file we moved ourselves.
            monitor.handler.add_ignore_path(
                real_source,
                file_size
            )
            monitor.handler.add_ignore_path(
                real_target,
                file_size
            )
        shutil.move(real_source, real_target)
        # Move the metadata file alongside and rewrite its internal paths.
        source_metadata = os.path.join(source_dir, f"{base_name}.metadata.json")
        metadata = None
        if os.path.exists(source_metadata):
            target_metadata = os.path.join(target_path, f"{base_name}.metadata.json")
            shutil.move(source_metadata, target_metadata)
            metadata = await self._update_metadata_paths(target_metadata, target_file)
        # Move only the first preview image found (one preview per model).
        for ext in PREVIEW_EXTENSIONS:
            source_preview = os.path.join(source_dir, f"{base_name}{ext}")
            if os.path.exists(source_preview):
                target_preview = os.path.join(target_path, f"{base_name}{ext}")
                shutil.move(source_preview, target_preview)
                break
        await self.update_single_model_cache(source_path, target_file, metadata)
        return True
    except Exception as e:
        logger.error(f"Error moving model: {e}", exc_info=True)
        return False
async def _update_metadata_paths(self, metadata_path: str, model_path: str) -> Dict:
"""Update file paths in metadata file"""
try:
with open(metadata_path, 'r', encoding='utf-8') as f:
metadata = json.load(f)
metadata['file_path'] = model_path.replace(os.sep, '/')
if 'preview_url' in metadata:
preview_dir = os.path.dirname(model_path)
preview_name = os.path.splitext(os.path.basename(metadata['preview_url']))[0]
preview_ext = os.path.splitext(metadata['preview_url'])[1]
new_preview_path = os.path.join(preview_dir, f"{preview_name}{preview_ext}")
metadata['preview_url'] = new_preview_path.replace(os.sep, '/')
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(metadata, f, indent=2, ensure_ascii=False)
return metadata
except Exception as e:
logger.error(f"Error updating metadata paths: {e}", exc_info=True)
return None
async def update_single_model_cache(self, original_path: str, new_path: str, metadata: Dict) -> bool:
    """Update cache after a model has been moved or modified

    Removes the entry keyed by original_path (adjusting tag counts and
    the hash index), then re-inserts the supplied metadata under
    new_path and resorts the cache.

    Args:
        original_path: Previous file path of the model
        new_path: New file path (equals original_path for in-place edits)
        metadata: Replacement cache entry; None just removes the model

    Returns:
        bool: Always True
    """
    cache = await self.get_cached_data()
    # Decrement tag counts contributed by the entry being replaced,
    # dropping tags whose count reaches zero.
    existing_item = next((item for item in cache.raw_data if item['file_path'] == original_path), None)
    if existing_item and 'tags' in existing_item:
        for tag in existing_item.get('tags', []):
            if tag in self._tags_count:
                self._tags_count[tag] = max(0, self._tags_count[tag] - 1)
                if self._tags_count[tag] == 0:
                    del self._tags_count[tag]
    self._hash_index.remove_by_path(original_path)
    cache.raw_data = [
        item for item in cache.raw_data
        if item['file_path'] != original_path
    ]
    if metadata:
        # Keep the existing folder when the file did not actually move.
        if original_path == new_path:
            # NOTE(review): raw_data was just filtered above, so this lookup
            # can never match — existing_folder is always None and the
            # _calculate_folder branch always runs. Verify intent.
            existing_folder = next((item['folder'] for item in cache.raw_data
                                    if item['file_path'] == original_path), None)
            if existing_folder:
                metadata['folder'] = existing_folder
            else:
                metadata['folder'] = self._calculate_folder(new_path)
        else:
            metadata['folder'] = self._calculate_folder(new_path)
        cache.raw_data.append(metadata)
        if 'sha256' in metadata:
            self._hash_index.add_entry(metadata['sha256'].lower(), new_path)
        # Rebuild the folder list from scratch after the mutation.
        all_folders = set(item['folder'] for item in cache.raw_data)
        cache.folders = sorted(list(all_folders), key=lambda x: x.lower())
        # Count the new entry's tags back in.
        if 'tags' in metadata:
            for tag in metadata.get('tags', []):
                self._tags_count[tag] = self._tags_count.get(tag, 0) + 1
    await cache.resort()
    return True
def has_hash(self, sha256: str) -> bool:
    """Check if a model with given hash exists (case-insensitive)."""
    normalized = sha256.lower()
    return self._hash_index.has_hash(normalized)
def get_path_by_hash(self, sha256: str) -> Optional[str]:
    """Get file path for a model by its hash (case-insensitive)."""
    normalized = sha256.lower()
    return self._hash_index.get_path(normalized)
def get_hash_by_path(self, file_path: str) -> Optional[str]:
    """Get hash for a model by its file path (exact-path lookup)."""
    return self._hash_index.get_hash(file_path)
# TODO: Adjust this method to use metadata instead of finding the file
def get_preview_url_by_hash(self, sha256: str) -> Optional[str]:
    """Get preview static URL for a model by its hash.

    Looks for the first existing preview file (by extension priority)
    next to the model file; returns None if the hash is unknown or no
    preview exists on disk.
    """
    model_path = self._hash_index.get_path(sha256.lower())
    if not model_path:
        return None
    stem = os.path.splitext(model_path)[0]
    for candidate_ext in PREVIEW_EXTENSIONS:
        candidate = f"{stem}{candidate_ext}"
        if os.path.exists(candidate):
            return config.get_preview_static_url(candidate)
    return None
async def get_top_tags(self, limit: int = 20) -> List[Dict[str, any]]:
    """Get top tags sorted by count (descending), capped at `limit`."""
    # Ensure the cache (and with it the tag counters) is populated.
    await self.get_cached_data()
    ranked = sorted(self._tags_count.items(), key=lambda kv: kv[1], reverse=True)
    return [{"tag": tag, "count": count} for tag, count in ranked[:limit]]
async def get_base_models(self, limit: int = 20) -> List[Dict[str, any]]:
    """Get base models sorted by frequency (descending), capped at `limit`."""
    cache = await self.get_cached_data()
    counts = {}
    for entry in cache.raw_data:
        base_model = entry.get('base_model')
        if base_model:  # skip missing and empty values
            counts[base_model] = counts.get(base_model, 0) + 1
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    return [{'name': name, 'count': count} for name, count in ranked[:limit]]
async def get_model_info_by_name(self, name):
    """Get model information by name (exact match on 'file_name')."""
    try:
        cache = await self.get_cached_data()
        return next((entry for entry in cache.raw_data if entry.get("file_name") == name), None)
    except Exception as e:
        logger.error(f"Error getting model info by name: {e}", exc_info=True)
        return None
async def update_preview_in_cache(self, file_path: str, preview_url: str) -> bool:
    """Update preview URL in cache for a specific lora

    Args:
        file_path: The file path of the lora to update
        preview_url: The new preview URL

    Returns:
        bool: True if the update was successful, False if cache doesn't exist or lora wasn't found
    """
    cache = self._cache
    if cache is None:
        return False
    return await cache.update_preview_url(file_path, preview_url)

View File

@@ -0,0 +1,85 @@
import asyncio
from typing import List, Dict
from dataclasses import dataclass
from operator import itemgetter
@dataclass
class RecipeCache:
    """Cache structure for Recipe data"""
    raw_data: List[Dict]
    sorted_by_name: List[Dict]
    sorted_by_date: List[Dict]

    def __post_init__(self):
        # Serializes re-sorting across concurrent async callers.
        self._lock = asyncio.Lock()

    async def resort(self, name_only: bool = False):
        """Resort all cached data views

        Args:
            name_only: When True, skip rebuilding the date-sorted view.
        """
        async with self._lock:
            self.sorted_by_name = sorted(
                self.raw_data,
                key=lambda x: x.get('title', '').lower()  # Case-insensitive sort
            )
            if not name_only:
                self.sorted_by_date = sorted(
                    self.raw_data,
                    key=itemgetter('created_date', 'file_path'),
                    reverse=True
                )

    async def update_recipe_metadata(self, recipe_id: str, metadata: Dict) -> bool:
        """Update metadata for a specific recipe in all cached data

        Args:
            recipe_id: The ID of the recipe to update
            metadata: The new metadata

        Returns:
            bool: True if the update was successful, False if the recipe wasn't found
        """
        # Update in raw_data
        for item in self.raw_data:
            if item.get('id') == recipe_id:
                item.update(metadata)
                break
        else:
            return False  # Recipe not found
        # Resort to reflect changes
        await self.resort()
        return True

    async def add_recipe(self, recipe_data: Dict) -> None:
        """Add a new recipe to the cache

        Args:
            recipe_data: The recipe data to add
        """
        # BUG FIX: the previous implementation held self._lock while awaiting
        # resort(), which acquires the same non-reentrant asyncio.Lock and
        # therefore deadlocked forever. Append first, then let resort() take
        # the lock itself (the append is atomic w.r.t. the event loop).
        self.raw_data.append(recipe_data)
        await self.resort()

    async def remove_recipe(self, recipe_id: str) -> bool:
        """Remove a recipe from the cache by ID

        Args:
            recipe_id: The ID of the recipe to remove

        Returns:
            bool: True if the recipe was found and removed, False otherwise
        """
        # Find the recipe in raw_data
        recipe_index = next((i for i, recipe in enumerate(self.raw_data)
                             if recipe.get('id') == recipe_id), None)
        if recipe_index is None:
            return False
        # Remove from raw_data
        self.raw_data.pop(recipe_index)
        # Resort to update sorted lists
        await self.resort()
        return True

View File

@@ -0,0 +1,807 @@
import os
import logging
import asyncio
import json
import time
from typing import List, Dict, Optional, Any, Tuple
from ..config import config
from .recipe_cache import RecipeCache
from .service_registry import ServiceRegistry
from .lora_scanner import LoraScanner
from ..utils.utils import fuzzy_match
import sys
logger = logging.getLogger(__name__)
class RecipeScanner:
"""Service for scanning and managing recipe images"""
_instance = None
_lock = asyncio.Lock()
@classmethod
async def get_instance(cls, lora_scanner: Optional[LoraScanner] = None):
    """Get singleton instance of RecipeScanner

    Serializes creation behind cls._lock so concurrent callers share a
    single instance. A LoraScanner is resolved from the ServiceRegistry
    when the caller does not supply one.
    """
    async with cls._lock:
        if cls._instance is None:
            if not lora_scanner:
                # Get lora scanner from service registry if not provided
                lora_scanner = await ServiceRegistry.get_lora_scanner()
            cls._instance = cls(lora_scanner)
        return cls._instance
def __new__(cls, lora_scanner: Optional[LoraScanner] = None):
    # Classic singleton: allocate once, then always hand back the shared
    # instance. Attributes set here are overwritten/supplemented in
    # __init__ on the first construction only.
    if cls._instance is None:
        cls._instance = super().__new__(cls)
        cls._instance._lora_scanner = lora_scanner
        cls._instance._civitai_client = None  # Will be lazily initialized
    return cls._instance
def __init__(self, lora_scanner: Optional[LoraScanner] = None):
# Ensure initialization only happens once
if not hasattr(self, '_initialized'):
self._cache: Optional[RecipeCache] = None
self._initialization_lock = asyncio.Lock()
self._initialization_task: Optional[asyncio.Task] = None
self._is_initializing = False
if lora_scanner:
self._lora_scanner = lora_scanner
self._initialized = True
async def _get_civitai_client(self):
"""Lazily initialize CivitaiClient from registry"""
if self._civitai_client is None:
self._civitai_client = await ServiceRegistry.get_civitai_client()
return self._civitai_client
async def initialize_in_background(self) -> None:
    """Initialize cache in background using thread pool.

    Seeds an empty RecipeCache first so readers never see None, then
    runs the blocking filesystem scan in the default executor.
    """
    try:
        if self._cache is None:
            self._cache = RecipeCache(
                raw_data=[],
                sorted_by_name=[],
                sorted_by_date=[]
            )
        # Guard against concurrent initializations.
        self._is_initializing = True
        try:
            started = time.time()
            loop = asyncio.get_event_loop()
            # Heavy, synchronous scan work runs off the event loop.
            cache = await loop.run_in_executor(
                None,
                self._initialize_recipe_cache_sync
            )
            elapsed_time = time.time() - started
            recipe_count = len(cache.raw_data) if cache and hasattr(cache, 'raw_data') else 0
            logger.info(f"Recipe cache initialized in {elapsed_time:.2f} seconds. Found {recipe_count} recipes")
        finally:
            # Clear the flag whatever the outcome.
            self._is_initializing = False
    except Exception as e:
        logger.error(f"Recipe Scanner: Error initializing cache in background: {e}")
def _initialize_recipe_cache_sync(self):
    """Synchronous version of recipe cache initialization for thread pool execution

    Runs in a worker thread (see initialize_in_background): reads every
    *.recipe.json under recipes_dir without awaiting, fills
    self._cache.raw_data and its sorted views in place, and returns the
    cache (or None on unexpected failure).
    """
    try:
        # Create a new event loop for this thread
        # NOTE(review): the loop is created and closed but never run —
        # nothing below awaits. Confirm it is actually needed.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        # Create a synchronous method to bypass the async lock
        def sync_initialize_cache():
            # We need to implement scan_all_recipes logic synchronously here
            # instead of calling the async method to avoid event loop issues
            recipes = []
            recipes_dir = self.recipes_dir
            if not recipes_dir or not os.path.exists(recipes_dir):
                logger.warning(f"Recipes directory not found: {recipes_dir}")
                # NOTE(review): returns a plain list here but a RecipeCache
                # below — the caller only reads .raw_data defensively.
                return recipes
            # Get all recipe JSON files in the recipes directory
            recipe_files = []
            for root, _, files in os.walk(recipes_dir):
                recipe_count = sum(1 for f in files if f.lower().endswith('.recipe.json'))
                if recipe_count > 0:
                    for file in files:
                        if file.lower().endswith('.recipe.json'):
                            recipe_files.append(os.path.join(root, file))
            # Process each recipe file
            for recipe_path in recipe_files:
                try:
                    with open(recipe_path, 'r', encoding='utf-8') as f:
                        recipe_data = json.load(f)
                    # Validate recipe data
                    if not recipe_data or not isinstance(recipe_data, dict):
                        logger.warning(f"Invalid recipe data in {recipe_path}")
                        continue
                    # Ensure required fields exist
                    required_fields = ['id', 'file_path', 'title']
                    if not all(field in recipe_data for field in required_fields):
                        logger.warning(f"Missing required fields in {recipe_path}")
                        continue
                    # Ensure the image file exists; fall back to a file of the
                    # same name next to the recipe JSON.
                    image_path = recipe_data.get('file_path')
                    if not os.path.exists(image_path):
                        recipe_dir = os.path.dirname(recipe_path)
                        image_filename = os.path.basename(image_path)
                        alternative_path = os.path.join(recipe_dir, image_filename)
                        if os.path.exists(alternative_path):
                            recipe_data['file_path'] = alternative_path
                    # Ensure loras array exists
                    if 'loras' not in recipe_data:
                        recipe_data['loras'] = []
                    # Ensure gen_params exists
                    if 'gen_params' not in recipe_data:
                        recipe_data['gen_params'] = {}
                    # Add to list without async operations
                    recipes.append(recipe_data)
                except Exception as e:
                    logger.error(f"Error loading recipe file {recipe_path}: {e}")
                    import traceback
                    traceback.print_exc(file=sys.stderr)
            # Update cache with the collected data
            self._cache.raw_data = recipes
            # Create a simplified resort function that doesn't use await
            if hasattr(self._cache, "resort"):
                try:
                    # Sort by name
                    self._cache.sorted_by_name = sorted(
                        self._cache.raw_data,
                        key=lambda x: x.get('title', '').lower()
                    )
                    # Sort by date (modified or created)
                    self._cache.sorted_by_date = sorted(
                        self._cache.raw_data,
                        key=lambda x: x.get('modified', x.get('created_date', 0)),
                        reverse=True
                    )
                except Exception as e:
                    logger.error(f"Error sorting recipe cache: {e}")
            return self._cache

        # Run our sync initialization that avoids lock conflicts
        return sync_initialize_cache()
    except Exception as e:
        logger.error(f"Error in thread-based recipe cache initialization: {e}")
        return self._cache if hasattr(self, '_cache') else None
    finally:
        # Clean up the event loop
        # NOTE(review): if new_event_loop() itself raised, 'loop' is unbound
        # here and this line raises NameError — confirm acceptable.
        loop.close()
@property
def recipes_dir(self) -> str:
    """Get path to recipes directory (created on demand; "" if no roots)."""
    roots = config.loras_roots
    if not roots:
        return ""
    # config.loras_roots is already sorted case-insensitively; use the first one.
    path = os.path.join(roots[0], "recipes")
    os.makedirs(path, exist_ok=True)
    return path
async def get_cached_data(self, force_refresh: bool = False) -> RecipeCache:
    """Get cached recipe data, refresh if needed

    Args:
        force_refresh: When True, rescan recipes from disk even if a
            cache already exists.

    Returns:
        RecipeCache: the shared cache; possibly an empty placeholder if
        initialization is still running or failed.
    """
    # If cache is already initialized and no refresh is needed, return it immediately
    if self._cache is not None and not force_refresh:
        return self._cache
    # If another initialization is already in progress, wait for it to complete
    # NOTE(review): this does not actually wait — it returns the (possibly
    # empty) cache immediately. Confirm callers tolerate partial data.
    if self._is_initializing and not force_refresh:
        return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
    # If force refresh is requested, initialize the cache directly
    if force_refresh:
        # Try to acquire the lock with a timeout to prevent deadlocks
        # NOTE(review): no timeout is applied here despite the comment;
        # the lock is awaited unconditionally.
        try:
            async with self._initialization_lock:
                # Mark as initializing to prevent concurrent initializations
                self._is_initializing = True
                try:
                    # Scan for recipe data directly
                    raw_data = await self.scan_all_recipes()
                    # Update cache
                    self._cache = RecipeCache(
                        raw_data=raw_data,
                        sorted_by_name=[],
                        sorted_by_date=[]
                    )
                    # Resort cache
                    await self._cache.resort()
                    return self._cache
                except Exception as e:
                    logger.error(f"Recipe Manager: Error initializing cache: {e}", exc_info=True)
                    # Create empty cache on error
                    self._cache = RecipeCache(
                        raw_data=[],
                        sorted_by_name=[],
                        sorted_by_date=[]
                    )
                    return self._cache
                finally:
                    # Mark initialization as complete
                    self._is_initializing = False
        except Exception as e:
            logger.error(f"Unexpected error in get_cached_data: {e}")
    # Return the cache (may be empty or partially initialized)
    return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
async def scan_all_recipes(self) -> List[Dict]:
    """Scan all recipe JSON files and return metadata.

    Walks recipes_dir for *.recipe.json files and loads each through
    _load_recipe_file, keeping only the ones that validate.
    """
    recipes = []
    recipes_dir = self.recipes_dir
    if not recipes_dir or not os.path.exists(recipes_dir):
        logger.warning(f"Recipes directory not found: {recipes_dir}")
        return recipes
    # Collect every *.recipe.json anywhere under the recipes tree.
    recipe_files = [
        os.path.join(root, name)
        for root, _, files in os.walk(recipes_dir)
        for name in files
        if name.lower().endswith('.recipe.json')
    ]
    for recipe_path in recipe_files:
        recipe_data = await self._load_recipe_file(recipe_path)
        if recipe_data:
            recipes.append(recipe_data)
    return recipes
async def _load_recipe_file(self, recipe_path: str) -> Optional[Dict]:
"""Load recipe data from a JSON file"""
try:
with open(recipe_path, 'r', encoding='utf-8') as f:
recipe_data = json.load(f)
# Validate recipe data
if not recipe_data or not isinstance(recipe_data, dict):
logger.warning(f"Invalid recipe data in {recipe_path}")
return None
# Ensure required fields exist
required_fields = ['id', 'file_path', 'title']
for field in required_fields:
if field not in recipe_data:
logger.warning(f"Missing required field '{field}' in {recipe_path}")
return None
# Ensure the image file exists
image_path = recipe_data.get('file_path')
if not os.path.exists(image_path):
logger.warning(f"Recipe image not found: {image_path}")
# Try to find the image in the same directory as the recipe
recipe_dir = os.path.dirname(recipe_path)
image_filename = os.path.basename(image_path)
alternative_path = os.path.join(recipe_dir, image_filename)
if os.path.exists(alternative_path):
recipe_data['file_path'] = alternative_path
else:
logger.warning(f"Could not find alternative image path for {image_path}")
# Ensure loras array exists
if 'loras' not in recipe_data:
recipe_data['loras'] = []
# Ensure gen_params exists
if 'gen_params' not in recipe_data:
recipe_data['gen_params'] = {}
# Update lora information with local paths and availability
await self._update_lora_information(recipe_data)
return recipe_data
except Exception as e:
logger.error(f"Error loading recipe file {recipe_path}: {e}")
import traceback
traceback.print_exc(file=sys.stderr)
return None
async def _update_lora_information(self, recipe_data: Dict) -> bool:
"""Update LoRA information with hash and file_name
Returns:
bool: True if metadata was updated
"""
if not recipe_data.get('loras'):
return False
metadata_updated = False
for lora in recipe_data['loras']:
# Skip if already has complete information
if 'hash' in lora and 'file_name' in lora and lora['file_name']:
continue
# If has modelVersionId but no hash, look in lora cache first, then fetch from Civitai
if 'modelVersionId' in lora and not lora.get('hash'):
model_version_id = lora['modelVersionId']
# Try to find in lora cache first
hash_from_cache = await self._find_hash_in_lora_cache(model_version_id)
if hash_from_cache:
lora['hash'] = hash_from_cache
metadata_updated = True
else:
# If not in cache, fetch from Civitai
hash_from_civitai = await self._get_hash_from_civitai(model_version_id)
if hash_from_civitai:
lora['hash'] = hash_from_civitai
metadata_updated = True
else:
logger.debug(f"Could not get hash for modelVersionId {model_version_id}")
# If has hash but no file_name, look up in lora library
if 'hash' in lora and (not lora.get('file_name') or not lora['file_name']):
hash_value = lora['hash']
if self._lora_scanner.has_lora_hash(hash_value):
lora_path = self._lora_scanner.get_lora_path_by_hash(hash_value)
if lora_path:
file_name = os.path.splitext(os.path.basename(lora_path))[0]
lora['file_name'] = file_name
metadata_updated = True
else:
# Lora not in library
lora['file_name'] = ''
metadata_updated = True
return metadata_updated
async def _find_hash_in_lora_cache(self, model_version_id: str) -> Optional[str]:
"""Find hash in lora cache based on modelVersionId"""
try:
# Get all loras from cache
if not self._lora_scanner:
return None
cache = await self._lora_scanner.get_cached_data()
if not cache or not cache.raw_data:
return None
# Find lora with matching civitai.id
for lora in cache.raw_data:
civitai_data = lora.get('civitai', {})
if civitai_data and str(civitai_data.get('id', '')) == str(model_version_id):
return lora.get('sha256')
return None
except Exception as e:
logger.error(f"Error finding hash in lora cache: {e}")
return None
async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]:
"""Get hash from Civitai API"""
try:
# Get CivitaiClient from ServiceRegistry
civitai_client = await self._get_civitai_client()
if not civitai_client:
logger.error("Failed to get CivitaiClient from ServiceRegistry")
return None
version_info = await civitai_client.get_model_version_info(model_version_id)
if not version_info or not version_info.get('files'):
logger.debug(f"No files found in version info for ID: {model_version_id}")
return None
# Get hash from the first file
for file_info in version_info.get('files', []):
if file_info.get('hashes', {}).get('SHA256'):
return file_info['hashes']['SHA256']
logger.debug(f"No SHA256 hash found in version info for ID: {model_version_id}")
return None
except Exception as e:
logger.error(f"Error getting hash from Civitai: {e}")
return None
async def _get_model_version_name(self, model_version_id: str) -> Optional[str]:
"""Get model version name from Civitai API"""
try:
# Get CivitaiClient from ServiceRegistry
civitai_client = await self._get_civitai_client()
if not civitai_client:
return None
version_info = await civitai_client.get_model_version_info(model_version_id)
if version_info and 'name' in version_info:
return version_info['name']
logger.debug(f"No version name found for modelVersionId {model_version_id}")
return None
except Exception as e:
logger.error(f"Error getting model version name from Civitai: {e}")
return None
async def _determine_base_model(self, loras: List[Dict]) -> Optional[str]:
"""Determine the most common base model among LoRAs"""
base_models = {}
# Count occurrences of each base model
for lora in loras:
if 'hash' in lora:
lora_path = self._lora_scanner.get_lora_path_by_hash(lora['hash'])
if lora_path:
base_model = await self._get_base_model_for_lora(lora_path)
if base_model:
base_models[base_model] = base_models.get(base_model, 0) + 1
# Return the most common base model
if base_models:
return max(base_models.items(), key=lambda x: x[1])[0]
return None
async def _get_base_model_for_lora(self, lora_path: str) -> Optional[str]:
"""Get base model for a LoRA from cache"""
try:
if not self._lora_scanner:
return None
cache = await self._lora_scanner.get_cached_data()
if not cache or not cache.raw_data:
return None
# Find matching lora in cache
for lora in cache.raw_data:
if lora.get('file_path') == lora_path:
return lora.get('base_model')
return None
except Exception as e:
logger.error(f"Error getting base model for lora: {e}")
return None
async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'date', search: str = None, filters: dict = None, search_options: dict = None, lora_hash: str = None, bypass_filters: bool = True):
    """Get paginated and filtered recipe data

    Args:
        page: Current page number (1-based)
        page_size: Number of items per page
        sort_by: Sort method ('name' or 'date')
        search: Search term
        filters: Dictionary of filters to apply
        search_options: Dictionary of search options to apply
        lora_hash: Optional SHA256 hash of a LoRA to filter recipes by
        bypass_filters: If True, ignore other filters when a lora_hash is provided

    Returns:
        dict with 'items', 'total', 'page', 'page_size', 'total_pages'.
    """
    cache = await self.get_cached_data()
    # Get base dataset (pre-sorted views from the cache)
    filtered_data = cache.sorted_by_date if sort_by == 'date' else cache.sorted_by_name
    # Special case: Filter by LoRA hash (takes precedence if bypass_filters is True)
    if lora_hash:
        # Filter recipes that contain this LoRA hash
        # NOTE(review): lora.get('hash', '').lower() raises if 'hash' is
        # present but None — confirm entries can never carry a None hash.
        filtered_data = [
            item for item in filtered_data
            if 'loras' in item and any(
                lora.get('hash', '').lower() == lora_hash.lower()
                for lora in item['loras']
            )
        ]
        if bypass_filters:
            # Skip other filters if bypass_filters is True
            pass
        # Otherwise continue with normal filtering after applying LoRA hash filter
    # Skip further filtering if we're only filtering by LoRA hash with bypass enabled
    if not (lora_hash and bypass_filters):
        # Apply search filter
        if search:
            # Default search options if none provided
            if not search_options:
                search_options = {
                    'title': True,
                    'tags': True,
                    'lora_name': True,
                    'lora_model': True
                }
            # Build the search predicate based on search options
            def matches_search(item):
                # Search in title if enabled
                if search_options.get('title', True):
                    if fuzzy_match(str(item.get('title', '')), search):
                        return True
                # Search in tags if enabled
                if search_options.get('tags', True) and 'tags' in item:
                    for tag in item['tags']:
                        if fuzzy_match(tag, search):
                            return True
                # Search in lora file names if enabled
                if search_options.get('lora_name', True) and 'loras' in item:
                    for lora in item['loras']:
                        if fuzzy_match(str(lora.get('file_name', '')), search):
                            return True
                # Search in lora model names if enabled
                if search_options.get('lora_model', True) and 'loras' in item:
                    for lora in item['loras']:
                        if fuzzy_match(str(lora.get('modelName', '')), search):
                            return True
                # No match found
                return False
            # Filter the data using the search predicate
            filtered_data = [item for item in filtered_data if matches_search(item)]
        # Apply additional filters
        if filters:
            # Filter by base model
            if 'base_model' in filters and filters['base_model']:
                filtered_data = [
                    item for item in filtered_data
                    if item.get('base_model', '') in filters['base_model']
                ]
            # Filter by tags (any overlap qualifies)
            if 'tags' in filters and filters['tags']:
                filtered_data = [
                    item for item in filtered_data
                    if any(tag in item.get('tags', []) for tag in filters['tags'])
                ]
    # Calculate pagination
    total_items = len(filtered_data)
    start_idx = (page - 1) * page_size
    end_idx = min(start_idx + page_size, total_items)
    # Get paginated items
    paginated_items = filtered_data[start_idx:end_idx]
    # Add inLibrary information for each lora
    # NOTE(review): these assignments mutate the lora dicts that live
    # inside the shared cache lists, not copies — confirm intended.
    for item in paginated_items:
        if 'loras' in item:
            for lora in item['loras']:
                if 'hash' in lora and lora['hash']:
                    lora['inLibrary'] = self._lora_scanner.has_lora_hash(lora['hash'].lower())
                    lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(lora['hash'].lower())
                    lora['localPath'] = self._lora_scanner.get_lora_path_by_hash(lora['hash'].lower())
    result = {
        'items': paginated_items,
        'total': total_items,
        'page': page,
        'page_size': page_size,
        'total_pages': (total_items + page_size - 1) // page_size
    }
    return result
async def get_recipe_by_id(self, recipe_id: str) -> dict:
    """Get a single recipe by ID with all metadata and formatted URLs

    Args:
        recipe_id: The ID of the recipe to retrieve

    Returns:
        Dict containing the recipe data or None if not found
    """
    if not recipe_id:
        return None
    cache = await self.get_cached_data()
    recipe = next((r for r in cache.raw_data if str(r.get('id', '')) == recipe_id), None)
    if recipe is None:
        return None
    # Shallow copy: nested lists/dicts are still shared with the cache.
    formatted = dict(recipe)
    if 'file_path' in formatted:
        formatted['file_url'] = self._format_file_url(formatted['file_path'])
    # Human-readable timestamps for display.
    for date_field in ('created_date', 'modified'):
        if date_field in formatted:
            formatted[f"{date_field}_formatted"] = self._format_timestamp(formatted[date_field])
    # Enrich each lora entry with local-library info.
    if 'loras' in formatted:
        for lora in formatted['loras']:
            if lora.get('hash'):
                lora_hash = lora['hash'].lower()
                lora['inLibrary'] = self._lora_scanner.has_lora_hash(lora_hash)
                lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(lora_hash)
                lora['localPath'] = self._lora_scanner.get_lora_path_by_hash(lora_hash)
    return formatted
def _format_file_url(self, file_path: str) -> str:
"""Format file path as URL for serving in web UI"""
if not file_path:
return '/loras_static/images/no-preview.png'
try:
# Format file path as a URL that will work with static file serving
recipes_dir = os.path.join(config.loras_roots[0], "recipes").replace(os.sep, '/')
if file_path.replace(os.sep, '/').startswith(recipes_dir):
relative_path = os.path.relpath(file_path, config.loras_roots[0]).replace(os.sep, '/')
return f"/loras_static/root1/preview/{relative_path}"
# If not in recipes dir, try to create a valid URL from the file name
file_name = os.path.basename(file_path)
return f"/loras_static/root1/preview/recipes/{file_name}"
except Exception as e:
logger.error(f"Error formatting file URL: {e}")
return '/loras_static/images/no-preview.png'
def _format_timestamp(self, timestamp: float) -> str:
"""Format timestamp for display"""
from datetime import datetime
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
async def update_recipe_metadata(self, recipe_id: str, metadata: dict) -> bool:
    """Update recipe metadata (like title and tags) in both file system and cache

    Args:
        recipe_id: The ID of the recipe to update
        metadata: Dictionary containing metadata fields to update (title, tags, etc.)

    Returns:
        bool: True if successful, False otherwise
    """
    import os
    import json
    # Locate the recipe JSON file by convention: <id>.recipe.json
    recipe_json_path = os.path.join(self.recipes_dir, f"{recipe_id}.recipe.json")
    if not os.path.exists(recipe_json_path):
        return False
    try:
        # Load existing recipe data
        with open(recipe_json_path, 'r', encoding='utf-8') as f:
            recipe_data = json.load(f)
        # Merge the updated fields
        recipe_data.update(metadata)
        # Save updated recipe
        with open(recipe_json_path, 'w', encoding='utf-8') as f:
            json.dump(recipe_data, f, indent=4, ensure_ascii=False)
        # Update the cache if it exists
        if self._cache is not None:
            await self._cache.update_recipe_metadata(recipe_id, metadata)
        # FIX: import ExifUtils only when there is actually an image to
        # rewrite. Previously the unconditional import ran even when no
        # image existed, so any import failure made the whole update
        # report False despite the JSON already being written.
        image_path = recipe_data.get('file_path')
        if image_path and os.path.exists(image_path):
            from ..utils.exif_utils import ExifUtils
            ExifUtils.append_recipe_metadata(image_path, recipe_data)
        return True
    except Exception as e:
        import logging
        logging.getLogger(__name__).error(f"Error updating recipe metadata: {e}", exc_info=True)
        return False
async def update_lora_filename_by_hash(self, hash_value: str, new_file_name: str) -> Tuple[int, int]:
    """Update file_name in all recipes that contain a LoRA with the specified hash.

    Args:
        hash_value: The SHA256 hash value of the LoRA
        new_file_name: The new file_name to set

    Returns:
        Tuple[int, int]: (number of recipes updated in files, number of recipes updated in cache)
    """
    if not hash_value or not new_file_name:
        return 0, 0

    # Always use lowercase hash for consistency
    hash_value = hash_value.lower()

    # Get recipes directory
    recipes_dir = self.recipes_dir
    if not recipes_dir or not os.path.exists(recipes_dir):
        logger.warning(f"Recipes directory not found: {recipes_dir}")
        return 0, 0

    cache_initialized = self._cache is not None
    cache_updated_count = 0
    file_updated_count = 0

    # Collect all recipe JSON files under the recipes directory (recursively)
    recipe_files = []
    for root, _, files in os.walk(recipes_dir):
        for file in files:
            if file.lower().endswith('.recipe.json'):
                recipe_files.append(os.path.join(root, file))

    # Process each recipe file
    for recipe_path in recipe_files:
        try:
            with open(recipe_path, 'r', encoding='utf-8') as f:
                recipe_data = json.load(f)

            # Skip if no loras or invalid structure
            if not recipe_data or not isinstance(recipe_data, dict) or 'loras' not in recipe_data:
                continue

            # Update every lora entry whose hash matches
            file_updated = False
            for lora in recipe_data.get('loras', []):
                # Guard against missing/None hashes, consistent with the
                # `'hash' in lora and lora['hash']` checks elsewhere in this
                # class (a None hash previously raised AttributeError here)
                if lora.get('hash') and lora['hash'].lower() == hash_value:
                    old_file_name = lora.get('file_name', '')
                    lora['file_name'] = new_file_name
                    file_updated = True
                    logger.info(f"Updated file_name in recipe {recipe_path}: {old_file_name} -> {new_file_name}")

            # If updated, save the file
            if file_updated:
                with open(recipe_path, 'w', encoding='utf-8') as f:
                    json.dump(recipe_data, f, indent=4, ensure_ascii=False)
                file_updated_count += 1

                # Mirror the change into the in-memory cache, if available
                if cache_initialized:
                    recipe_id = recipe_data.get('id')
                    if recipe_id:
                        for cache_item in self._cache.raw_data:
                            if cache_item.get('id') == recipe_id:
                                # Replace loras array with updated version
                                cache_item['loras'] = recipe_data['loras']
                                cache_updated_count += 1
                                break
        except Exception as e:
            # exc_info=True captures the traceback through the logger; the
            # previous traceback.print_exc(file=sys.stderr) depended on `sys`,
            # which is not imported in this module's visible imports
            logger.error(f"Error updating recipe file {recipe_path}: {e}", exc_info=True)

    # Resort cache if updates were made
    if cache_initialized and cache_updated_count > 0:
        await self._cache.resort()
        logger.info(f"Resorted recipe cache after updating {cache_updated_count} items")

    return file_updated_count, cache_updated_count

View File

@@ -0,0 +1,124 @@
import asyncio
import logging
from typing import Optional, Dict, Any, TypeVar, Type
logger = logging.getLogger(__name__)
T = TypeVar('T') # Define a type variable for service types
class ServiceRegistry:
    """Centralized registry for service singletons.

    Services are kept in a class-level dict guarded by an asyncio.Lock so
    that registration and lookup are safe across concurrent coroutines.
    The get_* convenience methods lazily construct and register the common
    services on first use.
    """

    _instance = None
    _services: Dict[str, Any] = {}
    _lock = asyncio.Lock()

    @classmethod
    def get_instance(cls):
        """Get singleton instance of the registry"""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    async def register_service(cls, service_name: str, service_instance: Any) -> None:
        """Register a service instance with the registry"""
        registry = cls.get_instance()
        async with cls._lock:
            registry._services[service_name] = service_instance
            logger.debug(f"Registered service: {service_name}")

    @classmethod
    async def get_service(cls, service_name: str) -> Any:
        """Get a service instance by name, or None if it is not registered"""
        registry = cls.get_instance()
        async with cls._lock:
            if service_name not in registry._services:
                logger.debug(f"Service {service_name} not found in registry")
                return None
            return registry._services[service_name]

    # Convenience methods for common services

    @classmethod
    async def get_lora_scanner(cls):
        """Get the LoraScanner instance"""
        from .lora_scanner import LoraScanner
        scanner = await cls.get_service("lora_scanner")
        if scanner is None:
            scanner = await LoraScanner.get_instance()
            await cls.register_service("lora_scanner", scanner)
        return scanner

    @classmethod
    async def get_checkpoint_scanner(cls):
        """Get the CheckpointScanner instance"""
        from .checkpoint_scanner import CheckpointScanner
        scanner = await cls.get_service("checkpoint_scanner")
        if scanner is None:
            scanner = await CheckpointScanner.get_instance()
            await cls.register_service("checkpoint_scanner", scanner)
        return scanner

    @classmethod
    async def get_lora_monitor(cls):
        """Get the LoraFileMonitor instance"""
        from .file_monitor import LoraFileMonitor
        monitor = await cls.get_service("lora_monitor")
        if monitor is None:
            monitor = await LoraFileMonitor.get_instance()
            await cls.register_service("lora_monitor", monitor)
        return monitor

    @classmethod
    async def get_checkpoint_monitor(cls):
        """Get the CheckpointFileMonitor instance"""
        from .file_monitor import CheckpointFileMonitor
        monitor = await cls.get_service("checkpoint_monitor")
        if monitor is None:
            monitor = await CheckpointFileMonitor.get_instance()
            await cls.register_service("checkpoint_monitor", monitor)
        return monitor

    @classmethod
    async def get_civitai_client(cls):
        """Get the CivitaiClient instance"""
        from .civitai_client import CivitaiClient
        client = await cls.get_service("civitai_client")
        if client is None:
            client = await CivitaiClient.get_instance()
            await cls.register_service("civitai_client", client)
        return client

    @classmethod
    async def get_download_manager(cls):
        """Get the DownloadManager instance"""
        from .download_manager import DownloadManager
        manager = await cls.get_service("download_manager")
        if manager is None:
            # We'll let DownloadManager.get_instance handle file_monitor parameter
            manager = await DownloadManager.get_instance()
            await cls.register_service("download_manager", manager)
        return manager

    @classmethod
    async def get_recipe_scanner(cls):
        """Get the RecipeScanner instance"""
        from .recipe_scanner import RecipeScanner
        scanner = await cls.get_service("recipe_scanner")
        if scanner is None:
            lora_scanner = await cls.get_lora_scanner()
            scanner = RecipeScanner(lora_scanner)
            await cls.register_service("recipe_scanner", scanner)
        return scanner

    @classmethod
    async def get_websocket_manager(cls):
        """Get the WebSocketManager instance"""
        manager = await cls.get_service("websocket_manager")
        if manager is None:
            # ws_manager is already a global instance in websocket_manager.py
            # (the duplicate unconditional import here was redundant and has
            # been removed; the import is only needed on the miss path)
            from .websocket_manager import ws_manager
            await cls.register_service("websocket_manager", ws_manager)
            manager = ws_manager
        return manager

View File

@@ -37,7 +37,8 @@ class SettingsManager:
def _get_default_settings(self) -> Dict[str, Any]:
"""Return default settings"""
return {
"civitai_api_key": ""
"civitai_api_key": "",
"show_only_sfw": False
}
def get(self, key: str, default: Any = None) -> Any:

View File

@@ -9,6 +9,8 @@ class WebSocketManager:
def __init__(self):
self._websockets: Set[web.WebSocketResponse] = set()
self._init_websockets: Set[web.WebSocketResponse] = set() # New set for initialization progress clients
self._checkpoint_websockets: Set[web.WebSocketResponse] = set() # New set for checkpoint download progress
async def handle_connection(self, request: web.Request) -> web.WebSocketResponse:
"""Handle new WebSocket connection"""
@@ -23,6 +25,34 @@ class WebSocketManager:
finally:
self._websockets.discard(ws)
return ws
async def handle_init_connection(self, request: web.Request) -> web.WebSocketResponse:
    """Handle a new WebSocket client subscribing to initialization progress.

    The socket is registered for the lifetime of the connection and is
    always unregistered on disconnect, even if the receive loop fails.
    """
    socket = web.WebSocketResponse()
    await socket.prepare(request)
    self._init_websockets.add(socket)
    try:
        async for message in socket:
            if message.type == web.WSMsgType.ERROR:
                logger.error(f'Init WebSocket error: {socket.exception()}')
    finally:
        self._init_websockets.discard(socket)
    return socket
async def handle_checkpoint_connection(self, request: web.Request) -> web.WebSocketResponse:
    """Handle a new WebSocket client subscribing to checkpoint download progress.

    Registers the socket on connect and guarantees removal from the
    tracking set when the connection closes.
    """
    socket = web.WebSocketResponse()
    await socket.prepare(request)
    self._checkpoint_websockets.add(socket)
    try:
        async for message in socket:
            if message.type == web.WSMsgType.ERROR:
                logger.error(f'Checkpoint WebSocket error: {socket.exception()}')
    finally:
        self._checkpoint_websockets.discard(socket)
    return socket
async def broadcast(self, data: Dict):
"""Broadcast message to all connected clients"""
@@ -34,10 +64,48 @@ class WebSocketManager:
await ws.send_json(data)
except Exception as e:
logger.error(f"Error sending progress: {e}")
async def broadcast_init_progress(self, data: Dict):
    """Broadcast initialization progress to connected clients.

    Missing required fields are filled with defaults (in place, mutating
    the caller's dict — same as the original behavior) so the frontend
    always receives a complete payload.

    Args:
        data: Progress payload; 'stage', 'progress' and 'details' are
            defaulted when absent.
    """
    if not self._init_websockets:
        return

    # Ensure data has all required fields
    data.setdefault('stage', 'processing')
    data.setdefault('progress', 0)
    data.setdefault('details', 'Processing...')

    # Iterate over a snapshot: connection handlers add/remove sockets from
    # this set concurrently, and mutating a set while we await mid-iteration
    # would raise "Set changed size during iteration".
    for ws in list(self._init_websockets):
        try:
            await ws.send_json(data)
        except Exception as e:
            logger.error(f"Error sending initialization progress: {e}")
async def broadcast_checkpoint_progress(self, data: Dict):
    """Broadcast checkpoint download progress to connected clients.

    Args:
        data: JSON-serializable progress payload sent to every subscriber.
    """
    if not self._checkpoint_websockets:
        return
    # Iterate over a snapshot: the handler coroutines may add/remove sockets
    # while we are awaiting a send, which would otherwise raise
    # "Set changed size during iteration".
    for ws in list(self._checkpoint_websockets):
        try:
            await ws.send_json(data)
        except Exception as e:
            logger.error(f"Error sending checkpoint progress: {e}")
def get_connected_clients_count(self) -> int:
    """Number of clients subscribed to the general broadcast channel."""
    return len(self._websockets)

def get_init_clients_count(self) -> int:
    """Number of clients subscribed to initialization progress updates."""
    return len(self._init_websockets)

def get_checkpoint_clients_count(self) -> int:
    """Number of clients subscribed to checkpoint download progress."""
    return len(self._checkpoint_websockets)
# Global instance
ws_manager = WebSocketManager()
ws_manager = WebSocketManager()

25
py/utils/constants.py Normal file
View File

@@ -0,0 +1,25 @@
# Civitai NSFW level bitmask values, from mildest to most restricted.
# presumably mirrors Civitai's nsfwLevel API field — TODO confirm against the API docs
NSFW_LEVELS = {
    "PG": 1,
    "PG13": 2,
    "R": 4,
    "X": 8,
    "XXX": 16,
    "Blocked": 32,  # Probably not actually visible through the API without being logged in on model owner account?
}

# preview extensions
# NOTE: order matters — find_preview_file() checks these in sequence and
# returns the first match, so '.webp' (the optimized output format) takes
# priority over the original-format previews.
PREVIEW_EXTENSIONS = [
    '.webp',
    '.preview.webp',
    '.preview.png',
    '.preview.jpeg',
    '.preview.jpg',
    '.preview.mp4',
    '.png',
    '.jpeg',
    '.jpg',
    '.mp4'
]

# Card preview image width (pixels) used when optimizing preview images
CARD_PREVIEW_WIDTH = 480

315
py/utils/exif_utils.py Normal file
View File

@@ -0,0 +1,315 @@
import piexif
import json
import logging
from typing import Optional
from io import BytesIO
import os
from PIL import Image
logger = logging.getLogger(__name__)
class ExifUtils:
    """Utility functions for working with EXIF data in images"""

    @staticmethod
    def extract_image_metadata(image_path: str) -> Optional[str]:
        """Extract metadata from image including UserComment or parameters field

        Args:
            image_path (str): Path to the image file

        Returns:
            Optional[str]: Extracted metadata or None if not found
        """
        try:
            # First try to open the image
            with Image.open(image_path) as img:
                # Method 1: Check for parameters in image info
                if hasattr(img, 'info') and 'parameters' in img.info:
                    return img.info['parameters']

                # Method 2: Check EXIF UserComment field
                if img.format not in ['JPEG', 'TIFF', 'WEBP']:
                    # For non-JPEG/TIFF/WEBP images, try to get EXIF through PIL
                    # NOTE: _getexif() is a private PIL API; keys are numeric EXIF tag ids
                    exif = img._getexif()
                    if exif and piexif.ExifIFD.UserComment in exif:
                        user_comment = exif[piexif.ExifIFD.UserComment]
                        if isinstance(user_comment, bytes):
                            # 'UNICODE\0' prefix marks a UTF-16BE encoded comment
                            if user_comment.startswith(b'UNICODE\0'):
                                return user_comment[8:].decode('utf-16be')
                            return user_comment.decode('utf-8', errors='ignore')
                        return user_comment

                # For JPEG/TIFF/WEBP, use piexif
                try:
                    exif_dict = piexif.load(image_path)
                    if piexif.ExifIFD.UserComment in exif_dict.get('Exif', {}):
                        user_comment = exif_dict['Exif'][piexif.ExifIFD.UserComment]
                        if isinstance(user_comment, bytes):
                            if user_comment.startswith(b'UNICODE\0'):
                                user_comment = user_comment[8:].decode('utf-16be')
                            else:
                                user_comment = user_comment.decode('utf-8', errors='ignore')
                        return user_comment
                except Exception as e:
                    logger.debug(f"Error loading EXIF data: {e}")

                # Method 3: Check PNG metadata for workflow info (for ComfyUI images)
                if img.format == 'PNG':
                    # Look for workflow or prompt metadata in PNG chunks
                    for key in img.info:
                        if key in ['workflow', 'prompt', 'parameters']:
                            return img.info[key]

            return None
        except Exception as e:
            logger.error(f"Error extracting image metadata: {e}", exc_info=True)
            return None

    @staticmethod
    def update_image_metadata(image_path: str, metadata: str) -> str:
        """Update metadata in image's EXIF data or parameters fields

        Args:
            image_path (str): Path to the image file
            metadata (str): Metadata string to save

        Returns:
            str: Path to the updated image (the original path on failure)
        """
        try:
            # Load the image and check its format
            with Image.open(image_path) as img:
                img_format = img.format

                # For PNG, store the metadata in the 'parameters' text chunk
                if img_format == 'PNG':
                    # BUGFIX: PIL's save() requires a PngInfo object for the
                    # pnginfo argument; the previous plain dict was silently
                    # ignored, so the metadata was never written.
                    from PIL.PngImagePlugin import PngInfo
                    png_info = PngInfo()
                    png_info.add_text('parameters', metadata)
                    img.save(image_path, format='PNG', pnginfo=png_info)
                    return image_path

                # For WebP format, use PIL's exif parameter directly
                elif img_format == 'WEBP':
                    exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
                    exif_bytes = piexif.dump(exif_dict)
                    # Save with the exif data
                    img.save(image_path, format='WEBP', exif=exif_bytes, quality=85)
                    return image_path

                # For other formats, use standard EXIF approach
                else:
                    try:
                        exif_dict = piexif.load(img.info.get('exif', b''))
                    except Exception:
                        # Start from an empty EXIF structure if none exists
                        exif_dict = {'0th': {}, 'Exif': {}, 'GPS': {}, 'Interop': {}, '1st': {}}

                    # If no Exif dictionary exists, create one
                    if 'Exif' not in exif_dict:
                        exif_dict['Exif'] = {}

                    # Update the UserComment field - use UNICODE format
                    unicode_bytes = metadata.encode('utf-16be')
                    metadata_bytes = b'UNICODE\0' + unicode_bytes
                    exif_dict['Exif'][piexif.ExifIFD.UserComment] = metadata_bytes

                    # Convert EXIF dict back to bytes and save
                    exif_bytes = piexif.dump(exif_dict)
                    img.save(image_path, exif=exif_bytes)
                    return image_path
        except Exception as e:
            logger.error(f"Error updating metadata in {image_path}: {e}")
            return image_path

    @staticmethod
    def append_recipe_metadata(image_path, recipe_data) -> str:
        """Append recipe metadata to an image's EXIF data.

        Serializes a trimmed-down recipe (title, base model, loras, gen
        params, tags) and appends it to the image's existing metadata,
        replacing any previously embedded recipe block.

        Args:
            image_path: Path to the image to annotate
            recipe_data: Full recipe dictionary

        Returns:
            str: Path to the updated image
        """
        try:
            # First, extract existing metadata
            metadata = ExifUtils.extract_image_metadata(image_path)

            # Remove any existing recipe metadata so we never stack two blocks
            if metadata:
                metadata = ExifUtils.remove_recipe_metadata(metadata)

            # Keep only the lora fields the recipe UI needs
            simplified_loras = []
            for lora in recipe_data.get("loras", []):
                simplified_lora = {
                    "file_name": lora.get("file_name", ""),
                    "hash": lora.get("hash", "").lower() if lora.get("hash") else "",
                    "strength": float(lora.get("strength", 1.0)),
                    "modelVersionId": lora.get("modelVersionId", ""),
                    "modelName": lora.get("modelName", ""),
                    "modelVersionName": lora.get("modelVersionName", ""),
                }
                simplified_loras.append(simplified_lora)

            # Create recipe metadata JSON
            recipe_metadata = {
                'title': recipe_data.get('title', ''),
                'base_model': recipe_data.get('base_model', ''),
                'loras': simplified_loras,
                'gen_params': recipe_data.get('gen_params', {}),
                'tags': recipe_data.get('tags', [])
            }

            # Convert to JSON string and build the marker block
            recipe_metadata_json = json.dumps(recipe_metadata)
            recipe_metadata_marker = f"Recipe metadata: {recipe_metadata_json}"

            # Append to existing metadata or create new one
            new_metadata = f"{metadata} \n {recipe_metadata_marker}" if metadata else recipe_metadata_marker

            # Write back to the image
            return ExifUtils.update_image_metadata(image_path, new_metadata)
        except Exception as e:
            logger.error(f"Error appending recipe metadata: {e}", exc_info=True)
            return image_path

    @staticmethod
    def remove_recipe_metadata(user_comment):
        """Remove the 'Recipe metadata: ...' block from a user comment string.

        Returns the comment unchanged when no recipe block is present, and
        "" for empty/None input.
        """
        if not user_comment:
            return ""

        # Find the recipe metadata marker
        recipe_marker_index = user_comment.find("Recipe metadata: ")
        if recipe_marker_index == -1:
            return user_comment

        # If recipe metadata is not at the start, remove the preceding ", "
        if recipe_marker_index >= 2 and user_comment[recipe_marker_index-2:recipe_marker_index] == ", ":
            recipe_marker_index -= 2

        # Remove the recipe metadata part.
        # First, find where the metadata ends (next line or end of string)
        next_line_index = user_comment.find("\n", recipe_marker_index)
        if next_line_index == -1:
            # Metadata is at the end of the string
            return user_comment[:recipe_marker_index].rstrip()
        else:
            # Metadata is in the middle of the string
            return user_comment[:recipe_marker_index] + user_comment[next_line_index:]

    @staticmethod
    def optimize_image(image_data, target_width=250, format='webp', quality=85, preserve_metadata=True):
        """
        Optimize an image by resizing and converting to WebP format

        Args:
            image_data: Binary image data or path to image file
            target_width: Width to resize the image to (preserves aspect ratio)
            format: Output format (default: webp)
            quality: Output quality (0-100)
            preserve_metadata: Whether to preserve EXIF metadata

        Returns:
            Tuple of (optimized_image_data, extension)
        """
        img = None
        try:
            # Extract metadata if needed
            metadata = None
            if preserve_metadata:
                if isinstance(image_data, str) and os.path.exists(image_data):
                    # It's a file path
                    metadata = ExifUtils.extract_image_metadata(image_data)
                    img = Image.open(image_data)
                else:
                    # It's binary data; write it to a temp file so the
                    # metadata extractor can read it from disk
                    img = Image.open(BytesIO(image_data))
                    import tempfile
                    with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp_file:
                        temp_path = temp_file.name
                        temp_file.write(image_data)
                    metadata = ExifUtils.extract_image_metadata(temp_path)
                    os.unlink(temp_path)
            else:
                # Just open the image without extracting metadata
                if isinstance(image_data, str) and os.path.exists(image_data):
                    img = Image.open(image_data)
                else:
                    img = Image.open(BytesIO(image_data))

            # Calculate new height to maintain aspect ratio
            width, height = img.size
            new_height = int(height * (target_width / width))

            # Resize the image
            resized_img = img.resize((target_width, new_height), Image.LANCZOS)

            # Save to BytesIO in the specified format
            output = BytesIO()
            fmt = format.lower()
            if fmt == 'webp':
                resized_img.save(output, format='WEBP', quality=quality)
                extension = '.webp'
            elif fmt in ('jpg', 'jpeg'):
                resized_img.save(output, format='JPEG', quality=quality)
                extension = '.jpg'
            elif fmt == 'png':
                resized_img.save(output, format='PNG', optimize=True)
                extension = '.png'
            else:
                # Default to WebP for unrecognized formats
                resized_img.save(output, format='WEBP', quality=quality)
                extension = '.webp'

            # Get the optimized image data
            optimized_data = output.getvalue()

            # Re-attach metadata if requested and available
            if preserve_metadata and metadata:
                if fmt == 'webp':
                    # WebP supports saving EXIF directly
                    output_with_metadata = BytesIO()
                    exif_dict = {'Exif': {piexif.ExifIFD.UserComment: b'UNICODE\0' + metadata.encode('utf-16be')}}
                    exif_bytes = piexif.dump(exif_dict)
                    resized_img.save(output_with_metadata, format='WEBP', exif=exif_bytes, quality=quality)
                    optimized_data = output_with_metadata.getvalue()
                else:
                    # For other formats, round-trip through a temporary file
                    import tempfile
                    with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as temp_file:
                        temp_path = temp_file.name
                        temp_file.write(optimized_data)
                    # Add the metadata back, then read the annotated file
                    ExifUtils.update_image_metadata(temp_path, metadata)
                    with open(temp_path, 'rb') as f:
                        optimized_data = f.read()
                    os.unlink(temp_path)

            return optimized_data, extension
        except Exception as e:
            logger.error(f"Error optimizing image: {e}", exc_info=True)
            # Return original data if optimization fails
            if isinstance(image_data, str) and os.path.exists(image_data):
                with open(image_data, 'rb') as f:
                    return f.read(), os.path.splitext(image_data)[1]
            return image_data, '.jpg'
        finally:
            # BUGFIX: the source image was never closed, leaking file handles
            if img is not None:
                img.close()

View File

@@ -2,10 +2,14 @@ import logging
import os
import hashlib
import json
from typing import Dict, Optional
import time
from typing import Dict, Optional, Type
from .lora_metadata import extract_lora_metadata
from .models import LoraMetadata
from .model_utils import determine_base_model
from .lora_metadata import extract_lora_metadata, extract_checkpoint_metadata
from .models import BaseModelMetadata, LoraMetadata, CheckpointMetadata
from .constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
from .exif_utils import ExifUtils
logger = logging.getLogger(__name__)
@@ -13,35 +17,56 @@ async def calculate_sha256(file_path: str) -> str:
"""Calculate SHA256 hash of a file"""
sha256_hash = hashlib.sha256()
with open(file_path, "rb") as f:
for byte_block in iter(lambda: f.read(4096), b""):
for byte_block in iter(lambda: f.read(128 * 1024), b""):
sha256_hash.update(byte_block)
return sha256_hash.hexdigest()
def _find_preview_file(base_name: str, dir_path: str) -> str:
def find_preview_file(base_name: str, dir_path: str) -> str:
"""Find preview file for given base name in directory"""
preview_patterns = [
f"{base_name}.preview.png",
f"{base_name}.preview.jpg",
f"{base_name}.preview.jpeg",
f"{base_name}.preview.mp4",
f"{base_name}.png",
f"{base_name}.jpg",
f"{base_name}.jpeg",
f"{base_name}.mp4"
]
for pattern in preview_patterns:
full_pattern = os.path.join(dir_path, pattern)
for ext in PREVIEW_EXTENSIONS:
full_pattern = os.path.join(dir_path, f"{base_name}{ext}")
if os.path.exists(full_pattern):
# Check if this is an image and not already webp
if ext.lower().endswith(('.jpg', '.jpeg', '.png')) and not ext.lower().endswith('.webp'):
try:
# Optimize the image to webp format
webp_path = os.path.join(dir_path, f"{base_name}.webp")
# Use ExifUtils to optimize the image
with open(full_pattern, 'rb') as f:
image_data = f.read()
optimized_data, _ = ExifUtils.optimize_image(
image_data=image_data,
target_width=CARD_PREVIEW_WIDTH,
format='webp',
quality=85,
preserve_metadata=True
)
# Save the optimized webp file
with open(webp_path, 'wb') as f:
f.write(optimized_data)
logger.debug(f"Optimized preview image from {full_pattern} to {webp_path}")
return webp_path.replace(os.sep, "/")
except Exception as e:
logger.error(f"Error optimizing preview image {full_pattern}: {e}")
# Fall back to original file if optimization fails
return full_pattern.replace(os.sep, "/")
# Return the original path for webp images or non-image files
return full_pattern.replace(os.sep, "/")
return ""
def normalize_path(path: str) -> str:
    """Normalize file path to use forward slashes"""
    if not path:
        # Preserve falsy inputs (None, "") unchanged
        return path
    return path.replace(os.sep, "/")
async def get_file_info(file_path: str) -> Optional[LoraMetadata]:
"""Get basic file information as LoraMetadata object"""
async def get_file_info(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
"""Get basic file information as a model metadata object"""
# First check if file actually exists and resolve symlinks
try:
real_path = os.path.realpath(file_path)
@@ -54,26 +79,81 @@ async def get_file_info(file_path: str) -> Optional[LoraMetadata]:
base_name = os.path.splitext(os.path.basename(file_path))[0]
dir_path = os.path.dirname(file_path)
preview_url = _find_preview_file(base_name, dir_path)
preview_url = find_preview_file(base_name, dir_path)
# Check if a .json file exists with SHA256 hash to avoid recalculation
json_path = f"{os.path.splitext(file_path)[0]}.json"
sha256 = None
if os.path.exists(json_path):
try:
with open(json_path, 'r', encoding='utf-8') as f:
json_data = json.load(f)
if 'sha256' in json_data:
sha256 = json_data['sha256'].lower()
logger.debug(f"Using SHA256 from .json file for {file_path}")
except Exception as e:
logger.error(f"Error reading .json file for {file_path}: {e}")
# If SHA256 is still not found, check for a .sha256 file
if sha256 is None:
sha256_file = f"{os.path.splitext(file_path)[0]}.sha256"
if os.path.exists(sha256_file):
try:
with open(sha256_file, 'r', encoding='utf-8') as f:
sha256 = f.read().strip().lower()
logger.debug(f"Using SHA256 from .sha256 file for {file_path}")
except Exception as e:
logger.error(f"Error reading .sha256 file for {file_path}: {e}")
try:
metadata = LoraMetadata(
file_name=base_name,
model_name=base_name,
file_path=normalize_path(file_path),
size=os.path.getsize(real_path),
modified=os.path.getmtime(real_path),
sha256=await calculate_sha256(real_path),
base_model="Unknown", # Will be updated later
usage_tips="",
notes="",
from_civitai=True,
preview_url=normalize_path(preview_url),
)
# If we didn't get SHA256 from the .json file, calculate it
if not sha256:
start_time = time.time()
sha256 = await calculate_sha256(real_path)
logger.debug(f"Calculated SHA256 for {file_path} in {time.time() - start_time:.2f} seconds")
# Create default metadata based on model class
if model_class == CheckpointMetadata:
metadata = CheckpointMetadata(
file_name=base_name,
model_name=base_name,
file_path=normalize_path(file_path),
size=os.path.getsize(real_path),
modified=os.path.getmtime(real_path),
sha256=sha256,
base_model="Unknown", # Will be updated later
preview_url=normalize_path(preview_url),
tags=[],
modelDescription="",
model_type="checkpoint"
)
# Extract checkpoint-specific metadata
# model_info = await extract_checkpoint_metadata(real_path)
# metadata.base_model = model_info['base_model']
# if 'model_type' in model_info:
# metadata.model_type = model_info['model_type']
else: # Default to LoraMetadata
metadata = LoraMetadata(
file_name=base_name,
model_name=base_name,
file_path=normalize_path(file_path),
size=os.path.getsize(real_path),
modified=os.path.getmtime(real_path),
sha256=sha256,
base_model="Unknown", # Will be updated later
usage_tips="{}",
preview_url=normalize_path(preview_url),
tags=[],
modelDescription=""
)
# Extract lora-specific metadata
model_info = await extract_lora_metadata(real_path)
metadata.base_model = model_info['base_model']
# create metadata file
base_model_info = await extract_lora_metadata(real_path)
metadata.base_model = base_model_info['base_model']
# Save metadata to file
await save_metadata(file_path, metadata)
return metadata
@@ -81,7 +161,7 @@ async def get_file_info(file_path: str) -> Optional[LoraMetadata]:
logger.error(f"Error getting file info for {file_path}: {e}")
return None
async def save_metadata(file_path: str, metadata: LoraMetadata) -> None:
async def save_metadata(file_path: str, metadata: BaseModelMetadata) -> None:
"""Save metadata to .metadata.json file"""
metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
try:
@@ -94,7 +174,7 @@ async def save_metadata(file_path: str, metadata: LoraMetadata) -> None:
except Exception as e:
print(f"Error saving metadata to {metadata_path}: {str(e)}")
async def load_metadata(file_path: str) -> Optional[LoraMetadata]:
async def load_metadata(file_path: str, model_class: Type[BaseModelMetadata] = LoraMetadata) -> Optional[BaseModelMetadata]:
"""Load metadata from .metadata.json file"""
metadata_path = f"{os.path.splitext(file_path)[0]}.metadata.json"
try:
@@ -103,28 +183,61 @@ async def load_metadata(file_path: str) -> Optional[LoraMetadata]:
data = json.load(f)
needs_update = False
if data['file_path'] != normalize_path(data['file_path']):
data['file_path'] = normalize_path(data['file_path'])
# Check and normalize base model name
normalized_base_model = determine_base_model(data['base_model'])
if data['base_model'] != normalized_base_model:
data['base_model'] = normalized_base_model
needs_update = True
# Compare paths without extensions
stored_path_base = os.path.splitext(data['file_path'])[0]
current_path_base = os.path.splitext(normalize_path(file_path))[0]
if stored_path_base != current_path_base:
data['file_path'] = normalize_path(file_path)
needs_update = True
# TODO: optimize preview image to webp format if not already done
preview_url = data.get('preview_url', '')
if not preview_url or not os.path.exists(preview_url):
base_name = os.path.splitext(os.path.basename(file_path))[0]
dir_path = os.path.dirname(file_path)
new_preview_url = normalize_path(_find_preview_file(base_name, dir_path))
new_preview_url = normalize_path(find_preview_file(base_name, dir_path))
if new_preview_url != preview_url:
data['preview_url'] = new_preview_url
needs_update = True
elif preview_url != normalize_path(preview_url):
data['preview_url'] = normalize_path(preview_url)
else:
# Compare preview paths without extensions
stored_preview_base = os.path.splitext(preview_url)[0]
current_preview_base = os.path.splitext(normalize_path(preview_url))[0]
if stored_preview_base != current_preview_base:
data['preview_url'] = normalize_path(preview_url)
needs_update = True
# Ensure all fields are present
if 'tags' not in data:
data['tags'] = []
needs_update = True
if 'modelDescription' not in data:
data['modelDescription'] = ""
needs_update = True
# For checkpoint metadata
if model_class == CheckpointMetadata and 'model_type' not in data:
data['model_type'] = "checkpoint"
needs_update = True
# For lora metadata
if model_class == LoraMetadata and 'usage_tips' not in data:
data['usage_tips'] = "{}"
needs_update = True
if needs_update:
with open(metadata_path, 'w', encoding='utf-8') as f:
json.dump(data, f, indent=2, ensure_ascii=False)
return LoraMetadata.from_dict(data)
return model_class.from_dict(data)
except Exception as e:
print(f"Error loading metadata from {metadata_path}: {str(e)}")

View File

@@ -1,6 +1,7 @@
from safetensors import safe_open
from typing import Dict
from .model_utils import determine_base_model
import os
async def extract_lora_metadata(file_path: str) -> Dict:
"""Extract essential metadata from safetensors file"""
@@ -13,4 +14,67 @@ async def extract_lora_metadata(file_path: str) -> Dict:
return {"base_model": base_model}
except Exception as e:
print(f"Error reading metadata from {file_path}: {str(e)}")
return {"base_model": "Unknown"}
return {"base_model": "Unknown"}
async def extract_checkpoint_metadata(file_path: str) -> dict:
    """Extract metadata from a checkpoint file to determine model type and base model

    Args:
        file_path: Path to the checkpoint file

    Returns:
        dict with 'base_model' and 'model_type' keys (defaults:
        'Unknown' / 'checkpoint' when nothing can be determined)
    """
    try:
        # Analyze filename for clues about the model
        filename = os.path.basename(file_path).lower()
        model_info = {
            'base_model': 'Unknown',
            'model_type': 'checkpoint'
        }

        # Detect base model from filename
        # NOTE: plain substring checks — e.g. 'v1' also matches 'v10', so
        # this is a best-effort heuristic only
        if 'xl' in filename or 'sdxl' in filename:
            model_info['base_model'] = 'SDXL'
        elif 'sd3' in filename:
            model_info['base_model'] = 'SD3'
        elif 'sd2' in filename or 'v2' in filename:
            model_info['base_model'] = 'SD2.x'
        elif 'sd1' in filename or 'v1' in filename:
            model_info['base_model'] = 'SD1.5'

        # Detect model type from filename
        if 'inpaint' in filename:
            model_info['model_type'] = 'inpainting'
        elif 'anime' in filename:
            model_info['model_type'] = 'anime'
        elif 'realistic' in filename:
            model_info['model_type'] = 'realistic'

        # Try to peek at the safetensors file structure if available
        if file_path.endswith('.safetensors'):
            import json
            import struct
            with open(file_path, 'rb') as f:
                # safetensors layout: 8-byte little-endian header length,
                # followed by a JSON header describing the tensors
                header_size = struct.unpack('<Q', f.read(8))[0]
                header_json = f.read(header_size)
                header = json.loads(header_json)

                # Look for specific keys to identify model type
                metadata = header.get('__metadata__', {})
                if metadata:
                    # SDXL checkpoints carry a second text-encoder branch
                    if any(key.startswith('conditioner.embedders.1') for key in header):
                        model_info['base_model'] = 'SDXL'
                    # Look for model type info
                    if metadata.get('modelspec.architecture') == 'SD-XL':
                        model_info['base_model'] = 'SDXL'
                    elif metadata.get('modelspec.architecture') == 'SD-3':
                        model_info['base_model'] = 'SD3'
                    # Check for specific use case
                    if metadata.get('modelspec.purpose') == 'inpainting':
                        model_info['model_type'] = 'inpainting'

        return model_info
    except Exception as e:
        # BUGFIX: the original referenced an undefined `logger` here (this
        # module never creates one), which turned any failure into a
        # NameError; report via print() like extract_lora_metadata above.
        print(f"Error extracting checkpoint metadata for {file_path}: {str(e)}")
        # Return default values
        return {'base_model': 'Unknown', 'model_type': 'checkpoint'}

View File

@@ -2,13 +2,15 @@ from typing import Optional
# Base model mapping based on version string
BASE_MODEL_MAPPING = {
    "sd_1.5": "SD 1.5",
    "sd-v1-5": "SD 1.5",
    "sd-v2-1": "SD 2.1",
    "sdxl": "SDXL 1.0",
    "sd-v2": "SD 2.0",
    "flux1": "Flux.1 D",
    "flux.1 d": "Flux.1 D",
    # "illustrious" previously appeared twice ("IL", then "Illustrious");
    # only the later duplicate took effect, so keep the canonical name once.
    "illustrious": "Illustrious",
    "il": "Illustrious",
    "pony": "Pony",
    "Hunyuan Video": "Hunyuan Video"
}

View File

@@ -1,51 +1,38 @@
from dataclasses import dataclass, asdict
from typing import Dict, Optional
from typing import Dict, Optional, List
from datetime import datetime
import os
from .model_utils import determine_base_model
@dataclass
class LoraMetadata:
"""Represents the metadata structure for a Lora model"""
file_name: str # The filename without extension of the lora
model_name: str # The lora's name defined by the creator, initially same as file_name
file_path: str # Full path to the safetensors file
class BaseModelMetadata:
"""Base class for all model metadata structures"""
file_name: str # The filename without extension
model_name: str # The model's name defined by the creator
file_path: str # Full path to the model file
size: int # File size in bytes
modified: float # Last modified timestamp
sha256: str # SHA256 hash of the file
base_model: str # Base model (SD1.5/SD2.1/SDXL/etc.)
base_model: str # Base model type (SD1.5/SD2.1/SDXL/etc.)
preview_url: str # Preview image URL
usage_tips: str = "{}" # Usage tips for the model, json string
preview_nsfw_level: int = 0 # NSFW level of the preview image
notes: str = "" # Additional notes
from_civitai: bool = True # Whether the lora is from Civitai
from_civitai: bool = True # Whether from Civitai
civitai: Optional[Dict] = None # Civitai API data if available
tags: List[str] = None # Model tags
modelDescription: str = "" # Full model description
def __post_init__(self):
# Initialize empty lists to avoid mutable default parameter issue
if self.tags is None:
self.tags = []
@classmethod
def from_dict(cls, data: Dict) -> 'LoraMetadata':
"""Create LoraMetadata instance from dictionary"""
# Create a copy of the data to avoid modifying the input
def from_dict(cls, data: Dict) -> 'BaseModelMetadata':
"""Create instance from dictionary"""
data_copy = data.copy()
return cls(**data_copy)
@classmethod
def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'LoraMetadata':
"""Create LoraMetadata instance from Civitai version info"""
file_name = file_info['name']
base_model = determine_base_model(version_info.get('baseModel', ''))
return cls(
file_name=os.path.splitext(file_name)[0],
model_name=version_info.get('model').get('name', os.path.splitext(file_name)[0]),
file_path=save_path.replace(os.sep, '/'),
size=file_info.get('sizeKB', 0) * 1024,
modified=datetime.now().timestamp(),
sha256=file_info['hashes'].get('SHA256', ''),
base_model=base_model,
preview_url=None, # Will be updated after preview download
from_civitai=True,
civitai=version_info
)
def to_dict(self) -> Dict:
"""Convert to dictionary for JSON serialization"""
return asdict(self)
@@ -66,3 +53,55 @@ class LoraMetadata:
self.modified = os.path.getmtime(file_path)
self.file_path = file_path.replace(os.sep, '/')
@dataclass
class LoraMetadata(BaseModelMetadata):
    """Represents the metadata structure for a Lora model"""
    usage_tips: str = "{}"  # Usage tips for the model, json string

    @classmethod
    def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'LoraMetadata':
        """Create LoraMetadata instance from Civitai version info

        Args:
            version_info: Civitai model-version payload
            file_info: One file entry from the version payload
            save_path: Local path where the model file was saved

        Returns:
            LoraMetadata populated from the payload; preview fields are
            placeholders updated after the preview download.
        """
        file_name = file_info['name']
        base_model = determine_base_model(version_info.get('baseModel', ''))
        # 'model' and 'hashes' can be absent in partial API payloads; fall back
        # gracefully instead of raising AttributeError/KeyError.
        model_name = (version_info.get('model') or {}).get('name', os.path.splitext(file_name)[0])
        return cls(
            file_name=os.path.splitext(file_name)[0],
            model_name=model_name,
            file_path=save_path.replace(os.sep, '/'),
            size=file_info.get('sizeKB', 0) * 1024,
            modified=datetime.now().timestamp(),
            sha256=file_info.get('hashes', {}).get('SHA256', '').lower(),
            base_model=base_model,
            preview_url=None,  # Will be updated after preview download
            preview_nsfw_level=0,  # Will be updated after preview download
            from_civitai=True,
            civitai=version_info
        )
@dataclass
class CheckpointMetadata(BaseModelMetadata):
    """Represents the metadata structure for a Checkpoint model"""
    model_type: str = "checkpoint"  # Model type (checkpoint, inpainting, etc.)

    @classmethod
    def from_civitai_info(cls, version_info: Dict, file_info: Dict, save_path: str) -> 'CheckpointMetadata':
        """Create CheckpointMetadata instance from Civitai version info

        Args:
            version_info: Civitai model-version payload
            file_info: One file entry from the version payload
            save_path: Local path where the model file was saved

        Returns:
            CheckpointMetadata populated from the payload; preview fields are
            placeholders updated after the preview download.
        """
        file_name = file_info['name']
        base_model = determine_base_model(version_info.get('baseModel', ''))
        model_type = version_info.get('type', 'checkpoint')
        # 'model' and 'hashes' can be absent in partial API payloads; fall back
        # gracefully instead of raising AttributeError/KeyError.
        model_name = (version_info.get('model') or {}).get('name', os.path.splitext(file_name)[0])
        return cls(
            file_name=os.path.splitext(file_name)[0],
            model_name=model_name,
            file_path=save_path.replace(os.sep, '/'),
            size=file_info.get('sizeKB', 0) * 1024,
            modified=datetime.now().timestamp(),
            sha256=file_info.get('hashes', {}).get('SHA256', '').lower(),
            base_model=base_model,
            preview_url=None,  # Will be updated after preview download
            preview_nsfw_level=0,
            from_civitai=True,
            civitai=version_info,
            model_type=model_type
        )

1083
py/utils/recipe_parsers.py Normal file

File diff suppressed because it is too large Load Diff

503
py/utils/routes_common.py Normal file
View File

@@ -0,0 +1,503 @@
import os
import json
import logging
from typing import Dict, List, Callable, Awaitable
from aiohttp import web
from .model_utils import determine_base_model
from .constants import PREVIEW_EXTENSIONS, CARD_PREVIEW_WIDTH
from ..config import config
from ..services.civitai_client import CivitaiClient
from ..utils.exif_utils import ExifUtils
from ..services.download_manager import DownloadManager
logger = logging.getLogger(__name__)
class ModelRouteUtils:
    """Shared utilities for model routes (LoRAs, Checkpoints, etc.)

    All members are static: the class is a namespace for route helpers shared
    by the LoRA and Checkpoint route handlers. Methods that take a `scanner`
    expect an object exposing cache-management methods (get_cached_data,
    update_single_model_cache, ...) — assumed duck-typed; confirm against the
    scanner implementations.
    """

    @staticmethod
    async def load_local_metadata(metadata_path: str) -> Dict:
        """Load local metadata file.

        Returns the parsed JSON dict, or {} when the file is missing or
        unreadable (read errors are logged, never raised).
        """
        if os.path.exists(metadata_path):
            try:
                with open(metadata_path, 'r', encoding='utf-8') as f:
                    return json.load(f)
            except Exception as e:
                logger.error(f"Error loading metadata from {metadata_path}: {e}")
        return {}

    @staticmethod
    async def handle_not_found_on_civitai(metadata_path: str, local_metadata: Dict) -> None:
        """Handle case when model is not found on CivitAI.

        Marks the metadata as not-from-civitai (mutates `local_metadata`
        in place) and persists it to disk.
        """
        local_metadata['from_civitai'] = False
        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(local_metadata, f, indent=2, ensure_ascii=False)

    @staticmethod
    async def update_model_metadata(metadata_path: str, local_metadata: Dict,
                                    civitai_metadata: Dict, client: CivitaiClient) -> None:
        """Update local metadata with CivitAI data.

        Mutates `local_metadata` in place (civitai payload, model name,
        description/tags, base model, preview) and writes it back to
        `metadata_path` at the end.
        """
        local_metadata['civitai'] = civitai_metadata
        # Update model name if available
        if 'model' in civitai_metadata:
            if civitai_metadata.get('model', {}).get('name'):
                local_metadata['model_name'] = civitai_metadata['model']['name']
        # Fetch additional model metadata (description and tags) if we have model ID
        # NOTE(review): ['modelId'] raises KeyError if the key is absent —
        # presumably every hash-lookup payload carries it; confirm with the API.
        model_id = civitai_metadata['modelId']
        if model_id:
            model_metadata, _ = await client.get_model_metadata(str(model_id))
            if model_metadata:
                local_metadata['modelDescription'] = model_metadata.get('description', '')
                local_metadata['tags'] = model_metadata.get('tags', [])
        # Update base model
        local_metadata['base_model'] = determine_base_model(civitai_metadata.get('baseModel'))
        # Update preview if needed (missing URL, or file no longer on disk)
        if not local_metadata.get('preview_url') or not os.path.exists(local_metadata['preview_url']):
            first_preview = next((img for img in civitai_metadata.get('images', [])), None)
            if first_preview:
                # Determine if content is video or image
                is_video = first_preview['type'] == 'video'
                if is_video:
                    # For videos use .mp4 extension
                    preview_ext = '.mp4'
                else:
                    # For images use .webp extension
                    preview_ext = '.webp'
                # Double splitext strips a multi-part suffix like ".metadata.json"
                base_name = os.path.splitext(os.path.splitext(os.path.basename(metadata_path))[0])[0]
                preview_filename = base_name + preview_ext
                preview_path = os.path.join(os.path.dirname(metadata_path), preview_filename)
                if is_video:
                    # Download video as is
                    if await client.download_preview_image(first_preview['url'], preview_path):
                        local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
                        local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
                else:
                    # For images, download to a temp file first, then optimize to WebP
                    temp_path = preview_path + ".temp"
                    if await client.download_preview_image(first_preview['url'], temp_path):
                        try:
                            # Read the downloaded image
                            with open(temp_path, 'rb') as f:
                                image_data = f.read()
                            # Optimize and convert to WebP
                            optimized_data, _ = ExifUtils.optimize_image(
                                image_data=image_data,
                                target_width=CARD_PREVIEW_WIDTH,
                                format='webp',
                                quality=85,
                                preserve_metadata=True
                            )
                            # Save the optimized WebP image
                            with open(preview_path, 'wb') as f:
                                f.write(optimized_data)
                            # Update metadata
                            local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
                            local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
                            # Remove the temporary file
                            if os.path.exists(temp_path):
                                os.remove(temp_path)
                        except Exception as e:
                            logger.error(f"Error optimizing preview image: {e}")
                            # If optimization fails, try to use the downloaded image directly
                            if os.path.exists(temp_path):
                                os.rename(temp_path, preview_path)
                                local_metadata['preview_url'] = preview_path.replace(os.sep, '/')
                                local_metadata['preview_nsfw_level'] = first_preview.get('nsfwLevel', 0)
        # Save updated metadata
        with open(metadata_path, 'w', encoding='utf-8') as f:
            json.dump(local_metadata, f, indent=2, ensure_ascii=False)

    @staticmethod
    async def fetch_and_update_model(
        sha256: str,
        file_path: str,
        model_data: dict,
        update_cache_func: Callable[[str, str, Dict], Awaitable[bool]]
    ) -> bool:
        """Fetch and update metadata for a single model

        Args:
            sha256: SHA256 hash of the model file
            file_path: Path to the model file
            model_data: The model object in cache to update (mutated in place)
            update_cache_func: Function to update the cache with new metadata

        Returns:
            bool: True if successful, False otherwise (errors are logged,
            never raised; the CivitAI client is always closed).
        """
        client = CivitaiClient()
        try:
            metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
            # Check if model metadata exists
            local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
            # Fetch metadata from Civitai
            civitai_metadata = await client.get_model_by_hash(sha256)
            if not civitai_metadata:
                # Mark as not from CivitAI if not found
                local_metadata['from_civitai'] = False
                model_data['from_civitai'] = False
                with open(metadata_path, 'w', encoding='utf-8') as f:
                    json.dump(local_metadata, f, indent=2, ensure_ascii=False)
                return False
            # Update metadata
            await ModelRouteUtils.update_model_metadata(
                metadata_path,
                local_metadata,
                civitai_metadata,
                client
            )
            # Update cache object directly
            model_data.update({
                'model_name': local_metadata.get('model_name'),
                'preview_url': local_metadata.get('preview_url'),
                'from_civitai': True,
                'civitai': civitai_metadata
            })
            # Update cache using the provided function
            await update_cache_func(file_path, file_path, local_metadata)
            return True
        except Exception as e:
            logger.error(f"Error fetching CivitAI data: {e}")
            return False
        finally:
            await client.close()

    @staticmethod
    def filter_civitai_data(data: Dict) -> Dict:
        """Filter relevant fields from CivitAI data.

        Keeps only the whitelisted keys below; returns {} for falsy input.
        """
        if not data:
            return {}
        fields = [
            "id", "modelId", "name", "createdAt", "updatedAt",
            "publishedAt", "trainedWords", "baseModel", "description",
            "model", "images"
        ]
        return {k: data[k] for k in fields if k in data}

    @staticmethod
    async def delete_model_files(target_dir: str, file_name: str, file_monitor=None) -> List[str]:
        """Delete model and associated files

        Args:
            target_dir: Directory containing the model files
            file_name: Base name of the model file without extension
            file_monitor: Optional file monitor to ignore delete events

        Returns:
            List of deleted file paths.
            NOTE(review): the list mixes the absolute path for the main file
            with bare filenames for the companions — confirm callers only use
            it for display.
        """
        patterns = [
            f"{file_name}.safetensors",  # Required
            f"{file_name}.metadata.json",
        ]
        # Add all preview file extensions
        for ext in PREVIEW_EXTENSIONS:
            patterns.append(f"{file_name}{ext}")
        deleted = []
        main_file = patterns[0]
        main_path = os.path.join(target_dir, main_file).replace(os.sep, '/')
        if os.path.exists(main_path):
            # Notify file monitor to ignore delete event if available
            if file_monitor:
                file_monitor.handler.add_ignore_path(main_path, 0)
            # Delete file
            os.remove(main_path)
            deleted.append(main_path)
        else:
            logger.warning(f"Model file not found: {main_file}")
        # Delete optional files (best-effort; failures are logged, not raised)
        for pattern in patterns[1:]:
            path = os.path.join(target_dir, pattern)
            if os.path.exists(path):
                try:
                    os.remove(path)
                    deleted.append(pattern)
                except Exception as e:
                    logger.warning(f"Failed to delete {pattern}: {e}")
        return deleted

    @staticmethod
    def get_multipart_ext(filename):
        """Get extension that may have multiple parts like .metadata.json"""
        parts = filename.split(".")
        if len(parts) > 2:  # If contains multi-part extension
            return "." + ".".join(parts[-2:])  # Take the last two parts, like ".metadata.json"
        return os.path.splitext(filename)[1]  # Otherwise take the regular extension, like ".safetensors"

    # New common endpoint handlers

    @staticmethod
    async def handle_delete_model(request: web.Request, scanner) -> web.Response:
        """Handle model deletion request

        Args:
            request: The aiohttp request (JSON body with 'file_path')
            scanner: The model scanner instance with cache management methods

        Returns:
            web.Response: The HTTP response (400 on missing path, 500 on error)
        """
        try:
            data = await request.json()
            file_path = data.get('file_path')
            if not file_path:
                return web.Response(text='Model path is required', status=400)
            target_dir = os.path.dirname(file_path)
            file_name = os.path.splitext(os.path.basename(file_path))[0]
            # Get the file monitor from the scanner if available
            file_monitor = getattr(scanner, 'file_monitor', None)
            deleted_files = await ModelRouteUtils.delete_model_files(
                target_dir,
                file_name,
                file_monitor
            )
            # Remove from cache
            cache = await scanner.get_cached_data()
            cache.raw_data = [item for item in cache.raw_data if item['file_path'] != file_path]
            await cache.resort()
            # Update hash index if available
            if hasattr(scanner, '_hash_index') and scanner._hash_index:
                scanner._hash_index.remove_by_path(file_path)
            return web.json_response({
                'success': True,
                'deleted_files': deleted_files
            })
        except Exception as e:
            logger.error(f"Error deleting model: {e}", exc_info=True)
            return web.Response(text=str(e), status=500)

    @staticmethod
    async def handle_fetch_civitai(request: web.Request, scanner) -> web.Response:
        """Handle CivitAI metadata fetch request

        Args:
            request: The aiohttp request (JSON body with 'file_path')
            scanner: The model scanner instance with cache management methods

        Returns:
            web.Response: The HTTP response (400 without a SHA256 on disk,
            404 when the hash is unknown to CivitAI, 500 on error)
        """
        try:
            data = await request.json()
            metadata_path = os.path.splitext(data['file_path'])[0] + '.metadata.json'
            # Check if model metadata exists
            local_metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
            if not local_metadata or not local_metadata.get('sha256'):
                return web.json_response({"success": False, "error": "No SHA256 hash found"}, status=400)
            # Create a client for fetching from Civitai
            client = CivitaiClient()
            try:
                # Fetch and update metadata
                civitai_metadata = await client.get_model_by_hash(local_metadata["sha256"])
                if not civitai_metadata:
                    await ModelRouteUtils.handle_not_found_on_civitai(metadata_path, local_metadata)
                    return web.json_response({"success": False, "error": "Not found on CivitAI"}, status=404)
                await ModelRouteUtils.update_model_metadata(metadata_path, local_metadata, civitai_metadata, client)
                # Update the cache
                await scanner.update_single_model_cache(data['file_path'], data['file_path'], local_metadata)
                return web.json_response({"success": True})
            finally:
                await client.close()
        except Exception as e:
            logger.error(f"Error fetching from CivitAI: {e}", exc_info=True)
            return web.json_response({"success": False, "error": str(e)}, status=500)

    @staticmethod
    async def handle_replace_preview(request: web.Request, scanner) -> web.Response:
        """Handle preview image replacement request

        Expects a multipart body with a 'preview_file' part followed by a
        'model_path' part, in that order.

        Args:
            request: The aiohttp request
            scanner: The model scanner instance with methods to update cache

        Returns:
            web.Response: The HTTP response
        """
        try:
            reader = await request.multipart()
            # Read preview file data
            field = await reader.next()
            if field.name != 'preview_file':
                raise ValueError("Expected 'preview_file' field")
            content_type = field.headers.get('Content-Type', 'image/png')
            preview_data = await field.read()
            # Read model path
            field = await reader.next()
            if field.name != 'model_path':
                raise ValueError("Expected 'model_path' field")
            model_path = (await field.read()).decode()
            # Save preview file
            base_name = os.path.splitext(os.path.basename(model_path))[0]
            folder = os.path.dirname(model_path)
            # Determine if content is video or image
            if content_type.startswith('video/'):
                # For videos, keep original format and use .mp4 extension
                extension = '.mp4'
                optimized_data = preview_data
            else:
                # For images, optimize and convert to WebP
                optimized_data, _ = ExifUtils.optimize_image(
                    image_data=preview_data,
                    target_width=CARD_PREVIEW_WIDTH,
                    format='webp',
                    quality=85,
                    preserve_metadata=True
                )
                extension = '.webp'  # Use .webp without .preview part
            preview_path = os.path.join(folder, base_name + extension).replace(os.sep, '/')
            with open(preview_path, 'wb') as f:
                f.write(optimized_data)
            # Update preview path in metadata (best-effort; errors only logged)
            metadata_path = os.path.splitext(model_path)[0] + '.metadata.json'
            if os.path.exists(metadata_path):
                try:
                    with open(metadata_path, 'r', encoding='utf-8') as f:
                        metadata = json.load(f)
                    # Update preview_url directly in the metadata dict
                    metadata['preview_url'] = preview_path
                    with open(metadata_path, 'w', encoding='utf-8') as f:
                        json.dump(metadata, f, indent=2, ensure_ascii=False)
                except Exception as e:
                    logger.error(f"Error updating metadata: {e}")
            # Update preview URL in scanner cache
            if hasattr(scanner, 'update_preview_in_cache'):
                await scanner.update_preview_in_cache(model_path, preview_path)
            return web.json_response({
                "success": True,
                "preview_url": config.get_preview_static_url(preview_path)
            })
        except Exception as e:
            logger.error(f"Error replacing preview: {e}", exc_info=True)
            return web.Response(text=str(e), status=500)

    @staticmethod
    async def handle_download_model(request: web.Request, download_manager: DownloadManager, model_type="lora") -> web.Response:
        """Handle model download request

        Args:
            request: The aiohttp request
            download_manager: Instance of DownloadManager
            model_type: Type of model ('lora' or 'checkpoint')

        Returns:
            web.Response: The HTTP response (400 when no identifier supplied,
            401 for early-access restrictions, 500 otherwise)
        """
        try:
            data = await request.json()

            # Create progress callback (broadcasts download progress over WebSocket)
            async def progress_callback(progress):
                from ..services.websocket_manager import ws_manager
                await ws_manager.broadcast({
                    'status': 'progress',
                    'progress': progress
                })

            # Check which identifier is provided
            download_url = data.get('download_url')
            model_hash = data.get('model_hash')
            model_version_id = data.get('model_version_id')
            # Validate that at least one identifier is provided
            if not any([download_url, model_hash, model_version_id]):
                return web.Response(
                    status=400,
                    text="Missing required parameter: Please provide either 'download_url', 'hash', or 'modelVersionId'"
                )
            # Use the correct root directory based on model type
            root_key = 'checkpoint_root' if model_type == 'checkpoint' else 'lora_root'
            save_dir = data.get(root_key)
            result = await download_manager.download_from_civitai(
                download_url=download_url,
                model_hash=model_hash,
                model_version_id=model_version_id,
                save_dir=save_dir,
                relative_path=data.get('relative_path', ''),
                progress_callback=progress_callback,
                model_type=model_type
            )
            if not result.get('success', False):
                error_message = result.get('error', 'Unknown error')
                # Return 401 for early access errors
                if 'early access' in error_message.lower():
                    logger.warning(f"Early access download failed: {error_message}")
                    return web.Response(
                        status=401,  # Use 401 status code to match Civitai's response
                        text=f"Early Access Restriction: {error_message}"
                    )
                return web.Response(status=500, text=error_message)
            return web.json_response(result)
        except Exception as e:
            error_message = str(e)
            # Check if this might be an early access error
            if '401' in error_message:
                logger.warning(f"Early access error (401): {error_message}")
                return web.Response(
                    status=401,
                    text="Early Access Restriction: This model requires purchase. Please buy early access on Civitai.com."
                )
            logger.error(f"Error downloading {model_type}: {error_message}")
            return web.Response(status=500, text=error_message)

116
py/utils/utils.py Normal file
View File

@@ -0,0 +1,116 @@
from difflib import SequenceMatcher
import requests
import tempfile
import re
from bs4 import BeautifulSoup
def download_twitter_image(url):
    """Download image from a URL containing twitter:image meta tag

    Args:
        url (str): The URL to download image from

    Returns:
        str: Path to downloaded temporary image file, or None on failure
    """
    try:
        # Download page content; a timeout prevents a dead host from hanging the caller
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        # Parse HTML
        soup = BeautifulSoup(response.text, 'html.parser')
        # Find twitter:image meta tag; the Cards spec uses name=, some pages use property=
        meta_tag = (soup.find('meta', attrs={'property': 'twitter:image'})
                    or soup.find('meta', attrs={'name': 'twitter:image'}))
        if not meta_tag or not meta_tag.get('content'):
            return None
        image_url = meta_tag['content']
        # Download image
        image_response = requests.get(image_url, timeout=30)
        image_response.raise_for_status()
        # Save to temp file (caller is responsible for removing it)
        with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
            temp_file.write(image_response.content)
            return temp_file.name
    except Exception as e:
        print(f"Error downloading twitter image: {e}")
        return None
def download_civitai_image(url):
    """Download image from a URL containing avatar image with specific class and style attributes

    Args:
        url (str): The URL to download image from

    Returns:
        str: Path to downloaded temporary image file, or None on failure
    """
    try:
        # Download page content; a timeout prevents a dead host from hanging the caller
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        # Parse HTML
        soup = BeautifulSoup(response.text, 'html.parser')
        # Find image with specific class and style attributes
        # NOTE(review): the hashed class name (EdgeImage_image__iH4_q) is brittle
        # and will break when Civitai redeploys — consider a stabler selector.
        image = soup.select_one('img.EdgeImage_image__iH4_q.max-h-full.w-auto.max-w-full')
        if not image or 'src' not in image.attrs:
            return None
        image_url = image['src']
        # Download image
        image_response = requests.get(image_url, timeout=30)
        image_response.raise_for_status()
        # Save to temp file (caller is responsible for removing it)
        with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
            temp_file.write(image_response.content)
            return temp_file.name
    except Exception as e:
        print(f"Error downloading civitai avatar: {e}")
        return None
def fuzzy_match(text: str, pattern: str, threshold: float = 0.7) -> bool:
    """
    Check if text matches pattern using fuzzy matching.
    Every whitespace-separated word of `pattern` must appear in `text`,
    either as a substring or via a SequenceMatcher ratio >= threshold
    against some word of `text`. Matching is case-insensitive.
    """
    if not pattern or not text:
        return False

    haystack = text.lower()
    haystack_words = haystack.split()

    def word_matches(term: str) -> bool:
        # Cheap substring check first, then per-word fuzzy comparison.
        if term in haystack:
            return True
        return any(
            SequenceMatcher(None, candidate, term).ratio() >= threshold
            for candidate in haystack_words
        )

    # Every search word must match for the whole pattern to match.
    return all(word_matches(term) for term in pattern.lower().split())

3
py/workflow/__init__.py Normal file
View File

@@ -0,0 +1,3 @@
"""
ComfyUI workflow parsing module to extract generation parameters
"""

58
py/workflow/cli.py Normal file
View File

@@ -0,0 +1,58 @@
"""
Command-line interface for the ComfyUI workflow parser
"""
import argparse
import json
import os
import logging
import sys
from .parser import parse_workflow
# Configure root logging once at import time; StreamHandler writes to stderr
# by default, so CLI diagnostics do not pollute stdout JSON output.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)
def main():
    """Entry point for the CLI"""
    arg_parser = argparse.ArgumentParser(description='Parse ComfyUI workflow files')
    arg_parser.add_argument('input', help='Input workflow JSON file path')
    arg_parser.add_argument('-o', '--output', help='Output JSON file path')
    arg_parser.add_argument('-p', '--pretty', action='store_true', help='Pretty print JSON output')
    arg_parser.add_argument('--debug', action='store_true', help='Enable debug logging')
    args = arg_parser.parse_args()

    # Raise verbosity on the root logger when requested
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    # Validate input file before doing any work
    if not os.path.isfile(args.input):
        logger.error(f"Input file not found: {args.input}")
        sys.exit(1)

    try:
        result = parse_workflow(args.input, args.output)
        if args.output:
            logger.info(f"Output saved to: {args.output}")
        else:
            # No output file: dump the result to stdout instead
            print(json.dumps(result, indent=4 if args.pretty else None))
    except Exception as e:
        logger.error(f"Error parsing workflow: {e}")
        if args.debug:
            import traceback
            traceback.print_exc()
        sys.exit(1)

View File

@@ -0,0 +1,3 @@
"""
Extension directory for custom node mappers
"""

View File

@@ -0,0 +1,285 @@
"""
ComfyUI Core nodes mappers extension for workflow parsing
"""
import logging
from typing import Dict, Any, List
logger = logging.getLogger(__name__)
# =============================================================================
# Transform Functions
# =============================================================================
def transform_random_noise(inputs: Dict) -> Dict:
    """Transform function for RandomNoise node: expose the noise seed as a string."""
    noise_seed = inputs.get("noise_seed", "")
    return {"seed": str(noise_seed)}
def transform_ksampler_select(inputs: Dict) -> Dict:
    """Transform function for KSamplerSelect node: expose the sampler name."""
    sampler_name = inputs.get("sampler_name", "")
    return {"sampler": sampler_name}
def transform_basic_scheduler(inputs: Dict) -> Dict:
    """Transform function for BasicScheduler node.

    Emits scheduler/denoise always, plus steps when present. Steps may arrive
    as a plain value or as a dict carrying a 'value' entry.
    """
    out = {
        "scheduler": inputs.get("scheduler", ""),
        "denoise": str(inputs.get("denoise", "1.0")),
    }
    if "steps" in inputs:
        steps = inputs["steps"]
        if isinstance(steps, str):
            out["steps"] = steps
        elif isinstance(steps, dict) and "value" in steps:
            out["steps"] = str(steps["value"])
        else:
            out["steps"] = str(steps)
    return out
def transform_basic_guider(inputs: Dict) -> Dict:
    """Transform function for BasicGuider node.

    A string conditioning becomes the prompt; a dict conditioning and a dict
    model are passed through unchanged.
    """
    out = {}
    if "conditioning" in inputs:
        cond = inputs["conditioning"]
        if isinstance(cond, str):
            out["prompt"] = cond
        elif isinstance(cond, dict):
            out["conditioning"] = cond
    model = inputs.get("model")
    if isinstance(model, dict):
        out["model"] = model
    return out
def transform_model_sampling_flux(inputs: Dict) -> Dict:
    """Transform function for ModelSamplingFlux - mostly a pass-through node.

    This node is primarily used for routing, so we pass the upstream model
    info through. Returns {} when no model input was resolved, instead of
    raising KeyError (matching the graceful degradation of the other
    transforms in this module).
    """
    return inputs.get("model", {})
def transform_sampler_custom_advanced(inputs: Dict) -> Dict:
    """Transform function for SamplerCustomAdvanced node.

    Flattens the resolved upstream inputs (noise, sampler, sigmas, guider,
    latent_image) into a single generation-parameters dict. Each upstream
    value is only consumed when it resolved to a dict.
    """
    out = {}

    # Seed comes from the noise node
    noise = inputs.get("noise")
    if isinstance(noise, dict):
        out["seed"] = str(noise.get("seed", ""))

    # Sampler name
    sampler_info = inputs.get("sampler")
    if isinstance(sampler_info, dict):
        sampler_name = sampler_info.get("sampler", "")
        if sampler_name:
            out["sampler"] = sampler_name

    # Scheduler, steps, denoise come from the sigmas node
    sigmas = inputs.get("sigmas")
    if isinstance(sigmas, dict):
        out["scheduler"] = sigmas.get("scheduler", "")
        out["steps"] = str(sigmas.get("steps", ""))
        out["denoise"] = str(sigmas.get("denoise", "1.0"))

    # Prompt/guidance and model info come from the guider node
    guider = inputs.get("guider")
    if isinstance(guider, dict):
        conditioning = guider.get("conditioning")
        if isinstance(conditioning, str):
            out["prompt"] = conditioning
        elif isinstance(conditioning, dict):
            out["guidance"] = conditioning.get("guidance", "")
            out["prompt"] = conditioning.get("prompt", "")
        model = guider.get("model")
        if isinstance(model, dict):
            out["checkpoint"] = model.get("checkpoint", "")
            out["loras"] = model.get("loras", "")
            # ComfyUI stores clip_skip negative; report it positive
            out["clip_skip"] = str(int(model.get("clip_skip", "-1")) * -1)

    # Dimensions from the latent image
    latent = inputs.get("latent_image")
    if isinstance(latent, dict):
        width = latent.get("width", 0)
        height = latent.get("height", 0)
        if width and height:
            out["width"] = width
            out["height"] = height
            out["size"] = f"{width}x{height}"

    return out
def transform_ksampler(inputs: Dict) -> Dict:
    """Transform function for KSampler nodes.

    Flattens seed/steps/cfg/sampler/scheduler plus resolved prompt, latent
    dimensions and model info into a generation-parameters dict.

    Fixes vs. the earlier version: the model input is only consumed when it
    resolved to a dict (a raw node reference would raise AttributeError),
    and clip_skip taken from the node inputs is no longer clobbered by a
    fabricated default when the model carries none.
    """
    result = {
        "seed": str(inputs.get("seed", "")),
        "steps": str(inputs.get("steps", "")),
        "cfg": str(inputs.get("cfg", "")),
        "sampler": inputs.get("sampler_name", ""),
        "scheduler": inputs.get("scheduler", ""),
    }
    # Process positive prompt
    if "positive" in inputs:
        result["prompt"] = inputs["positive"]
    # Process negative prompt
    if "negative" in inputs:
        result["negative_prompt"] = inputs["negative"]
    # Get dimensions from latent image
    latent = inputs.get("latent_image")
    if isinstance(latent, dict):
        width = latent.get("width", 0)
        height = latent.get("height", 0)
        if width and height:
            result["size"] = f"{width}x{height}"
    # Add clip_skip if present
    if "clip_skip" in inputs:
        result["clip_skip"] = str(inputs.get("clip_skip", ""))
    # Add guidance if present
    if "guidance" in inputs:
        result["guidance"] = str(inputs.get("guidance", ""))
    # Add model info if it resolved to a dict
    model = inputs.get("model")
    if isinstance(model, dict):
        result["checkpoint"] = model.get("checkpoint", "")
        result["loras"] = model.get("loras", "")
        if "clip_skip" in model:
            # ComfyUI stores clip_skip negative; report it positive
            result["clip_skip"] = str(int(model["clip_skip"]) * -1)
    return result
def transform_empty_latent(inputs: Dict) -> Dict:
    """Transform function for EmptyLatentImage nodes: expose dimensions and a WxH size string."""
    dims = (inputs.get("width", 0), inputs.get("height", 0))
    return {"width": dims[0], "height": dims[1], "size": f"{dims[0]}x{dims[1]}"}
def transform_clip_text(inputs: Dict) -> Any:
    """Transform function for CLIPTextEncode nodes: return the raw text ('' when absent)."""
    if "text" in inputs:
        return inputs["text"]
    return ""
def transform_flux_guidance(inputs: Dict) -> Dict:
    """Transform function for FluxGuidance nodes.

    Passes guidance through; a string conditioning becomes the prompt,
    anything else is reported as "Unknown prompt".
    """
    out = {}
    if "guidance" in inputs:
        out["guidance"] = inputs["guidance"]
    if "conditioning" in inputs:
        cond = inputs["conditioning"]
        out["prompt"] = cond if isinstance(cond, str) else "Unknown prompt"
    return out
def transform_unet_loader(inputs: Dict) -> Dict:
    """Transform function for UNETLoader node: report the UNET file as the checkpoint."""
    unet_name = inputs.get("unet_name", "")
    if not unet_name:
        return {}
    return {"checkpoint": unet_name}
def transform_checkpoint_loader(inputs: Dict) -> Dict:
    """Transform function for CheckpointLoaderSimple node: report the checkpoint file name."""
    ckpt_name = inputs.get("ckpt_name", "")
    if not ckpt_name:
        return {}
    return {"checkpoint": ckpt_name}
def transform_latent_upscale_by(inputs: Dict) -> Dict:
    """Transform function for LatentUpscaleBy node: scale upstream latent dimensions."""
    factor = inputs["scale_by"]
    samples = inputs["samples"]
    width = samples.get("width", 0) * factor
    height = samples.get("height", 0) * factor
    return {"width": width, "height": height, "size": f"{width}x{height}"}
def transform_clip_set_last_layer(inputs: Dict) -> Dict:
    """Expose CLIPSetLastLayer's stop_at_clip_layer as "clip_skip" when present."""
    if "stop_at_clip_layer" not in inputs:
        return {}
    return {"clip_skip": inputs["stop_at_clip_layer"]}
# =============================================================================
# Node Mapper Definitions
# =============================================================================
# Define the mappers for ComfyUI core nodes not in main mapper
# Each entry maps a node class_type to the inputs worth resolving plus the
# transform that folds those inputs into the parser's flat output dict.
# (Picked up by mappers.load_extensions and merged into NODE_MAPPERS.)
NODE_MAPPERS_EXT = {
    # KSamplers
    "SamplerCustomAdvanced": {
        "inputs_to_track": ["noise", "guider", "sampler", "sigmas", "latent_image"],
        "transform_func": transform_sampler_custom_advanced
    },
    "KSampler": {
        "inputs_to_track": [
            "seed", "steps", "cfg", "sampler_name", "scheduler",
            "denoise", "positive", "negative", "latent_image",
            "model", "clip_skip"
        ],
        "transform_func": transform_ksampler
    },
    # ComfyUI core nodes
    "EmptyLatentImage": {
        "inputs_to_track": ["width", "height", "batch_size"],
        "transform_func": transform_empty_latent
    },
    # SD3 variant shares the same width/height extraction
    "EmptySD3LatentImage": {
        "inputs_to_track": ["width", "height", "batch_size"],
        "transform_func": transform_empty_latent
    },
    "CLIPTextEncode": {
        "inputs_to_track": ["text", "clip"],
        "transform_func": transform_clip_text
    },
    "FluxGuidance": {
        "inputs_to_track": ["guidance", "conditioning"],
        "transform_func": transform_flux_guidance
    },
    "RandomNoise": {
        "inputs_to_track": ["noise_seed"],
        "transform_func": transform_random_noise
    },
    "KSamplerSelect": {
        "inputs_to_track": ["sampler_name"],
        "transform_func": transform_ksampler_select
    },
    "BasicScheduler": {
        "inputs_to_track": ["scheduler", "steps", "denoise", "model"],
        "transform_func": transform_basic_scheduler
    },
    "BasicGuider": {
        "inputs_to_track": ["model", "conditioning"],
        "transform_func": transform_basic_guider
    },
    "ModelSamplingFlux": {
        "inputs_to_track": ["max_shift", "base_shift", "width", "height", "model"],
        "transform_func": transform_model_sampling_flux
    },
    "UNETLoader": {
        "inputs_to_track": ["unet_name"],
        "transform_func": transform_unet_loader
    },
    "CheckpointLoaderSimple": {
        "inputs_to_track": ["ckpt_name"],
        "transform_func": transform_checkpoint_loader
    },
    # Upscale nodes: LatentUpscale carries explicit width/height, so it can
    # reuse the empty-latent transform; LatentUpscaleBy needs its own.
    "LatentUpscale": {
        "inputs_to_track": ["width", "height"],
        "transform_func": transform_empty_latent
    },
    "LatentUpscaleBy": {
        "inputs_to_track": ["samples", "scale_by"],
        "transform_func": transform_latent_upscale_by
    },
    "CLIPSetLastLayer": {
        "inputs_to_track": ["clip", "stop_at_clip_layer"],
        "transform_func": transform_clip_set_last_layer
    }
}

View File

@@ -0,0 +1,74 @@
"""
KJNodes mappers extension for ComfyUI workflow parsing
"""
import logging
import re
from typing import Dict, Any
logger = logging.getLogger(__name__)
# =============================================================================
# Transform Functions
# =============================================================================
def transform_join_strings(inputs: Dict) -> str:
    """Concatenate string1 and string2 with the node's delimiter between them."""
    first = inputs.get("string1", "")
    second = inputs.get("string2", "")
    sep = inputs.get("delimiter", "")
    return f"{first}{sep}{second}"
def transform_string_constant(inputs: Dict) -> str:
    """Return the literal string carried by a StringConstant node ("" if unset)."""
    value = inputs.get("string", "")
    return value
def transform_empty_latent_presets(inputs: Dict) -> Dict:
    """Parse a preset dimension label like "512 x 768 (2:3)" into width/height.

    When the node's ``invert`` flag is set and both dimensions parsed
    successfully, width and height are swapped before building the
    "WxH" size string. Unparseable/missing labels yield zeros.
    """
    width = height = 0
    dims_text = inputs.get("dimensions", "")
    if dims_text:
        # Pull the two numbers out of the "<w> x <h>" portion of the label
        found = re.search(r'(\d+)\s*x\s*(\d+)', dims_text)
        if found:
            width, height = int(found.group(1)), int(found.group(2))
    if inputs.get("invert", False) and width and height:
        width, height = height, width
    return {"width": width, "height": height, "size": f"{width}x{height}"}
def transform_int_constant(inputs: Dict) -> int:
    """Return the integer value carried by an INTConstant node (0 if unset)."""
    value = inputs.get("value", 0)
    return value
# =============================================================================
# Node Mapper Definitions
# =============================================================================
# Define the mappers for KJNodes
# Each entry maps a KJNodes class_type to the inputs worth resolving plus the
# transform applied to them (merged into NODE_MAPPERS by load_extensions).
NODE_MAPPERS_EXT = {
    "JoinStrings": {
        "inputs_to_track": ["string1", "string2", "delimiter"],
        "transform_func": transform_join_strings
    },
    "StringConstantMultiline": {
        "inputs_to_track": ["string"],
        "transform_func": transform_string_constant
    },
    "EmptyLatentImagePresets": {
        "inputs_to_track": ["dimensions", "invert", "batch_size"],
        "transform_func": transform_empty_latent_presets
    },
    "INTConstant": {
        "inputs_to_track": ["value"],
        "transform_func": transform_int_constant
    }
}

37
py/workflow/main.py Normal file
View File

@@ -0,0 +1,37 @@
"""
Main entry point for the workflow parser module
"""
import os
import sys
import logging
from typing import Dict, Optional, Union
# Add the parent directory to sys.path to enable imports
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))  # .../py/workflow
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..'))  # repo root; NOTE(review): not used in this module — confirm it is still needed
sys.path.insert(0, os.path.dirname(SCRIPT_DIR))  # .../py, so "workflow" resolves as a package
from .parser import parse_workflow
logger = logging.getLogger(__name__)
def parse_comfyui_workflow(
    workflow_path: str,
    output_path: Optional[str] = None
) -> Dict:
    """Parse a ComfyUI workflow file and extract generation parameters.

    Thin public wrapper around :func:`parser.parse_workflow`.

    Args:
        workflow_path: Path to the workflow JSON file
        output_path: Optional path to save the output JSON

    Returns:
        Dictionary containing extracted parameters
    """
    result = parse_workflow(workflow_path, output_path)
    return result
if __name__ == "__main__":
    # If run directly, use the CLI
    # (relative import: this file must be executed as a module, e.g. `python -m`)
    from .cli import main
    main()

282
py/workflow/mappers.py Normal file
View File

@@ -0,0 +1,282 @@
"""
Node mappers for ComfyUI workflow parsing
"""
import logging
import os
import importlib.util
import inspect
from typing import Dict, List, Any, Optional, Union, Type, Callable, Tuple
logger = logging.getLogger(__name__)
# Global mapper registry: node class_type -> mapper definition dict
# (populated by register_mapper / register_all_mappers below)
_MAPPER_REGISTRY: Dict[str, Dict] = {}
# =============================================================================
# Mapper Definition Functions
# =============================================================================
def create_mapper(
    node_type: str,
    inputs_to_track: List[str],
    transform_func: Callable[[Dict], Any] = None
) -> Dict:
    """Build a mapper definition dict for a node type.

    Args:
        node_type: The ComfyUI class_type the mapper applies to.
        inputs_to_track: Input names worth resolving for this node.
        transform_func: Optional post-processing of the resolved inputs;
            defaults to the identity function (inputs pass through unchanged).
    """
    if transform_func is None:
        transform_func = lambda inputs: inputs
    return {
        "node_type": node_type,
        "inputs_to_track": inputs_to_track,
        "transform": transform_func,
    }
def register_mapper(mapper: Dict) -> None:
    """Insert a mapper into the global registry, keyed by its node type.

    Re-registering the same node type overwrites the previous entry.
    """
    key = mapper["node_type"]
    _MAPPER_REGISTRY[key] = mapper
    logger.debug(f"Registered mapper for node type: {mapper['node_type']}")
def get_mapper(node_type: str) -> Optional[Dict]:
    """Look up the mapper registered for ``node_type`` (None when absent)."""
    return _MAPPER_REGISTRY.get(node_type, None)
def get_all_mappers() -> Dict[str, Dict]:
    """Return a shallow copy of the full mapper registry."""
    snapshot = dict(_MAPPER_REGISTRY)
    return snapshot
# =============================================================================
# Node Processing Function
# =============================================================================
def process_node(node_id: str, node_data: Dict, workflow: Dict, parser: 'WorkflowParser') -> Any: # type: ignore
    """Process a node using its mapper and extract relevant information

    For each input the mapper tracks: literal values are copied through,
    while [node_id, output_slot] references are resolved by recursing into
    ``parser.process_node`` (which handles caching and cycle detection).
    The mapper's transform is then applied to the collected inputs; if the
    transform raises, the untransformed input dict is returned instead.
    """
    node_type = node_data.get("class_type")
    mapper = get_mapper(node_type)
    if not mapper:
        logger.warning(f"No mapper found for node type: {node_type}")
        return None
    result = {}
    # Extract inputs based on the mapper's tracked inputs
    for input_name in mapper["inputs_to_track"]:
        if input_name in node_data.get("inputs", {}):
            input_value = node_data["inputs"][input_name]
            # Check if input is a reference to another node's output
            if isinstance(input_value, list) and len(input_value) == 2:
                try:
                    # Format is [node_id, output_slot]
                    ref_node_id, output_slot = input_value
                    # Convert node_id to string if it's an integer
                    if isinstance(ref_node_id, int):
                        ref_node_id = str(ref_node_id)
                    # Recursively process the referenced node
                    ref_value = parser.process_node(ref_node_id, workflow)
                    if ref_value is not None:
                        result[input_name] = ref_value
                    else:
                        # If we couldn't get a value from the reference, store the raw value
                        result[input_name] = input_value
                except Exception as e:
                    logger.error(f"Error processing reference in node {node_id}, input {input_name}: {e}")
                    result[input_name] = input_value
            else:
                # Direct value
                result[input_name] = input_value
    # Apply the transform function
    try:
        return mapper["transform"](result)
    except Exception as e:
        # Fall back to the raw resolved inputs so callers still get something usable
        logger.error(f"Error in transform function for node {node_id} of type {node_type}: {e}")
        return result
# =============================================================================
# Transform Functions
# =============================================================================
def transform_lora_loader(inputs: Dict) -> Dict:
    """Transform function for LoraLoader nodes.

    Builds "<lora:name:strength>" text for every active lora on the node,
    appends any upstream lora_stack entries, and forwards the upstream
    checkpoint name plus clip_skip when available.

    Fix: the "lora_stack" input may be a resolved {"lora_stack": [...]}
    dict, a plain list of (name, strength) pairs, or an unresolved
    [node_id, slot] reference. The original `.get("lora_stack", {}).get(...)`
    chain raised AttributeError on lists (transform_lora_stacker already
    tolerated them); same hardening applied to a non-dict "model" input.
    """
    loras_data = inputs.get("loras", [])
    # Normalize lora_stack into a plain list of entries
    stack_input = inputs.get("lora_stack", {})
    if isinstance(stack_input, dict):
        lora_stack = stack_input.get("lora_stack", [])
    elif isinstance(stack_input, list):
        lora_stack = stack_input
    else:
        lora_stack = []
    lora_texts = []
    # Normalize the loras payload (widget dicts wrap the list in "__value__")
    if isinstance(loras_data, dict) and "__value__" in loras_data:
        loras_list = loras_data["__value__"]
    elif isinstance(loras_data, list):
        loras_list = loras_data
    else:
        loras_list = []
    # Emit text only for entries the user left active
    for lora in loras_list:
        if isinstance(lora, dict) and lora.get("active", False):
            lora_name = lora.get("name", "")
            strength = lora.get("strength", 1.0)
            lora_texts.append(f"<lora:{lora_name}:{strength}>")
    # Append upstream stack entries, skipping a bare [node_id, slot] reference
    if lora_stack and isinstance(lora_stack, list):
        if not (len(lora_stack) == 2 and isinstance(lora_stack[0], (str, int)) and isinstance(lora_stack[1], int)):
            for stack_entry in lora_stack:
                try:
                    lora_name, strength = stack_entry[0], stack_entry[1]
                except (TypeError, IndexError, KeyError):
                    continue  # malformed entry — skip rather than crash
                lora_texts.append(f"<lora:{lora_name}:{strength}>")
    # "model" may also be an unresolved reference rather than a resolved dict
    model_input = inputs.get("model", {})
    checkpoint = model_input.get("checkpoint", "") if isinstance(model_input, dict) else ""
    result = {
        "checkpoint": checkpoint,
        "loras": " ".join(lora_texts)
    }
    if "clip" in inputs and isinstance(inputs["clip"], dict):
        result["clip_skip"] = inputs["clip"].get("clip_skip", "-1")
    return result
def transform_lora_stacker(inputs: Dict) -> Dict:
    """Accumulate (name, strength) pairs from a LoraStacker node.

    Upstream stack entries (if any) come first, followed by every lora the
    user marked active on this node.
    """
    stack = []
    # Upstream stack: either a resolved {"lora_stack": [...]} dict or a raw
    # list; a bare [node_id, slot] node reference is ignored.
    upstream = inputs.get("lora_stack", [])
    if isinstance(upstream, dict) and "lora_stack" in upstream:
        stack.extend(upstream["lora_stack"])
    elif isinstance(upstream, list):
        is_node_ref = (
            len(upstream) == 2
            and isinstance(upstream[0], (str, int))
            and isinstance(upstream[1], int)
        )
        if not is_node_ref:
            stack.extend(upstream)
    # This node's own loras (widget dicts wrap the list in "__value__")
    raw_loras = inputs.get("loras", [])
    if isinstance(raw_loras, dict) and "__value__" in raw_loras:
        entries = raw_loras["__value__"]
    elif isinstance(raw_loras, list):
        entries = raw_loras
    else:
        entries = []
    for entry in entries:
        if isinstance(entry, dict) and entry.get("active", False):
            stack.append((entry.get("name", ""), float(entry.get("strength", 1.0))))
    return {"lora_stack": stack}
def transform_trigger_word_toggle(inputs: Dict) -> str:
    """Join the active, non-placeholder trigger words into a comma-separated list."""
    raw = inputs.get("toggle_trigger_words", [])
    # Widget dicts wrap the entry list in "__value__"
    if isinstance(raw, dict) and "__value__" in raw:
        entries = raw["__value__"]
    elif isinstance(raw, list):
        entries = raw
    else:
        entries = []
    selected = []
    for entry in entries:
        if not (isinstance(entry, dict) and entry.get("active", False)):
            continue
        word = entry.get("text", "")
        # "__dummy" rows are UI placeholders, not real trigger words
        if word and not word.startswith("__dummy"):
            selected.append(word)
    return ", ".join(selected)
# =============================================================================
# Node Mapper Definitions
# =============================================================================
# Central definition of all supported node types and their configurations
NODE_MAPPERS = {
    # LoraManager nodes
    # Each entry: node class_type -> inputs worth resolving plus the transform
    # applied to them; extensions append to this dict via load_extensions().
    "Lora Loader (LoraManager)": {
        "inputs_to_track": ["model", "clip", "loras", "lora_stack"],
        "transform_func": transform_lora_loader
    },
    "Lora Stacker (LoraManager)": {
        "inputs_to_track": ["loras", "lora_stack"],
        "transform_func": transform_lora_stacker
    },
    "TriggerWord Toggle (LoraManager)": {
        "inputs_to_track": ["toggle_trigger_words"],
        "transform_func": transform_trigger_word_toggle
    }
}
def register_all_mappers() -> None:
    """Create and register a mapper for every entry in NODE_MAPPERS."""
    for node_type, config in NODE_MAPPERS.items():
        register_mapper(
            create_mapper(
                node_type=node_type,
                inputs_to_track=config["inputs_to_track"],
                transform_func=config["transform_func"],
            )
        )
    logger.info(f"Registered {len(NODE_MAPPERS)} node mappers")
# =============================================================================
# Extension Loading
# =============================================================================
def load_extensions(ext_dir: str = None) -> None:
    """
    Load mapper extensions from the specified directory

    Extension files should define a NODE_MAPPERS_EXT dictionary containing mapper configurations.
    These will be added to the global NODE_MAPPERS dictionary and registered automatically.

    Args:
        ext_dir: Directory to scan for extension modules; defaults to the
            "ext" folder next to this file (created, then skipped, if missing).
    """
    # Use default path if none provided
    if ext_dir is None:
        current_dir = os.path.dirname(os.path.abspath(__file__))
        ext_dir = os.path.join(current_dir, 'ext')
    # Ensure the extension directory exists; nothing to load on first run
    if not os.path.exists(ext_dir):
        os.makedirs(ext_dir, exist_ok=True)
        logger.info(f"Created extension directory: {ext_dir}")
        return
    # Load each Python file in the extension directory
    for filename in os.listdir(ext_dir):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_path = os.path.join(ext_dir, filename)
            module_name = f"workflow.ext.{filename[:-3]}"  # strip .py
            try:
                # Import the file as a standalone module
                spec = importlib.util.spec_from_file_location(module_name, module_path)
                if spec and spec.loader:
                    module = importlib.util.module_from_spec(spec)
                    spec.loader.exec_module(module)
                    # Merge the extension's mapper table into the global one
                    if hasattr(module, 'NODE_MAPPERS_EXT'):
                        NODE_MAPPERS.update(module.NODE_MAPPERS_EXT)
                        # Fix: log the actual extension filename (message previously
                        # contained a garbled "(unknown)" placeholder)
                        logger.info(f"Added {len(module.NODE_MAPPERS_EXT)} mappers from extension: {filename}")
                    else:
                        logger.warning(f"Extension {filename} does not define NODE_MAPPERS_EXT dictionary")
            except Exception as e:
                # Best-effort: a broken extension must not break the parser
                logger.warning(f"Error loading extension {filename}: {e}")
    # Re-register all mappers after loading extensions
    register_all_mappers()
# Initialize the registry with default mappers
# register_default_mappers()

181
py/workflow/parser.py Normal file
View File

@@ -0,0 +1,181 @@
"""
Main workflow parser implementation for ComfyUI
"""
import json
import logging
from typing import Dict, List, Any, Optional, Union, Set
from .mappers import get_mapper, get_all_mappers, load_extensions, process_node
from .utils import (
load_workflow, save_output, find_node_by_type,
trace_model_path
)
logger = logging.getLogger(__name__)
class WorkflowParser:
    """Parser for ComfyUI workflows

    Walks a prompt-format workflow (node_id -> {class_type, inputs}) starting
    from its primary sampler node, recursively resolving node references via
    the registered mappers, and returns a flat dict of generation parameters.
    """
    def __init__(self):
        """Initialize the parser with mappers"""
        self.processed_nodes: Set[str] = set()  # Track processed nodes to avoid cycles
        self.node_results_cache: Dict[str, Any] = {}  # Cache for processed node results
        # Load extensions (this also (re)registers every mapper)
        load_extensions()
    def process_node(self, node_id: str, workflow: Dict) -> Any:
        """Process a single node and extract relevant information

        Cached results are returned immediately; a node currently being
        processed (i.e. revisited through a reference cycle) yields None.
        """
        # Return cached result if available
        if node_id in self.node_results_cache:
            return self.node_results_cache[node_id]
        # Check if we're in a cycle
        if node_id in self.processed_nodes:
            return None
        # Mark this node as being processed (to detect cycles)
        self.processed_nodes.add(node_id)
        if node_id not in workflow:
            self.processed_nodes.remove(node_id)
            return None
        node_data = workflow[node_id]
        node_type = node_data.get("class_type")
        result = None
        if get_mapper(node_type):
            try:
                # Delegate to the mapper-driven processor (may recurse back here)
                result = process_node(node_id, node_data, workflow, self)
                # Cache the result
                self.node_results_cache[node_id] = result
            except Exception as e:
                logger.error(f"Error processing node {node_id} of type {node_type}: {e}", exc_info=True)
                # Return a partial result or None depending on how we want to handle errors
                result = {}
        # Remove node from processed set to allow it to be processed again in a different context
        self.processed_nodes.remove(node_id)
        return result
    def find_primary_sampler_node(self, workflow: Dict) -> Optional[str]:
        """
        Find the primary sampler node in the workflow.
        Priority:
        1. First try to find a SamplerCustomAdvanced node
        2. If not found, look for KSampler nodes with denoise=1.0
        3. If still not found, use the first KSampler node
        Args:
            workflow: The workflow data as a dictionary
        Returns:
            The node ID of the primary sampler node, or None if not found
        """
        # First check for SamplerCustomAdvanced nodes
        sampler_advanced_nodes = []
        ksampler_nodes = []
        # Scan workflow for sampler nodes
        for node_id, node_data in workflow.items():
            node_type = node_data.get("class_type")
            if node_type == "SamplerCustomAdvanced":
                sampler_advanced_nodes.append(node_id)
            elif node_type == "KSampler":
                ksampler_nodes.append(node_id)
        # If we found SamplerCustomAdvanced nodes, return the first one
        if sampler_advanced_nodes:
            logger.debug(f"Found SamplerCustomAdvanced node: {sampler_advanced_nodes[0]}")
            return sampler_advanced_nodes[0]
        # If we have KSampler nodes, look for one with denoise=1.0
        # (denoise=1.0 indicates the base generation pass, not a hires fix)
        if ksampler_nodes:
            for node_id in ksampler_nodes:
                node_data = workflow[node_id]
                inputs = node_data.get("inputs", {})
                denoise = inputs.get("denoise", 0)
                # Check if denoise is 1.0 (allowing for small floating point differences)
                if abs(float(denoise) - 1.0) < 0.001:
                    logger.debug(f"Found KSampler node with denoise=1.0: {node_id}")
                    return node_id
            # If no KSampler with denoise=1.0 found, use the first one
            logger.debug(f"No KSampler with denoise=1.0 found, using first KSampler: {ksampler_nodes[0]}")
            return ksampler_nodes[0]
        # No sampler nodes found
        logger.warning("No sampler nodes found in workflow")
        return None
    def parse_workflow(self, workflow_data: Union[str, Dict], output_path: Optional[str] = None) -> Dict:
        """
        Parse the workflow and extract generation parameters
        Args:
            workflow_data: The workflow data as a dictionary or a file path
            output_path: Optional path to save the output JSON
        Returns:
            Dictionary containing extracted parameters
        """
        # Load workflow from file if needed
        if isinstance(workflow_data, str):
            workflow = load_workflow(workflow_data)
        else:
            workflow = workflow_data
        # Reset the processed nodes tracker and cache
        self.processed_nodes = set()
        self.node_results_cache = {}
        # Find the primary sampler node
        sampler_node_id = self.find_primary_sampler_node(workflow)
        if not sampler_node_id:
            logger.warning("No suitable sampler node found in workflow")
            return {}
        # Process sampler node to extract parameters
        sampler_result = self.process_node(sampler_node_id, workflow)
        if not sampler_result:
            return {}
        # Return the sampler result directly - it's already in the format we need
        # This simplifies the structure and makes it easier to use in recipe_routes.py
        # Handle standard ComfyUI names vs our output format
        if "cfg" in sampler_result:
            sampler_result["cfg_scale"] = sampler_result.pop("cfg")
        # Add clip_skip = 1 to match reference output if not already present
        if "clip_skip" not in sampler_result:
            sampler_result["clip_skip"] = "1"
        # Ensure the prompt is a string and not a nested dictionary
        if "prompt" in sampler_result and isinstance(sampler_result["prompt"], dict):
            if "prompt" in sampler_result["prompt"]:
                sampler_result["prompt"] = sampler_result["prompt"]["prompt"]
        # Save the result if requested
        if output_path:
            save_output(sampler_result, output_path)
        return sampler_result
def parse_workflow(workflow_path: str, output_path: Optional[str] = None) -> Dict:
    """
    Parse a ComfyUI workflow file and extract generation parameters

    Constructs a fresh WorkflowParser per call so no node cache leaks
    between workflows.

    Args:
        workflow_path: Path to the workflow JSON file
        output_path: Optional path to save the output JSON

    Returns:
        Dictionary containing extracted parameters
    """
    return WorkflowParser().parse_workflow(workflow_path, output_path)

63
py/workflow/test.py Normal file
View File

@@ -0,0 +1,63 @@
"""
Test script for the ComfyUI workflow parser
"""
import os
import json
import logging
from .parser import parse_workflow
# Configure root logging for the standalone test run (INFO to stderr)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)
# Configure paths
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))  # py/workflow
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..'))  # repository root
REFS_DIR = os.path.join(ROOT_DIR, 'refs')      # reference inputs / expected outputs
OUTPUT_DIR = os.path.join(ROOT_DIR, 'output')  # parsed results are written here
def test_parse_flux_workflow():
    """Test parsing the flux example workflow

    Parses refs/flux_prompt.json, writes the result to the output folder,
    logs a summary, and (best-effort) compares loras/prompt against the
    reference refs/flux_output.json. A missing reference only logs a warning.
    """
    # Ensure output directory exists
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    # Define input and output paths
    input_path = os.path.join(REFS_DIR, 'flux_prompt.json')
    output_path = os.path.join(OUTPUT_DIR, 'parsed_flux_output.json')
    # Parse workflow
    logger.info(f"Parsing workflow: {input_path}")
    result = parse_workflow(input_path, output_path)
    # Print result summary
    logger.info(f"Output saved to: {output_path}")
    logger.info(f"Parsing completed. Result summary:")
    logger.info(f"  LoRAs: {result.get('loras', '')}")
    gen_params = result.get('gen_params', {})
    logger.info(f"  Prompt: {gen_params.get('prompt', '')[:50]}...")
    logger.info(f"  Steps: {gen_params.get('steps', '')}")
    logger.info(f"  Sampler: {gen_params.get('sampler', '')}")
    logger.info(f"  Size: {gen_params.get('size', '')}")
    # Compare with reference output
    ref_output_path = os.path.join(REFS_DIR, 'flux_output.json')
    try:
        with open(ref_output_path, 'r') as f:
            ref_output = json.load(f)
        # Simple validation (logged, not asserted)
        loras_match = result.get('loras', '') == ref_output.get('loras', '')
        prompt_match = gen_params.get('prompt', '') == ref_output.get('gen_params', {}).get('prompt', '')
        logger.info(f"Validation against reference:")
        logger.info(f"  LoRAs match: {loras_match}")
        logger.info(f"  Prompt match: {prompt_match}")
    except Exception as e:
        logger.warning(f"Failed to compare with reference output: {e}")
if __name__ == "__main__":
    # Allow running this file directly as a smoke test
    test_parse_flux_workflow()

120
py/workflow/utils.py Normal file
View File

@@ -0,0 +1,120 @@
"""
Utility functions for ComfyUI workflow parsing
"""
import json
import os
import logging
from typing import Dict, List, Any, Optional, Union, Set, Tuple
logger = logging.getLogger(__name__)
def load_workflow(workflow_path: str) -> Dict:
    """Read and deserialize a workflow JSON file.

    Raises:
        Exception: re-raised after logging when the file is missing or
            contains invalid JSON.
    """
    try:
        with open(workflow_path, 'r', encoding='utf-8') as handle:
            data = json.load(handle)
        return data
    except Exception as e:
        logger.error(f"Error loading workflow from {workflow_path}: {e}")
        raise
def save_output(output: Dict, output_path: str) -> None:
    """Serialize the parsed result to ``output_path`` as pretty-printed JSON.

    Parent directories are created as needed; write failures are logged
    and re-raised.
    """
    parent = os.path.dirname(os.path.abspath(output_path))
    os.makedirs(parent, exist_ok=True)
    try:
        with open(output_path, 'w', encoding='utf-8') as handle:
            json.dump(output, handle, indent=4)
    except Exception as e:
        logger.error(f"Error saving output to {output_path}: {e}")
        raise
def find_node_by_type(workflow: Dict, node_type: str) -> Optional[str]:
    """Return the ID of the first node whose class_type matches, else None."""
    return next(
        (node_id for node_id, node_data in workflow.items()
         if node_data.get("class_type") == node_type),
        None,
    )
def find_nodes_by_type(workflow: Dict, node_type: str) -> List[str]:
    """Return the IDs of every node whose class_type matches, in dict order."""
    matches = []
    for node_id, node_data in workflow.items():
        if node_data.get("class_type") == node_type:
            matches.append(node_id)
    return matches
def get_input_node_ids(workflow: Dict, node_id: str) -> Dict[str, Tuple[str, int]]:
    """
    Get the node IDs for all inputs of the given node

    Only inputs holding a [node_id, output_slot] connection are returned
    (direct literal values are skipped); an unknown node_id yields {}.

    Returns a dictionary mapping input names to (node_id, output_slot) tuples
    """
    links: Dict[str, Tuple[str, int]] = {}
    if node_id not in workflow:
        return links
    node_data = workflow[node_id]
    for input_name, input_value in node_data.get("inputs", {}).items():
        if isinstance(input_value, list) and len(input_value) == 2:
            source, slot = input_value
            links[input_name] = (str(source), slot)
    return links
def trace_model_path(workflow: Dict, start_node_id: str) -> List[str]:
    """
    Trace the model path backward from KSampler to find all LoRA nodes
    Args:
        workflow: The workflow data
        start_node_id: The starting node ID (usually KSampler)
    Returns:
        List of node IDs in the model path
    """
    model_path_nodes = []
    # Get the model input from the start node
    if start_node_id not in workflow:
        return model_path_nodes
    # Track visited nodes to avoid cycles
    visited = set()
    # Stack for depth-first search
    stack = []
    # Get model input reference if available
    start_node = workflow[start_node_id]
    if "inputs" in start_node and "model" in start_node["inputs"] and isinstance(start_node["inputs"]["model"], list):
        model_ref = start_node["inputs"]["model"]
        # model_ref is a [node_id, output_slot] pair; only the node id matters here
        stack.append(str(model_ref[0]))
    # Perform depth-first search
    while stack:
        node_id = stack.pop()
        # Skip if already visited
        if node_id in visited:
            continue
        # Mark as visited
        visited.add(node_id)
        # Skip if node doesn't exist
        if node_id not in workflow:
            continue
        node = workflow[node_id]
        node_type = node.get("class_type", "")
        # Add current node to result list if it's a LoRA node
        # (substring match, so loader and stacker variants both qualify)
        if "Lora" in node_type:
            model_path_nodes.append(node_id)
        # Add all input nodes that have a "model" or "lora_stack" output to the stack
        if "inputs" in node:
            for input_name, input_value in node["inputs"].items():
                if input_name in ["model", "lora_stack"] and isinstance(input_value, list) and len(input_value) == 2:
                    stack.append(str(input_value[0]))
    return model_path_nodes

View File

@@ -1,13 +1,18 @@
[project]
name = "comfyui-lora-manager"
description = "LoRA Manager for ComfyUI - Access it at http://localhost:8188/loras for managing LoRA models with previews and metadata integration."
version = "0.7.35-beta"
version = "0.8.6"
license = {file = "LICENSE"}
dependencies = [
"aiohttp",
"jinja2",
"safetensors",
"watchdog"
"watchdog",
"beautifulsoup4",
"piexif",
"Pillow",
"olefile", # for getting rid of warning message
"requests"
]
[project.urls]

View File

@@ -0,0 +1,100 @@
{
"id": 1387174,
"modelId": 1231067,
"name": "v1.0",
"createdAt": "2025-02-08T11:15:47.197Z",
"updatedAt": "2025-02-08T11:29:04.526Z",
"status": "Published",
"publishedAt": "2025-02-08T11:29:04.487Z",
"trainedWords": [
"ppstorybook"
],
"trainingStatus": null,
"trainingDetails": null,
"baseModel": "Flux.1 D",
"baseModelType": null,
"earlyAccessEndsAt": null,
"earlyAccessConfig": null,
"description": null,
"uploadType": "Created",
"usageControl": "Download",
"air": "urn:air:flux1:lora:civitai:1231067@1387174",
"stats": {
"downloadCount": 1436,
"ratingCount": 0,
"rating": 0,
"thumbsUpCount": 316
},
"model": {
"name": "Vivid Impressions Storybook Style",
"type": "LORA",
"nsfw": false,
"poi": false
},
"files": [
{
"id": 1289799,
"sizeKB": 18829.1484375,
"name": "pp-storybook_rank2_bf16.safetensors",
"type": "Model",
"pickleScanResult": "Success",
"pickleScanMessage": "No Pickle imports",
"virusScanResult": "Success",
"virusScanMessage": null,
"scannedAt": "2025-02-08T11:21:04.247Z",
"metadata": {
"format": "SafeTensor",
"size": null,
"fp": null
},
"hashes": {
"AutoV1": "F414C813",
"AutoV2": "9753338AB6",
"SHA256": "9753338AB693CA82BF89ED77A5D1912879E40051463EC6E330FB9866CE798668",
"CRC32": "A65AE7B3",
"BLAKE3": "A5F8AB95AC2486345E4ACCAE541FF19D97ED53EFB0A7CC9226636975A0437591",
"AutoV3": "34A22376739D"
},
"primary": true,
"downloadUrl": "https://civitai.com/api/download/models/1387174"
}
],
"images": [
{
"url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/42b875cf-c62b-41fa-a349-383b7f074351/width=832/56547310.jpeg",
"nsfwLevel": 1,
"width": 832,
"height": 1216,
"hash": "U5IiO6s-4Vn+0~EO^5xa00VsL#IU_O?E7yWC",
"type": "image",
"metadata": {
"hash": "U5IiO6s-4Vn+0~EO^5xa00VsL#IU_O?E7yWC",
"size": 1361590,
"width": 832,
"height": 1216
},
"meta": {
"Size": "832x1216",
"seed": 1116375220995209,
"Model": "flux_dev_fp8",
"steps": 23,
"hashes": {
"model": ""
},
"prompt": "ppstorybook,A dreamy bunny hopping across a rainbow bridge, with fluffy clouds surrounding it and tiny birds flying alongside, rendered in a magical, soft-focus style with pastel hues and glowing accents.",
"Version": "ComfyUI",
"sampler": "DPM++ 2M",
"cfgScale": 3.5,
"clipSkip": 1,
"resources": [],
"Model hash": ""
},
"availability": "Public",
"hasMeta": true,
"hasPositivePrompt": true,
"onSite": false,
"remixOfId": null
}
],
"downloadUrl": "https://civitai.com/api/download/models/1387174"
}

View File

@@ -0,0 +1,153 @@
{
"resource-stack": {
"class_type": "CheckpointLoaderSimple",
"inputs": { "ckpt_name": "urn:air:sdxl:checkpoint:civitai:827184@1410435" }
},
"resource-stack-1": {
"class_type": "LoraLoader",
"inputs": {
"lora_name": "urn:air:sdxl:lora:civitai:1107767@1253442",
"strength_model": 1,
"strength_clip": 1,
"model": ["resource-stack", 0],
"clip": ["resource-stack", 1]
}
},
"resource-stack-2": {
"class_type": "LoraLoader",
"inputs": {
"lora_name": "urn:air:sdxl:lora:civitai:1342708@1516344",
"strength_model": 1,
"strength_clip": 1,
"model": ["resource-stack-1", 0],
"clip": ["resource-stack-1", 1]
}
},
"resource-stack-3": {
"class_type": "LoraLoader",
"inputs": {
"lora_name": "urn:air:sdxl:lora:civitai:122359@135867",
"strength_model": 1.55,
"strength_clip": 1,
"model": ["resource-stack-2", 0],
"clip": ["resource-stack-2", 1]
}
},
"6": {
"class_type": "smZ CLIPTextEncode",
"inputs": {
"text": "masterpiece, best quality, amazing quality, detailed setting, detailed background, 1girl, yunyun (konosuba), nude, red eyes, hair ornament, braid, hair between eyes,low twintails, pink ribbon, bow, hair bow, pussy, frilled skirt, layered skirt, belt, pink thighhighs, (pussy juice), large insertion, vaginal tugging, pussy grip, detailed skin, detailed soles, stretched pussy, feet in stockings, ass, nipples, medium breasts, french kiss, anus, shocked, nervous, penis awe, BREAK Professor\u0027s office, college student, pornographic, 1boy, close eyes, (musscular male, detailed large cock), vaginal sex, college office setting, ass grab, fucking, riding, cowgirl, erotic, side view, deep fucking",
"parser": "comfy",
"text_g": "",
"text_l": "",
"ascore": 2.5,
"width": 0,
"height": 0,
"crop_w": 0,
"crop_h": 0,
"target_width": 0,
"target_height": 0,
"smZ_steps": 1,
"mean_normalization": true,
"multi_conditioning": true,
"use_old_emphasis_implementation": false,
"with_SDXL": false,
"clip": ["resource-stack-3", 1]
},
"_meta": { "title": "Positive" }
},
"7": {
"class_type": "smZ CLIPTextEncode",
"inputs": {
"text": "bad quality,worst quality,worst detail,sketch,censor",
"parser": "comfy",
"text_g": "",
"text_l": "",
"ascore": 2.5,
"width": 0,
"height": 0,
"crop_w": 0,
"crop_h": 0,
"target_width": 0,
"target_height": 0,
"smZ_steps": 1,
"mean_normalization": true,
"multi_conditioning": true,
"use_old_emphasis_implementation": false,
"with_SDXL": false,
"clip": ["resource-stack-3", 1]
},
"_meta": { "title": "Negative" }
},
"20": {
"class_type": "UpscaleModelLoader",
"inputs": { "model_name": "urn:air:other:upscaler:civitai:147759@164821" },
"_meta": { "title": "Load Upscale Model" }
},
"17": {
"class_type": "LoadImage",
"inputs": {
"image": "https://orchestration.civitai.com/v2/consumer/blobs/5KZ6358TW8CNEGPZKD08NVDB30",
"upload": "image"
},
"_meta": { "title": "Image Load" }
},
"19": {
"class_type": "ImageUpscaleWithModel",
"inputs": { "upscale_model": ["20", 0], "image": ["17", 0] },
"_meta": { "title": "Upscale Image (using Model)" }
},
"23": {
"class_type": "ImageScale",
"inputs": {
"upscale_method": "nearest-exact",
"crop": "disabled",
"width": 1280,
"height": 1856,
"image": ["19", 0]
},
"_meta": { "title": "Upscale Image" }
},
"21": {
"class_type": "VAEEncode",
"inputs": { "pixels": ["23", 0], "vae": ["resource-stack", 2] },
"_meta": { "title": "VAE Encode" }
},
"11": {
"class_type": "KSampler",
"inputs": {
"sampler_name": "euler_ancestral",
"scheduler": "normal",
"seed": 2088370631,
"steps": 47,
"cfg": 6.5,
"denoise": 0.3,
"model": ["resource-stack-3", 0],
"positive": ["6", 0],
"negative": ["7", 0],
"latent_image": ["21", 0]
},
"_meta": { "title": "KSampler" }
},
"13": {
"class_type": "VAEDecode",
"inputs": { "samples": ["11", 0], "vae": ["resource-stack", 2] },
"_meta": { "title": "VAE Decode" }
},
"12": {
"class_type": "SaveImage",
"inputs": { "filename_prefix": "ComfyUI", "images": ["13", 0] },
"_meta": { "title": "Save Image" }
},
"extra": {
"airs": [
"urn:air:other:upscaler:civitai:147759@164821",
"urn:air:sdxl:checkpoint:civitai:827184@1410435",
"urn:air:sdxl:lora:civitai:1107767@1253442",
"urn:air:sdxl:lora:civitai:1342708@1516344",
"urn:air:sdxl:lora:civitai:122359@135867"
]
},
"extraMetadata": "{\u0022prompt\u0022:\u0022masterpiece, best quality, amazing quality, detailed setting, detailed background, 1girl, yunyun (konosuba), nude, red eyes, hair ornament, braid, hair between eyes,low twintails, pink ribbon, bow, hair bow, pussy, frilled skirt, layered skirt, belt, pink thighhighs, (pussy juice), large insertion, vaginal tugging, pussy grip, detailed skin, detailed soles, stretched pussy, feet in stockings, ass, nipples, medium breasts, french kiss, anus, shocked, nervous, penis awe, BREAK Professor\u0027s office, college student, pornographic, 1boy, close eyes, (musscular male, detailed large cock), vaginal sex, college office setting, ass grab, fucking, riding, cowgirl, erotic, side view, deep fucking\u0022,\u0022negativePrompt\u0022:\u0022bad quality,worst quality,worst detail,sketch,censor\u0022,\u0022steps\u0022:47,\u0022cfgScale\u0022:6.5,\u0022sampler\u0022:\u0022euler_ancestral\u0022,\u0022workflowId\u0022:\u0022img2img-hires\u0022,\u0022resources\u0022:[{\u0022modelVersionId\u0022:1410435,\u0022strength\u0022:1},{\u0022modelVersionId\u0022:1410435,\u0022strength\u0022:1},{\u0022modelVersionId\u0022:1253442,\u0022strength\u0022:1},{\u0022modelVersionId\u0022:1516344,\u0022strength\u0022:1},{\u0022modelVersionId\u0022:135867,\u0022strength\u0022:1.55}],\u0022remixOfId\u0022:32140259}"
}

15
refs/flux_output.json Normal file
View File

@@ -0,0 +1,15 @@
{
"loras": "<lora:pp-enchanted-whimsy:0.9> <lora:ral-frctlgmtry_flux:1> <lora:pp-storybook_rank2_bf16:0.8>",
"gen_params": {
"prompt": "in the style of ppWhimsy, ral-frctlgmtry, ppstorybook,Stylized geek cat artist with glasses and a paintbrush, smiling at the viewer while holding a sign that reads 'Stay tuned!', solid white background",
"negative_prompt": "",
"steps": "25",
"sampler": "dpmpp_2m",
"scheduler": "beta",
"cfg": "1",
"seed": "48",
"guidance": 3.5,
"size": "896x1152",
"clip_skip": "2"
}
}

314
refs/flux_prompt.json Normal file
View File

@@ -0,0 +1,314 @@
{
"6": {
"inputs": {
"text": [
"46",
0
],
"clip": [
"58",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"31",
0
],
"vae": [
"39",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"27": {
"inputs": {
"width": 896,
"height": 1152,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage",
"_meta": {
"title": "EmptySD3LatentImage"
}
},
"31": {
"inputs": {
"seed": 44,
"steps": 25,
"cfg": 1,
"sampler_name": "dpmpp_2m",
"scheduler": "beta",
"denoise": 1,
"model": [
"58",
0
],
"positive": [
"35",
0
],
"negative": [
"33",
0
],
"latent_image": [
"27",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"33": {
"inputs": {
"text": "",
"clip": [
"58",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Negative Prompt)"
}
},
"35": {
"inputs": {
"guidance": 3.5,
"conditioning": [
"6",
0
]
},
"class_type": "FluxGuidance",
"_meta": {
"title": "FluxGuidance"
}
},
"37": {
"inputs": {
"unet_name": "flux\\flux1-dev-fp8-e4m3fn.safetensors",
"weight_dtype": "fp8_e4m3fn_fast"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"38": {
"inputs": {
"clip_name1": "t5xxl_fp8_e4m3fn.safetensors",
"clip_name2": "clip_l.safetensors",
"type": "flux",
"device": "default"
},
"class_type": "DualCLIPLoader",
"_meta": {
"title": "DualCLIPLoader"
}
},
"39": {
"inputs": {
"vae_name": "flux1\\ae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"46": {
"inputs": {
"string1": [
"59",
0
],
"string2": [
"51",
0
],
"delimiter": ","
},
"class_type": "JoinStrings",
"_meta": {
"title": "Join Strings"
}
},
"50": {
"inputs": {
"images": [
"8",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"51": {
"inputs": {
"string": "Stylized geek cat artist with glasses and a paintbrush, smiling at the viewer while holding a sign that reads 'Stay tuned!', solid white background",
"strip_newlines": true
},
"class_type": "StringConstantMultiline",
"_meta": {
"title": "positive"
}
},
"58": {
"inputs": {
"text": "<lora:pp-enchanted-whimsy:0.9><lora:ral-frctlgmtry_flux:1><lora:pp-storybook_rank2_bf16:0.8>",
"loras": [
{
"name": "pp-enchanted-whimsy",
"strength": "0.90",
"active": false
},
{
"name": "ral-frctlgmtry_flux",
"strength": "0.85",
"active": false
},
{
"name": "pp-storybook_rank2_bf16",
"strength": 0.8,
"active": true
},
{
"name": "__dummy_item1__",
"strength": 0,
"active": false,
"_isDummy": true
},
{
"name": "__dummy_item2__",
"strength": 0,
"active": false,
"_isDummy": true
}
],
"model": [
"37",
0
],
"clip": [
"38",
0
]
},
"class_type": "Lora Loader (LoraManager)",
"_meta": {
"title": "Lora Loader (LoraManager)"
}
},
"59": {
"inputs": {
"group_mode": "",
"toggle_trigger_words": [
{
"text": "ppstorybook",
"active": false
},
{
"text": "__dummy_item__",
"active": false,
"_isDummy": true
},
{
"text": "__dummy_item__",
"active": false,
"_isDummy": true
}
],
"orinalMessage": "ppstorybook",
"trigger_words": [
"58",
2
]
},
"class_type": "TriggerWord Toggle (LoraManager)",
"_meta": {
"title": "TriggerWord Toggle (LoraManager)"
}
},
"61": {
"inputs": {
"add_noise": "enable",
"noise_seed": 1111423448930884,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"start_at_step": 0,
"end_at_step": 10000,
"return_with_leftover_noise": "disable"
},
"class_type": "KSamplerAdvanced",
"_meta": {
"title": "KSampler (Advanced)"
}
},
"62": {
"inputs": {
"sigmas": [
"63",
0
]
},
"class_type": "SamplerCustomAdvanced",
"_meta": {
"title": "SamplerCustomAdvanced"
}
},
"63": {
"inputs": {
"scheduler": "normal",
"steps": 20,
"denoise": 1
},
"class_type": "BasicScheduler",
"_meta": {
"title": "BasicScheduler"
}
},
"64": {
"inputs": {
"seed": 1089899258710474,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"65": {
"inputs": {
"text": ",Stylized geek cat artist with glasses and a paintbrush, smiling at the viewer while holding a sign that reads 'Stay tuned!', solid white background",
"anything": [
"46",
0
]
},
"class_type": "easy showAnything",
"_meta": {
"title": "Show Any"
}
}
}

View File

@@ -0,0 +1,18 @@
a dynamic and dramatic digital artwork featuring a stylized anthropomorphic white tiger with striking yellow eyes. The tiger is depicted in a powerful stance, wielding a katana with one hand raised above its head. Its fur is detailed with black stripes, and its mane flows wildly, blending with the stormy background. The scene is set amidst swirling dark clouds and flashes of lightning, enhancing the sense of movement and energy. The composition is vertical, with the tiger positioned centrally, creating a sense of depth and intensity. The color palette is dominated by shades of blue, gray, and white, with bright highlights from the lightning. The overall style is reminiscent of fantasy or manga art, with a focus on dynamic action and dramatic lighting.
Negative prompt:
Steps: 30, Sampler: Undefined, CFG scale: 3.5, Seed: 90300501, Size: 832x1216, Clip skip: 2, Created Date: 2025-03-05T13:51:18.1770234Z, Civitai resources: [{"type":"checkpoint","modelVersionId":691639,"modelName":"FLUX","modelVersionName":"Dev"},{"type":"lora","weight":0.4,"modelVersionId":1202162,"modelName":"Velvet\u0027s Mythic Fantasy Styles | Flux \u002B Pony \u002B illustrious","modelVersionName":"Flux Gothic Lines"},{"type":"lora","weight":0.8,"modelVersionId":1470588,"modelName":"Velvet\u0027s Mythic Fantasy Styles | Flux \u002B Pony \u002B illustrious","modelVersionName":"Flux Retro"},{"type":"lora","weight":0.75,"modelVersionId":746484,"modelName":"Elden Ring - Yoshitaka Amano","modelVersionName":"V1"},{"type":"lora","weight":0.2,"modelVersionId":914935,"modelName":"Ink-style","modelVersionName":"ink-dynamic"},{"type":"lora","weight":0.2,"modelVersionId":1189379,"modelName":"Painterly Fantasy by ChronoKnight - [FLUX \u0026 IL]","modelVersionName":"FLUX"},{"type":"lora","weight":0.2,"modelVersionId":757030,"modelName":"Mezzotint Artstyle for Flux - by Ethanar","modelVersionName":"V1"}], Civitai metadata: {}
masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject,
dynamic angle, dutch angle, from below, epic half body portrait, gritty, wabi sabi, looking at viewer, woman is a geisha, parted lips,
holographic skin, holofoil glitter, faint, glowing, ethereal, neon hair, glowing hair, otherworldly glow, she is dangerous
<lora:ck-shadow-circuit-IL:0.78>, <lora:ck-nc-cyberpunk-IL-000011:0.4>, <lora:ck-neon-retrowave-IL:0.2>, <lora:ck-yoneyama-mai-IL-000014:0.4>
Negative prompt: score_6, score_5, score_4, bad quality, worst quality, worst detail, sketch, censorship, furry, window, headphones,
Steps: 30, Sampler: Euler a, Schedule type: Simple, CFG scale: 7, Seed: 1405717592, Size: 832x1216, Model hash: 1ad6ca7f70, Model: waiNSFWIllustrious_v100, Denoising strength: 0.35, Hires CFG Scale: 5, Hires upscale: 1.3, Hires steps: 20, Hires upscaler: 4x-AnimeSharp, Lora hashes: "ck-shadow-circuit-IL: 88e247aa8c3d, ck-nc-cyberpunk-IL-000011: 935e6755554c, ck-neon-retrowave-IL: edafb9df7da1, ck-yoneyama-mai-IL-000014: 1b9305692a2e", Version: f2.0.1v1.10.1-1.10.1, Diffusion in Low Bits: Automatic (fp16 LoRA)
Masterpiece, best quality, high quality, newest, highres, 8K, HDR, absurdres, 1girl, solo, futuristic warrior, sleek exosuit with glowing energy cores, long braided hair flowing behind, gripping a high-tech bow with an energy arrow drawn, standing on a floating platform overlooking a massive space station, planets and nebulae in the distance, soft glow from distant stars, cinematic depth, foreshortening, dynamic pose, dramatic sci-fi lighting.
Negative prompt: worst quality, normal quality, anatomical nonsense, bad anatomy,interlocked fingers, extra fingers,watermark,simple background, loli,
Steps: 20, Sampler: euler_ancestral_karras, CFG scale: 8.0, Seed: 691121152183439, Model: il\waiNSFWIllustrious_v110.safetensors, Model hash: c3688ee04c, Lora_0 Model name: iLLMythAn1m3Style.safetensors, Lora_0 Model hash: ba7a040786, Lora_0 Strength model: 1.0, Lora_0 Strength clip: 1.0, Hashes: {"model": "c3688ee04c", "lora:iLLMythAn1m3Style": "ba7a040786"}
Immerse yourself in the enchanting journey, where harmonious transmutation of Bauhaus art unites photographic precision and contemporary illustration, capturing an enthralling blend between vivid abstract nature and urban landscapes. Let your eyes be captivated by a kaleidoscope of rich, deep reds and yellows, entwined with intriguing shades that beckon a somber atmosphere. As your spirit ventures along this haunting path, witness the mysterious, high-angle perspective dominated by scattered clouds granting you a mesmerizing glimpse into the ever-transforming realm of metamorphosing environments. ,<lora:flux/fav/ck-charcoal-drawing-000014.safetensors:1.0:1.0>
Negative prompt:
Steps: 20, Sampler: Euler, CFG scale: 3.5, Seed: 885491426361006, Size: 832x1216, Model hash: 4610115bb0, Model: flux_dev, Hashes: {"LORA:flux/fav/ck-charcoal-drawing-000014.safetensors": "34d36c17c1", "model": "4610115bb0"}, Version: ComfyUI

3
refs/meta_format.txt Normal file
View File

@@ -0,0 +1,3 @@
In this ethereal masterpiece, metallic sculptures juxtapose effortlessly against a subtle backdrop of misty neutral hues. Exquisite curvatures and geometric shapes converge harmoniously, creating an illuminating realm of polished metallic surfaces. Shimmering copper, gleaming silver, and lustrous gold hues dance in perfect balance, highlighting the intricate play of light and shadow cast upon these celestial forms. A halo of diffused radiance envelops each piece, enhancing their textured depths and metallic brilliance while allowing delicate details to emerge from obscurity. The composition conveys a serene yet mesmerizing atmosphere, as if suspended in a dreamlike limbo between reality and fantasy. The tantalizing interplay of colors within this transcendent realm creates a profound sense of depth and grandeur that invites the viewer into an enchanting voyage through abstract metallic beauty. This captivating artwork evokes emotions of boundless curiosity and reverence reminiscent of the timeless works by artists such as Giorgio de Chirico or Paul Klee, while asserting a unique, modern artistic sensibility. With every observation, a new nuance unfolds, as if a never-ending story waiting to be discovered through the lens of metallic artistry.
Negative prompt:
Steps: 25, Sampler: dpmpp_2m_sgm_uniform, Seed: 471889513588087, Model: Fluxmania V5P.safetensors, Model hash: 8ae0583b06, VAE: ae.sft, VAE hash: afc8e28272, Lora_0 Model name: ArtVador I.safetensors, Lora_0 Model hash: 08f7133a58, Lora_0 Strength model: 0.65, Lora_0 Strength clip: 0.65, Lora_1 Model name: Kaoru Yamada.safetensors, Lora_1 Model hash: d4893f7202, Lora_1 Strength model: 0.75, Lora_1 Strength clip: 0.75, Hashes: {"model": "8ae0583b06", "vae": "afc8e28272", "lora:ArtVador I": "08f7133a58", "lora:Kaoru Yamada": "d4893f7202"}

11
refs/output.json Normal file
View File

@@ -0,0 +1,11 @@
{
"loras": "<lora:ck-neon-retrowave-IL-000012:0.8> <lora:aorunIllstrious:1> <lora:ck-shadow-circuit-IL-000012:0.78> <lora:MoriiMee_Gothic_Niji_Style_Illustrious_r1:0.45> <lora:ck-nc-cyberpunk-IL-000011:0.4>",
"prompt": "in the style of ck-rw, aorun, scales, makeup, bare shoulders, pointy ears, dress, claws, in the style of cksc, artist:moriimee, in the style of cknc, masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing",
"negative_prompt": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw",
"steps": "20",
"sampler": "euler_ancestral",
"cfg_scale": "8",
"seed": "241",
"size": "832x1216",
"clip_skip": "2"
}

401
refs/prompt.json Normal file
View File

@@ -0,0 +1,401 @@
{
"6": {
"inputs": {
"text": [
"301",
0
],
"clip": [
"299",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"13",
1
],
"vae": [
"10",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"10": {
"inputs": {
"vae_name": "flux1\\ae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"11": {
"inputs": {
"clip_name1": "t5xxl_fp8_e4m3fn.safetensors",
"clip_name2": "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors",
"type": "flux",
"device": "default"
},
"class_type": "DualCLIPLoader",
"_meta": {
"title": "DualCLIPLoader"
}
},
"13": {
"inputs": {
"noise": [
"147",
0
],
"guider": [
"22",
0
],
"sampler": [
"16",
0
],
"sigmas": [
"17",
0
],
"latent_image": [
"48",
0
]
},
"class_type": "SamplerCustomAdvanced",
"_meta": {
"title": "SamplerCustomAdvanced"
}
},
"16": {
"inputs": {
"sampler_name": "dpmpp_2m"
},
"class_type": "KSamplerSelect",
"_meta": {
"title": "KSamplerSelect"
}
},
"17": {
"inputs": {
"scheduler": "beta",
"steps": [
"246",
0
],
"denoise": 1,
"model": [
"28",
0
]
},
"class_type": "BasicScheduler",
"_meta": {
"title": "BasicScheduler"
}
},
"22": {
"inputs": {
"model": [
"28",
0
],
"conditioning": [
"29",
0
]
},
"class_type": "BasicGuider",
"_meta": {
"title": "BasicGuider"
}
},
"28": {
"inputs": {
"max_shift": 1.1500000000000001,
"base_shift": 0.5,
"width": [
"48",
1
],
"height": [
"48",
2
],
"model": [
"299",
0
]
},
"class_type": "ModelSamplingFlux",
"_meta": {
"title": "ModelSamplingFlux"
}
},
"29": {
"inputs": {
"guidance": 3.5,
"conditioning": [
"6",
0
]
},
"class_type": "FluxGuidance",
"_meta": {
"title": "FluxGuidance"
}
},
"48": {
"inputs": {
"resolution": "832x1216 (0.68)",
"batch_size": 1,
"width_override": 0,
"height_override": 0
},
"class_type": "SDXLEmptyLatentSizePicker+",
"_meta": {
"title": "🔧 SDXL Empty Latent Size Picker"
}
},
"65": {
"inputs": {
"unet_name": "flux\\flux1-dev-fp8-e4m3fn.safetensors",
"weight_dtype": "fp8_e4m3fn_fast"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"147": {
"inputs": {
"noise_seed": 651532572596956
},
"class_type": "RandomNoise",
"_meta": {
"title": "RandomNoise"
}
},
"148": {
"inputs": {
"wildcard_text": "__some-prompts__",
"populated_text": "A surreal digital artwork showcases a forward-thinking inventor captivated by his intricate mechanical creation through a large magnifying glass. Viewed from an unconventional perspective, the scene reveals an eccentric assembly of gears, springs, and brass instruments within his workshop. Soft, ethereal light radiates from the invention, casting enigmatic shadows on the walls as time appears to bend around its metallic form, invoking a sense of curiosity, wonder, and exhilaration in discovery.",
"mode": "fixed",
"seed": 553084268162351,
"Select to add Wildcard": "Select the Wildcard to add to the text"
},
"class_type": "ImpactWildcardProcessor",
"_meta": {
"title": "ImpactWildcardProcessor"
}
},
"151": {
"inputs": {
"text": "A hyper-realistic close-up portrait of a young woman with shoulder-length black hair styled in edgy, futuristic layers, adorned with glowing tips. She wears mecha eyewear with a neon green visor that transitions into iridescent shades of teal and gold. The frame is sleek, with angular edges and fine mechanical detailing. Her expression is fierce and confident, with flawless skin highlighted by the neon reflections. She wears a high-tech bodysuit with integrated LED lines and metallic panels. The background depicts a hazy rendition of The Great Wave off Kanagawa by Hokusai, its powerful waves blending seamlessly with the neon tones, amplifying her intense, defiant aura."
},
"class_type": "Text Multiline",
"_meta": {
"title": "Text Multiline"
}
},
"191": {
"inputs": {
"text": "A cinematic, oil painting masterpiece captures the essence of impressionistic surrealism, inspired by Claude Monet. A mysterious woman in a flowing crimson dress stands at the edge of a tranquil lake, where lily pads shimmer under an ethereal, golden twilight. The waters surface reflects a dreamlike sky, its swirling hues of violet and sapphire melting together like liquid light. The thick, expressive brushstrokes lend depth to the scene, evoking a sense of nostalgia and quiet longing, as if the world itself is caught between reality and a fleeting dream. \nA mesmerizing oil painting masterpiece inspired by Salvador Dalí, blending surrealism with post-impressionist texture. A lone violinist plays atop a melting clock tower, his form distorted by the passage of time. The sky is a cascade of swirling, liquid oranges and deep blues, where floating staircases spiral endlessly into the horizon. The impasto technique gives depth and movement to the surreal elements, making time itself feel fluid, as if the world is dissolving into a dream. \nA stunning impressionistic oil painting evokes the spirit of Edvard Munch, capturing a solitary figure standing on a rain-soaked street, illuminated by the glow of flickering gas lamps. The swirling, chaotic strokes of deep blues and fiery reds reflect the turbulence of emotion, while the blurred reflections in the wet cobblestone suggest a merging of past and present. The faceless figure, draped in a dark overcoat, seems lost in thought, embodying the ephemeral nature of memory and time. \nA breathtaking oil painting masterpiece, inspired by Gustav Klimt, presents a celestial ballroom where faceless dancers swirl in an eternal waltz beneath a gilded, star-speckled sky. Their golden garments shimmer with intricate patterns, blending into the opulent mosaic floor that seems to stretch into infinity. 
The dreamlike composition, rich in warm amber and deep sapphire hues, captures an otherworldly elegance, as if the dancers are suspended in a moment that transcends time. \nA visionary oil painting inspired by Marc Chagall depicts a dreamlike cityscape where gravity ceases to exist. A couple floats above a crimson-tinted town, their forms dissolving into the swirling strokes of a vast, cerulean sky. The buildings below twist and bend in rhythmic motion, their windows glowing like tiny stars. The thick, textured brushwork conveys a sense of weightlessness and wonder, as if love itself has defied the laws of the universe. \nAn impressionistic oil painting in the style of J.M.W. Turner, depicting a ghostly ship sailing through a sea of swirling golden mist. The waves crash and dissolve into abstract, fiery strokes of orange and deep indigo, blurring the line between ocean and sky. The ship appears almost ethereal, as if drifting between worlds, lost in the ever-changing tides of memory and myth. The dynamic brushstrokes capture the relentless power of nature and the fleeting essence of time. \nA captivating oil painting masterpiece, infused with surrealist impressionism, portrays a grand library where books float midair, their pages unraveling into ribbons of light. The towering shelves twist into the heavens, vanishing into an infinite, starry void. A lone scholar, illuminated by the glow of a suspended lantern, reaches for a book that seems to pulse with life. The scene pulses with mystery, where the impasto textures bring depth to the interplay between knowledge and dreams. \nA luminous impressionistic oil painting captures the melancholic beauty of an abandoned carnival, its faded carousel horses frozen mid-gallop beneath a sky of swirling lavender and gold. The wind carries fragments of forgotten laughter through the empty fairground, where scattered ticket stubs and crumbling banners whisper tales of joy long past. 
The thick, textured brushstrokes blend nostalgia with an eerie dreamlike quality, as if the carnival exists only in the echoes of memory. \nA surreal oil painting in the spirit of René Magritte, featuring a towering lighthouse that emits not light, but cascading waterfalls from its peak. The swirling sky, painted in deep midnight blues, is punctuated by glowing, crescent moons that defy gravity. A lone figure stands at the waters edge, gazing up in quiet contemplation, as if caught between wonder and the unknown. The paintings rich textures and luminous colors create an enigmatic, dreamlike landscape. \nA striking impressionistic oil painting, reminiscent of Van Gogh, portrays a lone traveler on a winding cobblestone path, their silhouette bathed in the golden glow of lantern-lit cherry blossoms. The petals swirl through the night air like glowing embers, blending with the deep, rhythmic strokes of a star-filled indigo sky. The scene captures a feeling of wistful solitude, as if the traveler is walking not only through the city, but through the fleeting nature of time itself."
},
"class_type": "Text Multiline",
"_meta": {
"title": "Text Multiline"
}
},
"203": {
"inputs": {
"string1": [
"289",
0
],
"string2": [
"293",
0
],
"delimiter": ", "
},
"class_type": "JoinStrings",
"_meta": {
"title": "Join Strings"
}
},
"208": {
"inputs": {
"file_path": "",
"dictionary_name": "[filename]",
"label": "TextBatch",
"mode": "automatic",
"index": 0,
"multiline_text": [
"191",
0
]
},
"class_type": "Text Load Line From File",
"_meta": {
"title": "Text Load Line From File"
}
},
"226": {
"inputs": {
"images": [
"8",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"246": {
"inputs": {
"value": 25
},
"class_type": "INTConstant",
"_meta": {
"title": "Steps"
}
},
"289": {
"inputs": {
"group_mode": true,
"toggle_trigger_words": [
{
"text": "bo-exposure",
"active": true
},
{
"text": "__dummy_item__",
"active": false,
"_isDummy": true
},
{
"text": "__dummy_item__",
"active": false,
"_isDummy": true
}
],
"orinalMessage": "bo-exposure",
"trigger_words": [
"299",
2
]
},
"class_type": "TriggerWord Toggle (LoraManager)",
"_meta": {
"title": "TriggerWord Toggle (LoraManager)"
}
},
"293": {
"inputs": {
"input": 1,
"text1": [
"208",
0
],
"text2": [
"151",
0
]
},
"class_type": "easy textSwitch",
"_meta": {
"title": "Text Switch"
}
},
"297": {
"inputs": {
"text": ""
},
"class_type": "Lora Stacker (LoraManager)",
"_meta": {
"title": "Lora Stacker (LoraManager)"
}
},
"298": {
"inputs": {
"anything": [
"297",
0
]
},
"class_type": "easy showAnything",
"_meta": {
"title": "Show Any"
}
},
"299": {
"inputs": {
"text": "<lora:boFLUX Double Exposure Magic v2:0.8> <lora:FluxDFaeTasticDetails:0.65>",
"loras": [
{
"name": "boFLUX Double Exposure Magic v2",
"strength": 0.8,
"active": true
},
{
"name": "FluxDFaeTasticDetails",
"strength": 0.65,
"active": true
},
{
"name": "__dummy_item1__",
"strength": 0,
"active": false,
"_isDummy": true
},
{
"name": "__dummy_item2__",
"strength": 0,
"active": false,
"_isDummy": true
}
],
"model": [
"65",
0
],
"clip": [
"11",
0
],
"lora_stack": [
"297",
0
]
},
"class_type": "Lora Loader (LoraManager)",
"_meta": {
"title": "Lora Loader (LoraManager)"
}
},
"301": {
"inputs": {
"string": "A hyper-realistic close-up portrait of a young woman with shoulder-length black hair styled in edgy, futuristic layers, adorned with glowing tips. She wears mecha eyewear with a neon green visor that transitions into iridescent shades of teal and gold. The frame is sleek, with angular edges and fine mechanical detailing. Her expression is fierce and confident, with flawless skin highlighted by the neon reflections. She wears a high-tech bodysuit with integrated LED lines and metallic panels. The background depicts a hazy rendition of The Great Wave off Kanagawa by Hokusai, its powerful waves blending seamlessly with the neon tones, amplifying her intense, defiant aura.",
"strip_newlines": true
},
"class_type": "StringConstantMultiline",
"_meta": {
"title": "String Constant Multiline"
}
}
}

82
refs/recipe.json Normal file
View File

@@ -0,0 +1,82 @@
{
"id": "0448c06d-de1b-46ab-975c-c5aa60d90dbc",
"file_path": "D:/Workspace/ComfyUI/models/loras/recipes/0448c06d-de1b-46ab-975c-c5aa60d90dbc.jpg",
"title": "a mysterious, steampunk-inspired character standing in a dramatic pose",
"modified": 1741837612.3931093,
"created_date": 1741492786.5581934,
"base_model": "Flux.1 D",
"loras": [
{
"file_name": "ChronoDivinitiesFlux_r1",
"hash": "ddbc5abd00db46ad464f5e3ca85f8f7121bc14b594d6785f441d9b002fffe66a",
"strength": 0.8,
"modelVersionId": 1438879,
"modelName": "Chrono Divinities - By HailoKnight",
"modelVersionName": "Flux"
},
{
"file_name": "flux.1_lora_flyway_ink-dynamic",
"hash": "4b4f3b469a0d5d3a04a46886abfa33daa37a905db070ccfbd10b345c6fb00eff",
"strength": 0.2,
"modelVersionId": 914935,
"modelName": "Ink-style",
"modelVersionName": "ink-dynamic"
},
{
"file_name": "ck-painterly-fantasy-000017",
"hash": "48c67064e2936aec342580a2a729d91d75eb818e45ecf993b9650cc66c94c420",
"strength": 0.2,
"modelVersionId": 1189379,
"modelName": "Painterly Fantasy by ChronoKnight - [FLUX & IL]",
"modelVersionName": "FLUX"
},
{
"file_name": "RetroAnimeFluxV1",
"hash": "8f43c31b6c3238ac44195c970d511d759c5893bddd00f59f42b8fe51e8e76fa0",
"strength": 0.8,
"modelVersionId": 806265,
"modelName": "Retro Anime Flux - Style",
"modelVersionName": "v1.0"
},
{
"file_name": "Mezzotint_Artstyle_for_Flux_-_by_Ethanar",
"hash": "e6961502769123bf23a66c5c5298d76264fd6b9610f018319a0ccb091bfc308e",
"strength": 0.2,
"modelVersionId": 757030,
"modelName": "Mezzotint Artstyle for Flux - by Ethanar",
"modelVersionName": "V1"
},
{
"file_name": "FluxMythG0thicL1nes",
"hash": "ecb03595de62bd6183a0dd2b38bea35669fd4d509f4bbae5aa0572cfb7ef4279",
"strength": 0.4,
"modelVersionId": 1202162,
"modelName": "Velvet's Mythic Fantasy Styles | Flux + Pony + illustrious",
"modelVersionName": "Flux Gothic Lines"
},
{
"file_name": "Elden_Ring_-_Yoshitaka_Amano",
"hash": "c660c4c55320be7206cb6a917c59d8da3953cc07169fe10bda833a54ec0024f9",
"strength": 0.75,
"modelVersionId": 746484,
"modelName": "Elden Ring - Yoshitaka Amano",
"modelVersionName": "V1"
}
],
"gen_params": {
"prompt": "a mysterious, steampunk-inspired character standing in a dramatic pose. The character is dressed in a long, intricately detailed dark coat with ornate patterns, a wide-brimmed hat, and leather boots. The face is partially obscured by the hat's shadow, adding to the enigmatic aura. The background showcases a large, antique clock with Roman numerals, surrounded by dynamic lightning and ethereal white birds, enhancing the fantastical atmosphere. The color palette is dominated by dark tones with striking contrasts of white and blue lightning, creating a sense of tension and energy. The overall composition is vertical, with the character centrally positioned, exuding a sense of power and mystery. hkchrono",
"negative_prompt": "",
"checkpoint": {
"type": "checkpoint",
"modelVersionId": 691639,
"modelName": "FLUX",
"modelVersionName": "Dev"
},
"steps": "30",
"sampler": "Undefined",
"cfg_scale": "3.5",
"seed": "1472903449",
"size": "832x1216",
"clip_skip": "2"
}
}

294
refs/test_output.txt Normal file
View File

@@ -0,0 +1,294 @@
Loading workflow from D:\Workspace\ComfyUI\custom_nodes\ComfyUI-Lora-Manager\refs\prompt.json
Expected output from D:\Workspace\ComfyUI\custom_nodes\ComfyUI-Lora-Manager\refs\output.json
Expected output:
{
"loras": "<lora:ck-neon-retrowave-IL-000012:0.8> <lora:aorunIllstrious:1> <lora:ck-shadow-circuit-IL-000012:0.78> <lora:MoriiMee_Gothic_Niji_Style_Illustrious_r1:0.45> <lora:ck-nc-cyberpunk-IL-000011:0.4>",
"gen_params": {
"prompt": "in the style of ck-rw, aorun, scales, makeup, bare shoulders, pointy ears, dress, claws, in the style of cksc, artist:moriimee, in the style of cknc, masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing",
"negative_prompt": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw",
"steps": "20",
"sampler": "euler_ancestral",
"cfg_scale": "8",
"seed": "241",
"size": "832x1216",
"clip_skip": "2"
}
}
Sampler node:
{
"inputs": {
"seed": 241,
"steps": 20,
"cfg": 8,
"sampler_name": "euler_ancestral",
"scheduler": "karras",
"denoise": 1,
"model": [
"56",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
}
Extracted parameters:
seed: 241
steps: 20
cfg_scale: 8
Positive node (6):
{
"inputs": {
"text": [
"22",
0
],
"clip": [
"56",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
}
Text node (22):
{
"inputs": {
"string1": [
"55",
0
],
"string2": [
"21",
0
],
"delimiter": ", "
},
"class_type": "JoinStrings",
"_meta": {
"title": "Join Strings"
}
}
String1 node (55):
{
"inputs": {
"group_mode": true,
"toggle_trigger_words": [
{
"text": "in the style of ck-rw",
"active": true
},
{
"text": "aorun, scales, makeup, bare shoulders, pointy ears",
"active": true
},
{
"text": "dress",
"active": true
},
{
"text": "claws",
"active": true
},
{
"text": "in the style of cksc",
"active": true
},
{
"text": "artist:moriimee",
"active": true
},
{
"text": "in the style of cknc",
"active": true
},
{
"text": "__dummy_item__",
"active": false,
"_isDummy": true
},
{
"text": "__dummy_item__",
"active": false,
"_isDummy": true
}
],
"orinalMessage": "in the style of ck-rw,, aorun, scales, makeup, bare shoulders, pointy ears,, dress,, claws,, in the style of cksc,, artist:moriimee,, in the style of cknc",
"trigger_words": [
"56",
2
]
},
"class_type": "TriggerWord Toggle (LoraManager)",
"_meta": {
"title": "TriggerWord Toggle (LoraManager)"
}
}
String2 node (21):
{
"inputs": {
"string": "masterpiece, best quality, good quality, very aesthetic, absurdres, newest, 8K, depth of field, focused subject, close up, stylized, in gold and neon shades, wabi sabi, 1girl, rainbow angel wings, looking at viewer, dynamic angle, from below, from side, relaxing",
"strip_newlines": false
},
"class_type": "StringConstantMultiline",
"_meta": {
"title": "positive"
}
}
Negative node (7):
{
"inputs": {
"text": "bad quality, worst quality, worst detail, sketch ,signature, watermark, patreon logo, nsfw",
"clip": [
"56",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
}
LoRA nodes (3):
LoRA node 56:
{
"inputs": {
"text": "<lora:ck-shadow-circuit-IL-000012:0.78> <lora:MoriiMee_Gothic_Niji_Style_Illustrious_r1:0.45> <lora:ck-nc-cyberpunk-IL-000011:0.4>",
"loras": [
{
"name": "ck-shadow-circuit-IL-000012",
"strength": 0.78,
"active": true
},
{
"name": "MoriiMee_Gothic_Niji_Style_Illustrious_r1",
"strength": 0.45,
"active": true
},
{
"name": "ck-nc-cyberpunk-IL-000011",
"strength": 0.4,
"active": true
},
{
"name": "__dummy_item1__",
"strength": 0,
"active": false,
"_isDummy": true
},
{
"name": "__dummy_item2__",
"strength": 0,
"active": false,
"_isDummy": true
}
],
"model": [
"4",
0
],
"clip": [
"4",
1
],
"lora_stack": [
"57",
0
]
},
"class_type": "Lora Loader (LoraManager)",
"_meta": {
"title": "Lora Loader (LoraManager)"
}
}
LoRA node 57:
{
"inputs": {
"text": "<lora:aorunIllstrious:1>",
"loras": [
{
"name": "aorunIllstrious",
"strength": "0.90",
"active": true
},
{
"name": "__dummy_item1__",
"strength": 0,
"active": false,
"_isDummy": true
},
{
"name": "__dummy_item2__",
"strength": 0,
"active": false,
"_isDummy": true
}
],
"lora_stack": [
"59",
0
]
},
"class_type": "Lora Stacker (LoraManager)",
"_meta": {
"title": "Lora Stacker (LoraManager)"
}
}
LoRA node 59:
{
"inputs": {
"text": "<lora:ck-neon-retrowave-IL-000012:0.8>",
"loras": [
{
"name": "ck-neon-retrowave-IL-000012",
"strength": 0.8,
"active": true
},
{
"name": "__dummy_item1__",
"strength": 0,
"active": false,
"_isDummy": true
},
{
"name": "__dummy_item2__",
"strength": 0,
"active": false,
"_isDummy": true
}
]
},
"class_type": "Lora Stacker (LoraManager)",
"_meta": {
"title": "Lora Stacker (LoraManager)"
}
}
Test completed.

View File

@@ -1,4 +1,9 @@
aiohttp
jinja2
safetensors
watchdog
watchdog
beautifulsoup4
piexif
Pillow
olefile
requests

View File

@@ -1,6 +1,8 @@
/* 强制显示滚动条,防止页面跳动 */
html {
overflow-y: scroll;
html, body {
margin: 0;
padding: 0;
height: 100%;
overflow: hidden; /* Disable default scrolling */
}
/* 针对Firefox */
@@ -16,6 +18,7 @@ html {
::-webkit-scrollbar-track {
background: transparent;
margin-top: 0;
}
::-webkit-scrollbar-thumb {
@@ -35,6 +38,7 @@ html {
--lora-border: oklch(90% 0.02 256 / 0.15);
--lora-text: oklch(95% 0.02 256);
--lora-error: oklch(75% 0.32 29);
--lora-warning: oklch(75% 0.25 80); /* Add warning color for deleted LoRAs */
/* Spacing Scale */
--space-1: calc(8px * 1);
@@ -43,6 +47,7 @@ html {
/* Z-index Scale */
--z-base: 10;
--z-header: 100;
--z-modal: 1000;
--z-overlay: 2000;
@@ -64,11 +69,14 @@ html {
--lora-surface: oklch(25% 0.02 256 / 0.98);
--lora-border: oklch(90% 0.02 256 / 0.15);
--lora-text: oklch(98% 0.02 256);
--lora-warning: oklch(75% 0.25 80); /* Add warning color for dark theme too */
}
body {
margin: 0;
font-family: 'Segoe UI', sans-serif;
background: var(--bg-color);
color: var(--text-color);
display: flex;
flex-direction: column;
padding-top: 0; /* Remove the padding-top */
}

View File

@@ -262,6 +262,83 @@
background: var(--lora-accent);
}
/* NSFW Level Selector */
.nsfw-level-selector {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
background: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-base);
padding: 16px;
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.2);
z-index: var(--z-modal);
width: 300px;
display: none;
}
.nsfw-level-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 16px;
}
.nsfw-level-header h3 {
margin: 0;
font-size: 16px;
font-weight: 500;
}
.close-nsfw-selector {
background: transparent;
border: none;
color: var(--text-color);
cursor: pointer;
padding: 4px;
border-radius: var(--border-radius-xs);
}
.close-nsfw-selector:hover {
background: var(--border-color);
}
.current-level {
margin-bottom: 12px;
padding: 8px;
background: var(--bg-color);
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
}
.nsfw-level-options {
display: flex;
flex-wrap: wrap;
gap: 8px;
}
.nsfw-level-btn {
flex: 1 0 calc(33% - 8px);
padding: 8px;
border-radius: var(--border-radius-xs);
background: var(--bg-color);
border: 1px solid var(--border-color);
color: var(--text-color);
cursor: pointer;
transition: all 0.2s ease;
}
.nsfw-level-btn:hover {
background: var(--lora-border);
}
.nsfw-level-btn.active {
background: var(--lora-accent);
color: white;
border-color: var(--lora-accent);
}
/* Mobile optimizations */
@media (max-width: 768px) {
.selected-thumbnails-strip {

View File

@@ -20,6 +20,10 @@
aspect-ratio: 896/1152;
max-width: 260px; /* Adjusted from 320px to fit 5 cards */
margin: 0 auto;
cursor: pointer; /* Added from recipe-card */
display: flex; /* Added from recipe-card */
flex-direction: column; /* Added from recipe-card */
overflow: hidden; /* Add overflow hidden to contain children */
}
.lora-card:hover {
@@ -47,9 +51,11 @@
.card-preview {
position: relative;
width: 100%;
height: 100%;
height: 100%; /* This should work with aspect-ratio on parent */
border-radius: var(--border-radius-base);
overflow: hidden;
flex-shrink: 0; /* Prevent shrinking */
min-height: 0; /* Fix for potential flexbox sizing issue in Firefox */
}
.card-preview img,
@@ -57,6 +63,97 @@
width: 100%;
height: 100%;
object-fit: cover;
object-position: center top; /* Align the top of the image with the top of the container */
}
/* NSFW Content Blur */
.card-preview.blurred img,
.card-preview.blurred video {
filter: blur(25px);
}
.nsfw-overlay {
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
display: flex;
align-items: center;
justify-content: center;
z-index: 2;
pointer-events: none;
}
.nsfw-warning {
text-align: center;
color: white;
background: rgba(0, 0, 0, 0.6);
padding: var(--space-2);
border-radius: var(--border-radius-base);
backdrop-filter: blur(4px);
max-width: 80%;
pointer-events: auto;
}
.nsfw-warning p {
margin: 0 0 var(--space-1);
font-weight: bold;
font-size: 1.1em;
text-shadow: 1px 1px 1px rgba(0, 0, 0, 0.5);
}
.toggle-blur-btn {
position: absolute;
left: var(--space-1);
top: var(--space-1);
background: rgba(0, 0, 0, 0.5);
border: none;
border-radius: 50%;
width: 24px;
height: 24px;
display: flex;
align-items: center;
justify-content: center;
color: white;
cursor: pointer;
z-index: 3;
transition: background-color 0.2s, transform 0.2s;
}
.toggle-blur-btn:hover {
background: rgba(0, 0, 0, 0.7);
transform: scale(1.1);
}
.toggle-blur-btn i {
font-size: 0.9em;
}
.show-content-btn {
background: var(--lora-accent);
color: white;
border: none;
border-radius: var(--border-radius-xs);
padding: 4px var(--space-1);
cursor: pointer;
font-size: 0.9em;
transition: background-color 0.2s, transform 0.2s;
}
.show-content-btn:hover {
background: oklch(58% 0.28 256);
transform: scale(1.05);
}
/* Adjust base model label positioning when toggle button is present */
.base-model-label.with-toggle {
margin-left: 28px; /* Make room for the toggle button */
}
/* Ensure card actions remain clickable */
.card-header .card-actions {
z-index: 3;
}
.card-footer {
@@ -183,4 +280,55 @@
border-radius: var(--border-radius-xs);
backdrop-filter: blur(2px);
font-size: 0.85em;
}
/* Recipe specific elements - migrated from recipe-card.css */
.recipe-indicator {
position: absolute;
top: 6px;
left: 8px;
width: 24px;
height: 24px;
background: var(--lora-primary);
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
color: white;
font-weight: bold;
z-index: 2;
}
.base-model-wrapper {
display: flex;
align-items: center;
gap: 8px;
margin-left: 32px; /* For accommodating the recipe indicator */
}
.lora-count {
display: flex;
align-items: center;
gap: 4px;
background: rgba(255, 255, 255, 0.2);
padding: 2px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.85em;
position: relative;
}
.lora-count.ready {
background: rgba(46, 204, 113, 0.3);
}
.lora-count.missing {
background: rgba(231, 76, 60, 0.3);
}
.placeholder-message {
grid-column: 1 / -1;
text-align: center;
padding: 2rem;
background: var(--lora-surface-alt);
border-radius: var(--border-radius-base);
}

View File

@@ -23,12 +23,6 @@
color: var(--text-color);
}
.error-message {
color: var(--lora-error);
font-size: 0.9em;
margin-top: 4px;
}
/* Version List Styles */
.version-list {
max-height: 400px;
@@ -104,6 +98,7 @@
.version-info {
display: flex;
flex-wrap: wrap;
flex-direction: row !important;
gap: 8px;
align-items: center;
font-size: 0.9em;
@@ -130,50 +125,6 @@
gap: 4px;
}
/* Local Version Badge */
.local-badge {
display: inline-flex;
align-items: center;
background: var(--lora-accent);
color: var(--lora-text);
padding: 4px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.8em;
font-weight: 500;
white-space: nowrap;
flex-shrink: 0;
position: relative;
}
.local-badge i {
margin-right: 4px;
font-size: 0.9em;
}
.local-path {
display: none;
position: absolute;
top: 100%;
right: 0;
background: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
padding: var(--space-1);
margin-top: 4px;
font-size: 0.9em;
color: var(--text-color);
white-space: normal;
word-break: break-all;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
z-index: 1;
min-width: 200px;
max-width: 300px;
}
.local-badge:hover .local-path {
display: block;
}
/* Folder Browser Styles */
.folder-browser {
border: 1px solid var(--border-color);
@@ -251,47 +202,4 @@
.version-item.exists-locally {
background: oklch(var(--lora-accent) / 0.05);
border-left: 4px solid var(--lora-accent);
}
.local-badge {
display: inline-flex;
align-items: center;
background: var(--lora-accent);
color: var(--lora-text);
padding: 4px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.8em;
font-weight: 500;
white-space: nowrap;
flex-shrink: 0;
position: relative;
}
.local-badge i {
margin-right: 4px;
font-size: 0.9em;
}
.local-path {
display: none;
position: absolute;
top: 100%;
right: 0;
background: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
padding: var(--space-1);
margin-top: 4px;
font-size: 0.9em;
color: var(--text-color);
white-space: normal;
word-break: break-all;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
z-index: 1;
min-width: 200px;
max-width: 300px;
}
.local-badge:hover .local-path {
display: block;
}
}

View File

@@ -0,0 +1,84 @@
/* Filter indicator styles */
.control-group .filter-active {
display: flex;
align-items: center;
gap: 6px;
background: var(--lora-accent);
color: white;
border-radius: var(--border-radius-xs);
padding: 4px 10px;
transition: all 0.2s ease;
border: 1px solid var(--lora-accent);
cursor: pointer;
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
font-size: 0.85em;
}
.control-group .filter-active:hover {
opacity: 0.92;
transform: translateY(-1px);
box-shadow: 0 3px 5px rgba(0, 0, 0, 0.15);
}
.control-group .filter-active:active {
transform: translateY(0);
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}
.control-group .filter-active i.fa-filter {
font-size: 0.9em;
margin-right: 2px;
opacity: 0.9;
}
.control-group .filter-active i.clear-filter {
transition: transform 0.2s ease, background-color 0.2s ease;
cursor: pointer;
margin-left: 4px;
border-radius: 50%;
font-size: 0.85em;
width: 16px;
height: 16px;
display: flex;
align-items: center;
justify-content: center;
}
.control-group .filter-active i.clear-filter:hover {
transform: scale(1.2);
background-color: rgba(255, 255, 255, 0.2);
}
.control-group .filter-active .lora-name {
font-weight: 500;
max-width: 150px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
/* Animation for filter indicator */
@keyframes filterPulse {
0% { transform: scale(1); box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); }
50% { transform: scale(1.03); box-shadow: 0 3px 8px rgba(0, 0, 0, 0.15); }
100% { transform: scale(1); box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); }
}
.filter-active.animate {
animation: filterPulse 0.6s ease;
}
/* Make responsive */
@media (max-width: 576px) {
.control-group .filter-active {
padding: 6px 10px;
}
.control-group .filter-active .lora-name {
max-width: 100px;
}
.control-group .filter-active:hover {
transform: none; /* Disable hover effects on mobile */
}
}

View File

@@ -0,0 +1,177 @@
.app-header {
background: var(--card-bg);
border-bottom: 1px solid var(--border-color);
position: fixed;
top: 0;
z-index: var(--z-header);
height: 48px; /* Reduced height */
width: 100%;
box-shadow: 0 1px 3px rgba(0,0,0,0.05);
}
.header-container {
max-width: 1400px;
margin: 0 auto;
padding: 0 15px;
display: flex;
align-items: center;
justify-content: space-between;
height: 100%;
}
/* Logo and title styling */
.header-branding {
display: flex;
align-items: center;
flex-shrink: 0;
}
.logo-link {
display: flex;
align-items: center;
text-decoration: none;
color: var(--text-color);
gap: 8px;
}
.app-logo {
width: 24px;
height: 24px;
}
.app-title {
font-size: 1rem;
font-weight: 600;
margin: 0;
}
/* Navigation styling */
.main-nav {
display: flex;
gap: 0.5rem;
flex-shrink: 0;
margin-right: 1rem;
}
.nav-item {
padding: 0.25rem 0.75rem;
border-radius: var(--border-radius-xs);
color: var(--text-color);
text-decoration: none;
display: flex;
align-items: center;
gap: 0.5rem;
transition: all 0.2s ease;
font-size: 0.9rem;
}
.nav-item:hover {
background-color: var(--lora-surface-hover, oklch(95% 0.02 256));
}
.nav-item.active {
background-color: var(--lora-accent);
color: white;
}
/* Header search */
.header-search {
flex: 1;
max-width: 400px;
margin: 0 1rem;
}
/* Header controls (formerly corner controls) */
.header-controls {
display: flex;
align-items: center;
gap: 8px;
flex-shrink: 0;
}
.header-controls > div {
width: 32px;
height: 32px;
border-radius: 50%;
background: var(--card-bg);
border: 1px solid var(--border-color);
color: var(--text-color);
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
transition: all 0.2s ease;
position: relative;
}
.header-controls > div:hover {
background: var(--lora-accent);
color: white;
transform: translateY(-2px);
}
.theme-toggle {
position: relative; /* Ensure relative positioning for the container */
}
.theme-toggle .light-icon,
.theme-toggle .dark-icon {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%); /* Center perfectly */
opacity: 0;
transition: opacity 0.3s ease;
}
.theme-toggle .dark-icon {
opacity: 1;
}
[data-theme="light"] .theme-toggle .light-icon {
opacity: 1;
}
[data-theme="light"] .theme-toggle .dark-icon {
opacity: 0;
}
/* Mobile adjustments */
@media (max-width: 768px) {
.app-title {
display: none; /* Hide text title on mobile */
}
.header-controls {
gap: 4px;
}
.header-controls > div {
width: 28px;
height: 28px;
}
.header-search {
max-width: none;
margin: 0 0.5rem;
}
.main-nav {
margin-right: 0.5rem;
}
}
/* For very small screens */
@media (max-width: 600px) {
.header-container {
padding: 0 8px;
}
.main-nav {
display: none; /* Hide navigation on very small screens */
}
.header-search {
flex: 1;
}
}

View File

@@ -0,0 +1,735 @@
/* Import Modal Styles */
.import-step {
margin: var(--space-2) 0;
transition: none !important; /* Disable any transitions that might affect display */
}
/* Import Mode Toggle */
.import-mode-toggle {
display: flex;
margin-bottom: var(--space-3);
border-radius: var(--border-radius-sm);
overflow: hidden;
border: 1px solid var(--border-color);
}
.toggle-btn {
flex: 1;
padding: 10px 16px;
background: var(--bg-color);
color: var(--text-color);
border: none;
cursor: pointer;
font-weight: 500;
display: flex;
align-items: center;
justify-content: center;
gap: 8px;
transition: background-color 0.2s, color 0.2s;
}
.toggle-btn:first-child {
border-right: 1px solid var(--border-color);
}
.toggle-btn.active {
background: var(--lora-accent);
color: var(--lora-text);
}
.toggle-btn:hover:not(.active) {
background: var(--lora-surface);
}
.import-section {
margin-bottom: var(--space-3);
}
/* File Input Styles */
.file-input-wrapper {
position: relative;
margin-bottom: var(--space-1);
}
.file-input-wrapper input[type="file"] {
position: absolute;
width: 100%;
height: 100%;
opacity: 0;
cursor: pointer;
z-index: 2;
}
.file-input-button {
display: flex;
align-items: center;
justify-content: center;
gap: 8px;
padding: 10px 16px;
background: var(--lora-accent);
color: var(--lora-text);
border-radius: var(--border-radius-xs);
font-weight: 500;
cursor: pointer;
transition: background-color 0.2s;
}
.file-input-button:hover {
background: oklch(from var(--lora-accent) l c h / 0.9);
}
.file-input-wrapper:hover .file-input-button {
background: oklch(from var(--lora-accent) l c h / 0.9);
}
/* Recipe Details Layout */
.recipe-details-layout {
display: grid;
grid-template-columns: 200px 1fr;
gap: var(--space-3);
margin-bottom: var(--space-3);
}
.recipe-image-container {
width: 100%;
height: 200px;
border-radius: var(--border-radius-sm);
overflow: hidden;
background: var(--lora-surface);
border: 1px solid var(--border-color);
}
.recipe-image {
width: 100%;
height: 100%;
display: flex;
align-items: center;
justify-content: center;
}
.recipe-image img {
max-width: 100%;
max-height: 100%;
object-fit: contain;
}
.recipe-form-container {
display: flex;
flex-direction: column;
gap: var(--space-2);
}
/* Tags Input Styles */
.tag-input-container {
display: flex;
gap: 8px;
margin-bottom: var(--space-1);
}
.tag-input-container input {
flex: 1;
padding: 8px;
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
background: var(--bg-color);
color: var(--text-color);
}
.tags-container {
display: flex;
flex-wrap: wrap;
gap: 8px;
margin-top: var(--space-1);
min-height: 32px;
}
.recipe-tag {
display: inline-flex;
align-items: center;
gap: 6px;
padding: 4px 10px;
background: var(--lora-surface);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
font-size: 0.9em;
}
.recipe-tag i {
cursor: pointer;
opacity: 0.7;
transition: opacity 0.2s;
}
.recipe-tag i:hover {
opacity: 1;
color: var(--lora-error);
}
.empty-tags {
color: var(--text-color);
opacity: 0.6;
font-size: 0.9em;
font-style: italic;
}
/* LoRAs List Styles */
.loras-list {
max-height: 300px;
overflow-y: auto;
margin: var(--space-2) 0;
display: flex;
flex-direction: column;
gap: 12px;
padding: 1px;
}
.lora-item {
display: flex;
gap: var(--space-2);
padding: var(--space-2);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-sm);
background: var(--bg-color);
margin: 1px;
}
.lora-item.exists-locally {
background: oklch(var(--lora-accent) / 0.05);
border-left: 4px solid var(--lora-accent);
}
.lora-item.missing-locally {
border-left: 4px solid var(--lora-error);
}
.lora-item.is-deleted {
background: oklch(var(--lora-warning) / 0.05);
border-left: 4px solid var(--lora-warning);
}
.lora-item.is-early-access {
background: rgba(0, 184, 122, 0.05);
border-left: 4px solid #00B87A;
}
.lora-item.missing-locally {
border-left: 4px solid var(--lora-error);
}
.lora-thumbnail {
width: 80px;
height: 80px;
flex-shrink: 0;
border-radius: var(--border-radius-xs);
overflow: hidden;
background: var(--bg-color);
}
.lora-thumbnail img {
width: 100%;
height: 100%;
object-fit: cover;
}
.lora-content {
display: flex;
flex-direction: column;
gap: 8px;
flex: 1;
min-width: 0;
}
.lora-header {
display: flex;
align-items: flex-start;
justify-content: space-between;
gap: var(--space-2);
}
.lora-content h3 {
margin: 0;
font-size: 1.1em;
color: var(--text-color);
flex: 1;
}
.lora-info {
display: flex;
flex-wrap: wrap;
gap: 8px;
align-items: center;
font-size: 0.9em;
}
.lora-info .base-model {
background: oklch(var(--lora-accent) / 0.1);
color: var(--lora-accent);
padding: 2px 8px;
border-radius: var(--border-radius-xs);
}
.lora-version {
font-size: 0.9em;
color: var(--text-color);
opacity: 0.7;
}
.weight-badge {
background: var(--lora-surface);
padding: 2px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.85em;
}
/* Missing LoRAs List */
.missing-loras-list {
max-height: 200px;
overflow-y: auto;
margin: var(--space-2) 0;
display: flex;
flex-direction: column;
gap: 8px;
padding: var(--space-1);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-sm);
background: var(--lora-surface);
}
.missing-lora-item {
display: flex;
gap: var(--space-2);
padding: var(--space-1);
border-bottom: 1px solid var(--border-color);
}
.missing-lora-item:last-child {
border-bottom: none;
}
.missing-lora-item.is-early-access {
background: rgba(0, 184, 122, 0.05);
border-left: 3px solid #00B87A;
padding-left: 10px;
}
.missing-badge {
display: inline-flex;
align-items: center;
background: var(--lora-error);
color: white;
padding: 4px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.8em;
font-weight: 500;
white-space: nowrap;
flex-shrink: 0;
}
.missing-badge i {
margin-right: 4px;
font-size: 0.9em;
}
.lora-count-info {
font-size: 0.85em;
opacity: 0.8;
font-weight: normal;
margin-left: 8px;
}
/* Location Selection Styles */
.location-selection {
margin: var(--space-2) 0;
padding: var(--space-2);
background: var(--lora-surface);
border-radius: var(--border-radius-sm);
}
/* Reuse folder browser and path preview styles from download-modal.css */
.folder-browser {
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
padding: var(--space-1);
max-height: 200px;
overflow-y: auto;
}
.folder-item {
padding: 8px;
cursor: pointer;
border-radius: var(--border-radius-xs);
transition: background-color 0.2s;
}
.folder-item:hover {
background: var(--lora-surface);
}
.folder-item.selected {
background: oklch(var(--lora-accent) / 0.1);
border: 1px solid var(--lora-accent);
}
.path-preview {
margin-bottom: var(--space-3);
padding: var(--space-2);
background: var(--bg-color);
border-radius: var(--border-radius-sm);
border: 1px dashed var(--border-color);
}
.path-preview label {
display: block;
margin-bottom: 8px;
color: var(--text-color);
font-size: 0.9em;
opacity: 0.8;
}
.path-display {
padding: var(--space-1);
color: var(--text-color);
font-family: monospace;
font-size: 0.9em;
line-height: 1.4;
white-space: pre-wrap;
word-break: break-all;
opacity: 0.85;
background: var(--lora-surface);
border-radius: var(--border-radius-xs);
}
/* Input Group Styles */
.input-group {
margin-bottom: var(--space-2);
}
.input-with-button {
display: flex;
gap: 8px;
}
.input-with-button input {
flex: 1;
min-width: 0;
}
.input-with-button button {
flex-shrink: 0;
white-space: nowrap;
padding: 8px 16px;
background: var(--lora-accent);
color: var(--lora-text);
border: none;
border-radius: var(--border-radius-xs);
cursor: pointer;
transition: background-color 0.2s;
}
.input-with-button button:hover {
background: oklch(from var(--lora-accent) l c h / 0.9);
}
.input-group label {
display: block;
margin-bottom: 8px;
color: var(--text-color);
}
.input-group input,
.input-group select {
width: 100%;
padding: 8px;
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
background: var(--bg-color);
color: var(--text-color);
}
/* Dark theme adjustments */
[data-theme="dark"] .lora-item {
background: var(--lora-surface);
}
[data-theme="dark"] .recipe-tag {
background: var(--card-bg);
}
/* Responsive adjustments */
@media (max-width: 768px) {
.recipe-details-layout {
grid-template-columns: 1fr;
}
.recipe-image-container {
height: 150px;
}
}
/* Size badge for LoRA items */
.size-badge {
background: var(--lora-surface);
padding: 2px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.85em;
color: var(--text-color);
opacity: 0.8;
}
/* Improved Missing LoRAs summary section */
.missing-loras-summary {
margin-bottom: var(--space-3);
padding: var(--space-2);
background: var(--bg-color);
border-radius: var(--border-radius-sm);
border: 1px solid var(--border-color);
}
.summary-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 0;
}
.summary-header h3 {
margin: 0;
font-size: 1.1em;
color: var(--text-color);
display: flex;
align-items: center;
gap: var(--space-1);
}
.lora-count-badge {
font-size: 0.9em;
font-weight: normal;
opacity: 0.7;
}
.total-size-badge {
font-size: 0.85em;
font-weight: normal;
background: var(--lora-surface);
padding: 2px 8px;
border-radius: var(--border-radius-xs);
margin-left: var(--space-1);
}
.toggle-list-btn {
background: none;
border: none;
cursor: pointer;
color: var(--text-color);
padding: 4px 8px;
border-radius: var(--border-radius-xs);
}
.toggle-list-btn:hover {
background: var(--lora-surface);
}
.missing-loras-list {
max-height: 200px;
overflow-y: auto;
transition: max-height 0.3s ease, margin-top 0.3s ease, padding-top 0.3s ease;
margin-top: 0;
padding-top: 0;
}
.missing-loras-list.collapsed {
max-height: 0;
overflow: hidden;
padding-top: 0;
}
.missing-loras-list:not(.collapsed) {
margin-top: var(--space-1);
padding-top: var(--space-1);
border-top: 1px solid var(--border-color);
}
.missing-lora-item {
display: flex;
justify-content: space-between;
align-items: center;
padding: 8px;
border-bottom: 1px solid var(--border-color);
}
.missing-lora-item:last-child {
border-bottom: none;
}
.missing-lora-info {
display: flex;
flex-direction: column;
gap: 4px;
}
.missing-lora-name {
font-weight: 500;
}
.lora-base-model {
font-size: 0.85em;
color: var(--lora-accent);
background: oklch(var(--lora-accent) / 0.1);
padding: 2px 6px;
border-radius: var(--border-radius-xs);
display: inline-block;
}
.missing-lora-size {
font-size: 0.9em;
color: var(--text-color);
opacity: 0.8;
}
/* Recipe name input select-all behavior */
#recipeName:focus {
outline: 2px solid var(--lora-accent);
}
/* Prevent layout shift with scrollbar */
.modal-content {
overflow-y: scroll; /* Always show scrollbar */
scrollbar-gutter: stable; /* Reserve space for scrollbar */
}
/* For browsers that don't support scrollbar-gutter */
@supports not (scrollbar-gutter: stable) {
.modal-content {
padding-right: calc(var(--space-2) + var(--scrollbar-width)); /* Add extra padding for scrollbar */
}
}
/* Deleted LoRA styles - Fix layout issues */
.lora-item.is-deleted {
background: oklch(var(--lora-warning) / 0.05);
border-left: 4px solid var(--lora-warning);
}
.deleted-badge {
display: inline-flex;
align-items: center;
background: var(--lora-warning);
color: white;
padding: 4px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.8em;
font-weight: 500;
white-space: nowrap;
flex-shrink: 0;
}
.deleted-badge i {
margin-right: 4px;
font-size: 0.9em;
}
.exclude-lora-checkbox {
display: none;
}
/* Deleted LoRAs warning - redesigned to not interfere with modal buttons */
.deleted-loras-warning {
display: flex;
align-items: flex-start;
gap: 12px;
padding: 12px 16px;
background: oklch(var(--lora-warning) / 0.1);
border: 1px solid var(--lora-warning);
border-radius: var(--border-radius-sm);
color: var(--text-color);
margin-bottom: var(--space-2);
}
.warning-icon {
color: var(--lora-warning);
font-size: 1.2em;
padding-top: 2px;
}
.warning-content {
flex: 1;
}
.warning-title {
font-weight: 600;
margin-bottom: 4px;
}
.warning-text {
font-size: 0.9em;
line-height: 1.4;
}
/* Remove the old warning-message styles that were causing layout issues */
.warning-message {
display: none; /* Hide the old style */
}
/* Update deleted badge to be more prominent */
.deleted-badge {
display: inline-flex;
align-items: center;
background: var(--lora-warning);
color: white;
padding: 4px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.8em;
font-weight: 500;
white-space: nowrap;
flex-shrink: 0;
}
.deleted-badge i {
margin-right: 4px;
font-size: 0.9em;
}
/* Error message styling */
.error-message {
color: var(--lora-error);
font-size: 0.9em;
margin-top: 8px;
min-height: 20px; /* Ensure there's always space for the error message */
font-weight: 500;
}
.early-access-warning {
display: flex;
align-items: flex-start;
gap: 12px;
padding: 12px 16px;
background: rgba(0, 184, 122, 0.1);
border: 1px solid #00B87A;
border-radius: var(--border-radius-sm);
color: var(--text-color);
margin-bottom: var(--space-2);
}
/* Add special styling for early access badge in the missing loras list */
.missing-lora-item .early-access-badge {
padding: 2px 6px;
font-size: 0.75em;
margin-top: 4px;
display: inline-flex;
}
/* Specific styling for the early access warning container in import modal */
.early-access-warning .warning-icon {
color: #00B87A;
font-size: 1.2em;
}
.early-access-warning .warning-title {
font-weight: 600;
margin-bottom: 4px;
}
.early-access-warning .warning-text {
font-size: 0.9em;
line-height: 1.4;
}

View File

@@ -0,0 +1,359 @@
/* Initialization Component Styles */
.initialization-container {
width: 100%;
height: 100%;
padding: var(--space-3);
background: var(--lora-surface);
animation: fadeIn 0.3s ease-in-out;
display: flex;
align-items: center;
justify-content: center;
}
.initialization-content {
max-width: 800px;
width: 100%;
}
/* Override loading.css width for initialization component */
.initialization-container .loading-content {
width: 100%;
max-width: 100%;
background: transparent;
backdrop-filter: none;
border: none;
padding: 0;
}
.initialization-header {
text-align: center;
margin-bottom: var(--space-3);
}
.initialization-header h2 {
font-size: 1.8rem;
margin-bottom: var(--space-1);
color: var(--text-color);
}
.init-subtitle {
color: var(--text-color);
opacity: 0.8;
font-size: 1rem;
}
/* Progress Bar Styles specific to initialization */
.initialization-progress {
margin-bottom: var(--space-3);
}
/* Renamed container class */
.init-progress-container {
width: 100%; /* Use full width within its container */
height: 8px; /* Match height from previous .progress-bar-container */
background-color: var(--lora-border); /* Consistent background */
border-radius: 4px;
overflow: hidden;
margin: 0 auto var(--space-1); /* Center horizontally, add bottom margin */
}
/* Renamed progress bar class */
.init-progress-bar {
height: 100%;
/* Use a gradient consistent with the theme accent */
background: linear-gradient(90deg, var(--lora-accent) 0%, color-mix(in oklch, var(--lora-accent) 80%, transparent) 100%);
border-radius: 4px; /* Match container radius */
transition: width 0.3s ease;
width: 0%; /* Start at 0% */
}
/* Remove the old .progress-bar rule specific to initialization to avoid conflicts */
/* .progress-bar { ... } */
/* Progress Details */
.progress-details {
display: flex;
justify-content: space-between;
font-size: 0.9rem;
color: var(--text-color);
margin-top: var(--space-1);
padding: 0 2px;
}
#remainingTime {
font-style: italic;
color: var(--text-color);
opacity: 0.8;
}
/* Stages Styles */
.initialization-stages {
margin-bottom: var(--space-3);
}
.stage-item {
display: flex;
align-items: flex-start;
padding: var(--space-2);
border-radius: var(--border-radius-xs);
margin-bottom: var(--space-1);
transition: background-color 0.2s ease;
border: 1px solid transparent;
}
.stage-item.active {
background-color: rgba(var(--lora-accent), 0.1);
border-color: var(--lora-accent);
}
.stage-item.completed {
background-color: rgba(0, 150, 0, 0.05);
border-color: rgba(0, 150, 0, 0.2);
}
.stage-icon {
display: flex;
align-items: center;
justify-content: center;
width: 40px;
height: 40px;
background: var(--lora-border);
border-radius: 50%;
margin-right: var(--space-2);
}
.stage-item.active .stage-icon {
background: var(--lora-accent);
color: white;
}
.stage-item.completed .stage-icon {
background: rgb(0, 150, 0);
color: white;
}
.stage-content {
flex: 1;
}
.stage-content h4 {
margin: 0 0 5px 0;
font-size: 1rem;
color: var(--text-color);
}
.stage-details {
font-size: 0.85rem;
color: var(--text-color);
opacity: 0.8;
}
.stage-status {
display: flex;
align-items: center;
justify-content: center;
width: 24px;
height: 24px;
}
.stage-status.pending {
color: var(--text-color);
opacity: 0.5;
}
.stage-status.in-progress {
color: var(--lora-accent);
}
.stage-status.completed {
color: rgb(0, 150, 0);
}
/* Tips Container */
.tips-container {
margin-top: var(--space-3);
background: rgba(var(--lora-accent), 0.05);
border-radius: var(--border-radius-base);
padding: var(--space-2);
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05);
}
.tips-header {
display: flex;
align-items: center;
margin-bottom: var(--space-2);
padding-bottom: var(--space-1);
border-bottom: 1px solid var(--lora-border);
}
.tips-header i {
margin-right: 10px;
color: var(--lora-accent);
font-size: 1.2rem;
}
.tips-header h3 {
font-size: 1.2rem;
margin: 0;
color: var(--text-color);
}
/* Tip Carousel with Images */
.tips-content {
position: relative;
}
.tip-carousel {
position: relative;
height: 160px;
overflow: hidden;
}
.tip-item {
position: absolute;
width: 100%;
height: 100%;
display: flex;
opacity: 0;
transition: opacity 0.5s ease;
padding: 0;
border-radius: var(--border-radius-sm);
overflow: hidden;
}
.tip-item.active {
opacity: 1;
}
.tip-image {
width: 40%;
overflow: hidden;
display: flex;
align-items: center;
justify-content: center;
background-color: var(--lora-border);
}
.tip-image img {
width: 100%;
height: 100%;
object-fit: cover;
}
.tip-text {
width: 60%;
padding: var(--space-2);
display: flex;
flex-direction: column;
justify-content: center;
}
.tip-text h4 {
margin: 0 0 var(--space-1) 0;
font-size: 1.1rem;
color: var(--text-color);
}
.tip-text p {
margin: 0;
line-height: 1.5;
font-size: 0.9rem;
color: var(--text-color);
}
.tip-navigation {
display: flex;
justify-content: center;
margin-top: var(--space-2);
}
.tip-dot {
width: 10px;
height: 10px;
border-radius: 50%;
background-color: var(--lora-border);
margin: 0 5px;
cursor: pointer;
transition: background-color 0.2s ease, transform 0.2s ease;
}
.tip-dot:hover {
transform: scale(1.2);
}
.tip-dot.active {
background-color: var(--lora-accent);
}
/* Animation */
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
/* Different stage status animations */
@keyframes pulse {
0% {
transform: scale(1);
}
50% {
transform: scale(1.2);
}
100% {
transform: scale(1);
}
}
.stage-item.active .stage-icon i {
animation: pulse 1s infinite;
}
/* Responsive Adjustments */
@media (max-width: 768px) {
.initialization-container {
padding: var(--space-2);
}
.stage-item {
padding: var(--space-1);
}
.stage-icon {
width: 32px;
height: 32px;
min-width: 32px;
}
.tip-item {
flex-direction: column;
height: 220px;
}
.tip-image, .tip-text {
width: 100%;
}
.tip-image {
height: 120px;
}
.tip-carousel {
height: 220px;
}
}
@media (prefers-reduced-motion: reduce) {
.initialization-container,
.tip-item,
.tip-dot {
transition: none;
animation: none;
}
}

View File

@@ -56,6 +56,53 @@
transition: width 200ms ease-out;
}
/* Enhanced progress display */
.progress-details-container {
margin-top: var(--space-3);
width: 100%;
text-align: left;
}
.overall-progress-label {
font-size: 0.9rem;
margin-bottom: var(--space-1);
color: var(--text-color);
}
.current-item-progress {
margin-top: var(--space-2);
}
.current-item-label {
font-size: 0.9rem;
margin-bottom: var(--space-1);
color: var(--text-color);
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.current-item-bar-container {
height: 8px;
background-color: var(--lora-border);
border-radius: 4px;
overflow: hidden;
margin-bottom: var(--space-1);
}
.current-item-bar {
height: 100%;
background-color: var(--lora-accent);
transition: width 200ms ease-out;
width: 0%;
}
.current-item-percent {
font-size: 0.8rem;
color: var(--text-color-secondary, var(--text-color));
opacity: 0.7;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
@@ -63,7 +110,8 @@
@media (prefers-reduced-motion: reduce) {
.lora-card,
.progress-bar {
.progress-bar,
.current-item-bar {
transition: none;
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -2,13 +2,13 @@
.modal {
display: none;
position: fixed;
top: 0;
top: 48px; /* Start below the header */
left: 0;
width: 100%;
height: 100%;
height: calc(100% - 48px); /* Adjust height to exclude header */
background: rgba(0, 0, 0, 0.2); /* 调整为更淡的半透明黑色 */
z-index: var(--z-modal);
overflow: hidden; /* 改为 hidden防止双滚动条 */
overflow: auto; /* Change from hidden to auto to allow scrolling */
}
/* 当模态窗口打开时禁止body滚动 */
@@ -23,8 +23,8 @@ body.modal-open {
position: relative;
max-width: 800px;
height: auto;
max-height: 90vh;
margin: 2rem auto;
max-height: calc(90vh - 48px); /* Adjust to account for header height */
margin: 1rem auto; /* Keep reduced top margin */
background: var(--lora-surface);
border-radius: var(--border-radius-base);
padding: var(--space-3);
@@ -196,7 +196,7 @@ body.modal-open {
}
.settings-modal {
max-width: 500px;
max-width: 650px; /* Further increased from 600px for more space */
}
/* Settings Links */
@@ -266,14 +266,22 @@ body.modal-open {
}
}
/* API key input specific styles */
.api-key-input {
width: 100%; /* Take full width of parent */
position: relative;
display: flex;
align-items: center;
}
.api-key-input input {
padding-right: 40px;
width: 100%;
padding: 6px 40px 6px 10px; /* Add left padding */
height: 32px;
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
background-color: var(--lora-surface);
color: var(--text-color);
}
.api-key-input .toggle-visibility {
@@ -294,8 +302,10 @@ body.modal-open {
.input-help {
font-size: 0.85em;
color: var(--text-color);
opacity: 0.8;
margin-top: 4px;
opacity: 0.7;
margin-top: 8px; /* Space between control and help */
line-height: 1.4;
width: 100%; /* Full width */
}
/* 统一各个 section 的样式 */
@@ -323,4 +333,244 @@ body.modal-open {
[data-theme="dark"] .path-preview {
background: rgba(255, 255, 255, 0.03);
border: 1px solid var(--lora-border);
}
/* Settings Styles */
.settings-section {
margin-top: var(--space-3);
border-top: 1px solid var(--lora-border);
padding-top: var(--space-2);
}
.settings-section h3 {
font-size: 1.1em;
margin-bottom: var(--space-2);
color: var(--text-color);
opacity: 0.9;
}
.setting-item {
display: flex;
flex-direction: column; /* Changed to column for help text placement */
margin-bottom: var(--space-3); /* Increased to provide more spacing between items */
padding: var(--space-1);
border-radius: var(--border-radius-xs);
}
.setting-item:hover {
background: rgba(0, 0, 0, 0.02);
}
[data-theme="dark"] .setting-item:hover {
background: rgba(255, 255, 255, 0.05);
}
/* Control row with label and input together */
.setting-row {
display: flex;
flex-direction: row;
justify-content: space-between;
align-items: center;
width: 100%;
}
.setting-info {
margin-bottom: 0;
width: 35%; /* Increased from 30% to prevent wrapping */
flex-shrink: 0; /* Prevent shrinking */
}
.setting-info label {
display: block;
font-weight: 500;
margin-bottom: 0;
white-space: nowrap; /* Prevent label wrapping */
}
.setting-control {
width: 60%; /* Decreased slightly from 65% */
margin-bottom: 0;
display: flex;
justify-content: flex-end; /* Right-align all controls */
}
/* Select Control Styles */
.select-control {
width: 100%;
display: flex;
justify-content: flex-end;
}
.select-control select {
width: 100%;
max-width: 100%; /* Increased from 200px */
padding: 6px 10px;
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
background-color: var(--lora-surface);
color: var(--text-color);
font-size: 0.95em;
height: 32px;
}
/* Fix dark theme select dropdown text color */
[data-theme="dark"] .select-control select {
background-color: rgba(30, 30, 30, 0.9);
color: var(--text-color);
}
[data-theme="dark"] .select-control select option {
background-color: #2d2d2d;
color: var(--text-color);
}
.select-control select:focus {
border-color: var(--lora-accent);
outline: none;
}
/* Toggle Switch */
.toggle-switch {
position: relative;
display: inline-block;
width: 50px;
height: 24px;
cursor: pointer;
margin-left: auto; /* Push to right side */
}
.toggle-switch input {
opacity: 0;
width: 0;
height: 0;
}
.toggle-slider {
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: var(--border-color);
transition: .3s;
border-radius: 24px;
}
.toggle-slider:before {
position: absolute;
content: "";
height: 18px;
width: 18px;
left: 3px;
bottom: 3px;
background-color: white;
transition: .3s;
border-radius: 50%;
}
input:checked + .toggle-slider {
background-color: var(--lora-accent);
}
input:checked + .toggle-slider:before {
transform: translateX(26px);
}
.toggle-label {
margin-left: 60px;
line-height: 24px;
}
/* Add small animation for the toggle */
.toggle-slider:active:before {
width: 22px;
}
/* Blur effect for NSFW content */
.nsfw-blur {
filter: blur(12px);
transition: filter 0.3s ease;
}
.nsfw-blur:hover {
filter: blur(8px);
}
/* Add styles for delete preview image */
.delete-preview {
max-width: 150px;
margin: 0 auto var(--space-2);
overflow: hidden;
}
.delete-preview img {
width: 100%;
height: auto;
max-height: 150px;
object-fit: contain;
border-radius: var(--border-radius-sm);
}
.delete-info {
text-align: center;
}
.delete-info h3 {
margin-bottom: var(--space-1);
word-break: break-word;
}
.delete-info p {
margin: var(--space-1) 0;
font-size: 0.9em;
opacity: 0.8;
}
.delete-note {
font-size: 0.85em;
color: var(--text-color);
opacity: 0.7;
font-style: italic;
margin-top: var(--space-1);
text-align: center;
}
/* Add styles for markdown elements in changelog */
.changelog-item ul {
padding-left: 20px;
margin-top: 8px;
}
.changelog-item li {
margin-bottom: 6px;
line-height: 1.4;
}
.changelog-item strong {
font-weight: 600;
}
.changelog-item em {
font-style: italic;
}
.changelog-item code {
background: rgba(0, 0, 0, 0.05);
padding: 2px 4px;
border-radius: 3px;
font-family: monospace;
font-size: 0.9em;
}
[data-theme="dark"] .changelog-item code {
background: rgba(255, 255, 255, 0.1);
}
.changelog-item a {
color: var(--lora-accent);
text-decoration: none;
}
.changelog-item a:hover {
text-decoration: underline;
}

View File

@@ -0,0 +1,862 @@
.recipe-modal-header {
display: flex;
flex-direction: column;
justify-content: flex-start;
align-items: flex-start;
border-bottom: 1px solid var(--lora-border);
padding-bottom: 10px;
margin-bottom: 10px;
}
.recipe-modal-header h2 {
font-size: 1.4em; /* Reduced from default h2 size */
line-height: 1.3;
margin: 0;
max-height: 2.6em; /* Limit to 2 lines */
overflow: hidden;
text-overflow: ellipsis;
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-box-orient: vertical;
width: calc(100% - 20px);
}
/* Editable content styles */
.editable-content {
position: relative;
width: 100%;
display: flex;
align-items: center;
justify-content: space-between;
}
.editable-content.hide {
display: none;
}
.editable-content .content-text {
flex: 1;
min-width: 0;
overflow: hidden;
text-overflow: ellipsis;
}
.edit-icon {
background: none;
border: none;
color: var(--text-color);
opacity: 0;
cursor: pointer;
padding: 4px 8px;
margin-left: 8px;
border-radius: var(--border-radius-xs);
transition: all 0.2s;
flex-shrink: 0;
display: flex;
align-items: center;
justify-content: center;
}
.editable-content:hover .edit-icon {
opacity: 0.6;
}
.edit-icon:hover {
opacity: 1 !important;
background: var(--lora-surface);
}
/* Content editor styles */
.content-editor {
display: none;
width: 100%;
padding: 4px 0;
}
.content-editor.active {
display: flex;
align-items: center;
gap: 8px;
}
.content-editor input {
flex: 1;
background: var(--bg-color);
border: 1px solid var(--lora-border);
border-radius: var(--border-radius-xs);
padding: 6px 8px;
font-size: 1em;
color: var(--text-color);
min-width: 0;
}
.content-editor.tags-editor input {
font-size: 0.9em;
}
/* 删除不再需要的按钮样式 */
.editor-actions {
display: none;
}
/* Special styling for tags content */
.tags-content {
display: flex;
align-items: center;
flex-wrap: nowrap;
gap: 8px;
}
.tags-display {
display: flex;
flex-wrap: nowrap;
gap: 6px;
align-items: center;
flex: 1;
min-width: 0;
overflow: hidden;
}
.no-tags {
font-size: 0.85em;
color: var(--text-color);
opacity: 0.6;
font-style: italic;
}
/* Recipe Tags styles */
.recipe-tags-container {
position: relative;
margin-top: 6px;
margin-bottom: 10px;
}
.recipe-tags-compact {
display: flex;
flex-wrap: nowrap;
gap: 6px;
align-items: center;
}
.recipe-tag-compact {
background: rgba(0, 0, 0, 0.03);
border: 1px solid rgba(0, 0, 0, 0.1);
border-radius: var(--border-radius-xs);
padding: 2px 8px;
font-size: 0.75em;
color: var(--text-color);
white-space: nowrap;
}
[data-theme="dark"] .recipe-tag-compact {
background: rgba(255, 255, 255, 0.03);
border: 1px solid var(--lora-border);
}
.recipe-tag-more {
background: var(--lora-accent);
color: var(--lora-text);
border-radius: var(--border-radius-xs);
padding: 2px 8px;
font-size: 0.75em;
cursor: pointer;
white-space: nowrap;
font-weight: 500;
}
.recipe-tags-tooltip {
position: absolute;
top: calc(100% + 8px);
left: 0;
background: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-sm);
box-shadow: 0 3px 8px rgba(0, 0, 0, 0.15);
padding: 10px 14px;
max-width: 400px;
z-index: 10;
opacity: 0;
visibility: hidden;
transform: translateY(-4px);
transition: all 0.2s ease;
pointer-events: none;
}
.recipe-tags-tooltip.visible {
opacity: 1;
visibility: visible;
transform: translateY(0);
pointer-events: auto;
}
.tooltip-content {
display: flex;
flex-wrap: wrap;
gap: 6px;
max-height: 200px;
overflow-y: auto;
}
.tooltip-tag {
background: rgba(0, 0, 0, 0.03);
border: 1px solid rgba(0, 0, 0, 0.1);
border-radius: var(--border-radius-xs);
padding: 3px 8px;
font-size: 0.75em;
color: var(--text-color);
}
[data-theme="dark"] .tooltip-tag {
background: rgba(255, 255, 255, 0.03);
border: 1px solid var(--lora-border);
}
/* Top Section: Preview and Gen Params */
.recipe-top-section {
display: grid;
grid-template-columns: 280px 1fr;
gap: var(--space-2);
flex-shrink: 0;
margin-bottom: var(--space-2);
}
/* Recipe Preview */
.recipe-preview-container {
width: 100%;
height: 360px;
border-radius: var(--border-radius-sm);
overflow: hidden;
background: var(--lora-surface);
border: 1px solid var(--border-color);
display: flex;
align-items: center;
justify-content: center;
}
.recipe-preview-container img,
.recipe-preview-container video {
max-width: 100%;
max-height: 100%;
object-fit: contain;
}
.recipe-preview-media {
max-width: 100%;
max-height: 100%;
object-fit: contain;
}
/* Generation Parameters */
.recipe-gen-params {
height: 360px;
display: flex;
flex-direction: column;
}
.recipe-gen-params h3 {
margin-top: 0;
margin-bottom: var(--space-2);
font-size: 1.2em;
color: var(--text-color);
padding-bottom: var(--space-1);
border-bottom: 1px solid var(--border-color);
flex-shrink: 0;
}
.gen-params-container {
display: flex;
flex-direction: column;
gap: var(--space-2);
overflow-y: auto;
flex: 1;
}
.param-group {
display: flex;
flex-direction: column;
gap: 8px;
}
.param-header {
display: flex;
justify-content: space-between;
align-items: center;
}
.param-header label {
font-weight: 500;
color: var(--text-color);
}
.copy-btn {
background: none;
border: none;
color: var(--text-color);
opacity: 0.6;
cursor: pointer;
padding: 4px 8px;
border-radius: var(--border-radius-xs);
transition: all 0.2s;
}
.copy-btn:hover {
opacity: 1;
background: var(--lora-surface);
}
.param-content {
background: var(--lora-surface);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
padding: var(--space-2);
color: var(--text-color);
font-size: 0.9em;
line-height: 1.5;
max-height: 150px;
overflow-y: auto;
white-space: pre-wrap;
word-break: break-word;
}
/* Other Parameters */
.other-params {
display: flex;
flex-wrap: wrap;
gap: 8px;
margin-top: var(--space-1);
}
.param-tag {
background: var(--lora-surface);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
padding: 4px 8px;
font-size: 0.85em;
color: var(--text-color);
display: flex;
align-items: center;
gap: 6px;
}
.param-tag .param-name {
font-weight: 500;
opacity: 0.8;
}
/* Bottom Section: Resources */
.recipe-bottom-section {
max-height: 320px;
display: flex;
flex-direction: column;
border-top: 1px solid var(--border-color);
padding-top: var(--space-2);
}
.recipe-section-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: var(--space-2);
padding-bottom: var(--space-1);
border-bottom: 1px solid var(--border-color);
flex-shrink: 0;
}
.recipe-section-header h3 {
margin: 0;
font-size: 1.2em;
color: var(--text-color);
display: flex;
align-items: center;
gap: 8px;
}
.recipe-status {
display: inline-flex;
align-items: center;
font-size: 0.85em;
padding: 4px 8px;
border-radius: var(--border-radius-xs);
margin-left: var(--space-1);
}
.recipe-status.ready {
background: oklch(var(--lora-accent) / 0.1);
color: var(--lora-accent);
}
.recipe-status.missing {
background: oklch(var(--lora-error) / 0.1);
color: var(--lora-error);
}
.recipe-status i {
margin-right: 4px;
}
.recipe-section-actions {
display: flex;
align-items: center;
gap: var(--space-1);
}
/* View LoRAs button */
.view-loras-btn {
background: none;
border: none;
color: var(--text-color);
opacity: 0.7;
cursor: pointer;
padding: 4px 8px;
border-radius: var(--border-radius-xs);
transition: all 0.2s;
display: flex;
align-items: center;
justify-content: center;
}
.view-loras-btn:hover {
opacity: 1;
background: var(--lora-surface);
color: var(--lora-accent);
}
#recipeLorasCount {
font-size: 0.9em;
color: var(--text-color);
opacity: 0.8;
display: flex;
align-items: center;
gap: 6px;
}
#recipeLorasCount i {
font-size: 1em;
}
/* LoRAs List */
.recipe-loras-list {
display: flex;
flex-direction: column;
gap: 10px;
overflow-y: auto;
flex: 1;
padding-top: 4px; /* Add padding to prevent first item from being cut off when hovered */
}
.recipe-lora-item {
display: flex;
gap: var(--space-2);
padding: 10px var(--space-2);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-sm);
background: var(--bg-color);
/* Add will-change to create a new stacking context and force hardware acceleration */
will-change: transform;
/* Create a new containing block for absolutely positioned descendants */
transform: translateZ(0);
cursor: pointer; /* Make it clear the item is clickable */
transition: transform 0.2s ease, box-shadow 0.2s ease, border-color 0.2s ease;
}
.recipe-lora-item:hover {
transform: translateY(-1px);
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.08);
border-color: var(--lora-accent);
}
.recipe-lora-item.exists-locally {
background: oklch(var(--lora-accent) / 0.05);
border-left: 4px solid var(--lora-accent);
}
.recipe-lora-item.missing-locally {
border-left: 4px solid var(--lora-error);
}
.recipe-lora-item.is-deleted {
background: rgba(127, 127, 127, 0.05);
border-left: 4px solid #777;
opacity: 0.8;
}
.recipe-lora-thumbnail {
width: 46px;
height: 46px;
flex-shrink: 0;
border-radius: var(--border-radius-xs);
overflow: hidden;
background: var(--bg-color);
display: flex;
align-items: center;
justify-content: center;
}
.recipe-lora-thumbnail img,
.recipe-lora-thumbnail video {
width: 100%;
height: 100%;
object-fit: cover;
}
.thumbnail-video {
width: 100%;
height: 100%;
object-fit: cover;
}
.recipe-lora-content {
display: flex;
flex-direction: column;
gap: 3px;
flex: 1;
min-width: 0;
}
.recipe-lora-header {
display: flex;
align-items: flex-start;
justify-content: space-between;
gap: var(--space-2);
position: relative;
min-height: 28px;
/* Ensure badges don't move during scroll in Chrome */
transform: translateZ(0);
}
.recipe-lora-content h4 {
margin: 0;
font-size: 1em;
color: var(--text-color);
flex: 1;
max-width: calc(100% - 120px); /* Make room for the badge */
overflow: hidden;
text-overflow: ellipsis;
display: -webkit-box;
-webkit-line-clamp: 2; /* Limit to 2 lines */
-webkit-box-orient: vertical;
line-height: 1.3;
}
.recipe-lora-info {
display: flex;
flex-wrap: wrap;
gap: 8px;
align-items: center;
font-size: 0.85em;
margin-top: 4px;
padding-right: 4px;
}
.recipe-lora-info .base-model {
background: oklch(var(--lora-accent) / 0.1);
color: var(--lora-accent);
padding: 2px 8px;
border-radius: var(--border-radius-xs);
}
.recipe-lora-version {
font-size: 0.85em;
color: var(--text-color);
opacity: 0.7;
}
.recipe-lora-weight {
background: var(--lora-surface);
padding: 2px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.85em;
color: var(--lora-accent);
}
.local-badge,
.missing-badge {
position: absolute;
right: 0;
top: 0;
/* Force hardware acceleration for Chrome */
transform: translateZ(0);
backface-visibility: hidden;
}
/* Specific styles for recipe modal badges - update z-index */
.recipe-lora-header .local-badge,
.recipe-lora-header .missing-badge {
z-index: 2; /* Ensure the badge is above other elements */
backface-visibility: hidden;
}
/* Ensure local-path tooltip is properly positioned and won't move during scroll */
.recipe-lora-header .local-badge .local-path {
z-index: 3;
top: calc(100% + 4px); /* Position tooltip below the badge */
right: -4px; /* Align with the badge */
max-width: 250px;
/* Force hardware acceleration for Chrome */
transform: translateZ(0);
}
.missing-badge {
display: inline-flex;
align-items: center;
background: var(--lora-error);
color: white;
padding: 3px 6px;
border-radius: var(--border-radius-xs);
font-size: 0.75em;
font-weight: 500;
white-space: nowrap;
flex-shrink: 0;
}
.missing-badge i {
margin-right: 4px;
font-size: 0.9em;
}
/* Deleted badge with reconnect functionality */
.deleted-badge {
display: inline-flex;
align-items: center;
background: #777;
color: white;
padding: 3px 6px;
border-radius: var(--border-radius-xs);
font-size: 0.75em;
font-weight: 500;
white-space: nowrap;
flex-shrink: 0;
}
.deleted-badge i {
margin-right: 4px;
font-size: 0.9em;
}
/* Add reconnect functionality styles */
.deleted-badge.reconnectable {
position: relative;
cursor: pointer;
transition: background-color 0.2s ease;
}
.deleted-badge.reconnectable:hover {
background-color: var(--lora-accent);
}
.deleted-badge .reconnect-tooltip {
position: absolute;
display: none;
background-color: var(--card-bg);
color: var(--text-color);
padding: 8px 12px;
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
z-index: var(--z-overlay);
width: max-content;
max-width: 200px;
font-size: 0.85rem;
font-weight: normal;
top: calc(100% + 5px);
left: 0;
margin-left: -100px;
}
.deleted-badge.reconnectable:hover .reconnect-tooltip {
display: block;
}
/* LoRA reconnect container */
.lora-reconnect-container {
display: none;
flex-direction: column;
background: var(--lora-surface);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
padding: 12px;
margin-top: 10px;
gap: 10px;
}
.lora-reconnect-container.active {
display: flex;
}
.reconnect-instructions {
display: flex;
flex-direction: column;
gap: 5px;
}
.reconnect-instructions p {
margin: 0;
font-size: 0.95em;
font-weight: 500;
color: var(--text-color);
}
.reconnect-instructions small {
color: var(--text-color);
opacity: 0.7;
font-size: 0.85em;
}
.reconnect-instructions code {
background: rgba(0, 0, 0, 0.1);
padding: 2px 4px;
border-radius: 3px;
font-family: monospace;
font-size: 0.9em;
}
[data-theme="dark"] .reconnect-instructions code {
background: rgba(255, 255, 255, 0.1);
}
.reconnect-form {
display: flex;
flex-direction: column;
gap: 10px;
}
.reconnect-input {
width: calc(100% - 20px);
padding: 8px 10px;
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
background: var(--bg-color);
color: var(--text-color);
font-size: 0.9em;
}
.reconnect-actions {
display: flex;
justify-content: flex-end;
gap: 8px;
}
.reconnect-cancel-btn,
.reconnect-confirm-btn {
padding: 6px 12px;
border-radius: var(--border-radius-xs);
font-size: 0.85em;
cursor: pointer;
border: none;
transition: all 0.2s;
}
.reconnect-cancel-btn {
background: var(--bg-color);
color: var(--text-color);
border: 1px solid var(--border-color);
}
.reconnect-confirm-btn {
background: var(--lora-accent);
color: white;
}
.reconnect-cancel-btn:hover {
background: var(--lora-surface);
}
.reconnect-confirm-btn:hover {
background: color-mix(in oklch, var(--lora-accent), black 10%);
}
/* Recipe status partial state */
.recipe-status.partial {
background: rgba(127, 127, 127, 0.1);
color: #777;
}
/* 标题输入框特定的样式 */
.title-input {
font-size: 1.2em !important; /* 调整为更合适的大小 */
line-height: 1.2;
font-weight: 500;
}
/* Responsive adjustments */
@media (max-width: 768px) {
.recipe-top-section {
grid-template-columns: 1fr;
}
.recipe-preview-container {
height: 200px;
}
.recipe-gen-params {
height: auto;
max-height: 300px;
}
}
.badge-container {
position: relative;
display: flex;
align-items: center;
justify-content: flex-end;
flex-shrink: 0;
min-width: 110px;
z-index: 2;
}
/* Update the local-badge and missing-badge to be positioned within the badge-container */
.badge-container .local-badge,
.badge-container .missing-badge,
.badge-container .deleted-badge {
position: static; /* Override absolute positioning */
transform: none; /* Remove the transform */
}
/* Ensure the tooltip is still properly positioned */
.badge-container .local-badge .local-path {
position: fixed; /* Keep as fixed for Chrome */
z-index: 100;
}
/* Add styles for missing LoRAs download feature */
.recipe-status.missing {
position: relative;
cursor: pointer;
transition: background-color 0.2s ease;
}
.recipe-status.missing:hover {
background-color: rgba(var(--lora-warning-rgb, 255, 165, 0), 0.2);
}
.recipe-status.missing .missing-tooltip {
position: absolute;
display: none;
background-color: var(--card-bg);
color: var(--text-color);
padding: 8px 12px;
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
z-index: var(--z-overlay);
width: max-content;
max-width: 200px;
font-size: 0.85rem;
font-weight: normal;
margin-left: -100px;
margin-top: -65px;
}
.recipe-status.missing:hover .missing-tooltip {
display: block;
}
.recipe-status.clickable {
cursor: pointer;
padding: 4px 8px;
border-radius: var(--border-radius-xs);
}
.recipe-status.clickable:hover {
background-color: rgba(var(--lora-warning-rgb, 255, 165, 0), 0.2);
}

View File

@@ -1,9 +1,7 @@
/* Search Container Styles */
.search-container {
position: relative;
width: 250px;
margin-left: auto;
flex-shrink: 0; /* 防止搜索框被压缩 */
width: 100%;
display: flex;
align-items: center;
gap: 4px;
@@ -12,14 +10,14 @@
/* 调整搜索框样式以匹配其他控件 */
.search-container input {
width: 100%;
padding: 6px 75px 6px 12px; /* Increased right padding to accommodate both buttons */
border: 1px solid oklch(65% 0.02 256); /* 更深的边框颜色,提高对比度 */
padding: 6px 35px 6px 12px; /* Reduced right padding */
border: 1px solid oklch(65% 0.02 256);
border-radius: var(--border-radius-sm);
background: var(--lora-surface);
color: var(--text-color);
font-size: 0.9em;
height: 32px;
box-sizing: border-box; /* 确保padding不会增加总宽度 */
box-sizing: border-box;
}
.search-container input:focus {
@@ -34,7 +32,7 @@
transform: translateY(-50%);
color: oklch(var(--text-color) / 0.5);
pointer-events: none;
line-height: 1; /* 防止图标影响容器高度 */
line-height: 1;
}
/* 修改清空按钮样式 */
@@ -47,8 +45,8 @@
cursor: pointer;
border: none;
background: none;
padding: 4px 8px; /* 增加点击区域 */
display: none; /* 默认隐藏 */
padding: 4px 8px;
display: none;
line-height: 1;
transition: color 0.2s ease;
}
@@ -144,19 +142,19 @@
/* Filter Panel Styles */
.filter-panel {
position: absolute;
top: 140px; /* Adjust to be closer to the filter button */
position: fixed;
right: 20px;
width: 300px;
top: 50px; /* Position below header */
width: 320px;
background-color: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-base);
box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
z-index: var(--z-overlay); /* Increase z-index to be above cards */
z-index: var(--z-overlay);
padding: 16px;
transition: transform 0.3s ease, opacity 0.3s ease;
transform-origin: top right;
max-height: calc(100vh - 160px);
max-height: calc(100vh - 70px); /* Adjusted for header height */
overflow-y: auto;
}
@@ -237,6 +235,44 @@
border-color: var(--lora-accent);
}
/* Tag filter styles */
.tag-filter {
display: flex;
align-items: center;
justify-content: space-between;
min-width: 60px;
}
.tag-count {
background: rgba(0, 0, 0, 0.1);
padding: 1px 6px;
border-radius: 10px;
font-size: 0.8em;
margin-left: 4px;
}
[data-theme="dark"] .tag-count {
background: rgba(255, 255, 255, 0.1);
}
.tag-filter.active .tag-count {
background: rgba(255, 255, 255, 0.3);
color: white;
}
.tags-loading, .tags-error, .no-tags {
width: 100%;
padding: 8px;
text-align: center;
font-size: 0.9em;
color: var(--text-color);
opacity: 0.7;
}
.tags-error {
color: var(--lora-error);
}
/* Filter actions */
.filter-actions {
display: flex;
@@ -274,6 +310,211 @@
width: calc(100% - 40px);
left: 20px;
right: 20px;
top: 140px;
top: 160px; /* Adjusted for mobile layout */
}
}
}
/* Search Options Toggle */
.search-options-toggle {
background: var(--lora-surface);
border: 1px solid oklch(65% 0.02 256);
border-radius: var(--border-radius-sm);
color: var(--text-color);
width: 32px;
height: 32px;
padding: 0;
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
transition: all 0.2s ease;
flex-shrink: 0;
}
.search-options-toggle:hover {
background-color: var(--lora-surface-hover, oklch(95% 0.02 256));
color: var(--lora-accent);
border-color: var(--lora-accent);
}
.search-options-toggle.active {
background-color: oklch(95% 0.05 256);
color: var(--lora-accent);
border-color: var(--lora-accent);
}
.search-options-toggle i {
font-size: 0.9em;
}
/* Search Options Panel */
.search-options-panel {
position: fixed;
right: 20px;
top: 50px; /* Position below header */
width: 280px;
background-color: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-base);
box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
z-index: var(--z-overlay);
padding: 16px;
transition: transform 0.3s ease, opacity 0.3s ease;
transform-origin: top right;
display: block; /* Ensure it's block by default */
}
.search-options-panel.hidden {
opacity: 0;
transform: scale(0.95);
pointer-events: none;
}
.options-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 16px;
}
.options-header h3 {
margin: 0;
font-size: 16px;
color: var(--text-color);
}
.close-options-btn {
background: none;
border: none;
color: var(--text-color);
cursor: pointer;
font-size: 16px;
padding: 4px;
display: flex;
align-items: center;
justify-content: center;
}
.close-options-btn:hover {
color: var(--lora-accent);
}
.options-section {
margin-bottom: 16px;
}
.options-section h4 {
margin: 0 0 8px 0;
font-size: 14px;
color: var(--text-color);
opacity: 0.8;
}
.search-option-tags {
display: flex;
flex-wrap: wrap;
gap: 8px; /* Increased gap for better spacing */
}
.search-option-tag {
padding: 6px 8px; /* Adjusted padding for better text display */
border-radius: var(--border-radius-sm);
background-color: var(--lora-surface);
border: 1px solid var(--border-color);
color: var(--text-color);
font-size: 13px; /* Slightly smaller font size */
cursor: pointer;
transition: all 0.2s ease;
user-select: none;
flex: 1;
text-align: center;
white-space: nowrap; /* Prevent text wrapping */
min-width: 80px; /* Ensure minimum width for each tag */
display: inline-flex; /* Better control over layout */
justify-content: center;
align-items: center;
}
.search-option-tag:hover {
background-color: var(--lora-surface-hover);
}
.search-option-tag.active {
background-color: var(--lora-accent);
color: white;
border-color: var(--lora-accent);
}
/* Switch styles */
.search-option-switch {
display: flex;
justify-content: space-between;
align-items: center;
padding: 4px 0;
}
.switch {
position: relative;
display: inline-block;
width: 46px;
height: 24px;
}
.switch input {
opacity: 0;
width: 0;
height: 0;
}
.slider {
position: absolute;
cursor: pointer;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: #ccc;
transition: .4s;
}
.slider:before {
position: absolute;
content: "";
height: 18px;
width: 18px;
left: 3px;
bottom: 3px;
background-color: white;
transition: .4s;
}
input:checked + .slider {
background-color: var(--lora-accent);
}
input:focus + .slider {
box-shadow: 0 0 1px var(--lora-accent);
}
input:checked + .slider:before {
transform: translateX(22px);
}
.slider.round {
border-radius: 34px;
}
.slider.round:before {
border-radius: 50%;
}
/* Mobile adjustments */
@media (max-width: 768px) {
.search-options-panel,
.filter-panel {
width: calc(100% - 40px);
left: 20px;
right: 20px;
top: 160px; /* Adjusted for mobile layout */
}
}

View File

@@ -0,0 +1,111 @@
/* Local Version Badge */
.local-badge {
display: inline-flex;
align-items: center;
background: var(--lora-accent);
color: var(--lora-text);
padding: 4px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.8em;
font-weight: 500;
white-space: nowrap;
flex-shrink: 0;
position: relative;
/* Force hardware acceleration to prevent Chrome scroll issues */
transform: translateZ(0);
will-change: transform;
}
.local-badge i {
margin-right: 4px;
font-size: 0.9em;
}
/* Early Access Badge */
.early-access-badge {
display: inline-flex;
align-items: center;
background: #00B87A; /* Green for early access */
color: white;
padding: 4px 8px;
border-radius: var(--border-radius-xs);
font-size: 0.8em;
font-weight: 500;
white-space: nowrap;
flex-shrink: 0;
position: relative;
/* Force hardware acceleration to prevent Chrome scroll issues */
transform: translateZ(0);
will-change: transform;
}
.early-access-badge i {
margin-right: 4px;
font-size: 0.9em;
}
.early-access-info {
display: none;
position: absolute;
top: 100%;
right: 0;
background: var(--card-bg);
border: 1px solid #00B87A;
border-radius: var(--border-radius-xs);
padding: var(--space-1);
margin-top: 4px;
font-size: 0.9em;
color: var(--text-color);
white-space: normal;
word-break: break-all;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
z-index: 100; /* Higher z-index to ensure it's above other elements */
min-width: 300px;
max-width: 300px;
/* Create a separate layer with hardware acceleration */
transform: translateZ(0);
/* Use a fixed position to ensure it's in a separate layer from scrollable content */
position: fixed;
pointer-events: none; /* Don't block mouse events */
}
.early-access-badge:hover .early-access-info {
display: block;
pointer-events: auto; /* Allow interaction with the tooltip when visible */
}
.local-path {
display: none;
position: absolute;
top: 100%;
right: 0;
background: var(--card-bg);
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
padding: var(--space-1);
margin-top: 4px;
font-size: 0.9em;
color: var(--text-color);
white-space: normal;
word-break: break-all;
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
z-index: 100; /* Higher z-index to ensure it's above other elements */
min-width: 200px;
max-width: 300px;
/* Create a separate layer with hardware acceleration */
transform: translateZ(0);
/* Use a fixed position to ensure it's in a separate layer from scrollable content */
position: fixed;
pointer-events: none; /* Don't block mouse events */
}
.local-badge:hover .local-path {
display: block;
pointer-events: auto; /* Allow interaction with the tooltip when visible */
}
.error-message {
color: var(--lora-error);
font-size: 0.9em;
margin-top: 4px;
}

View File

@@ -1,6 +1,6 @@
/* Support Modal Styles */
.support-modal {
max-width: 550px;
max-width: 570px;
}
.support-header {
@@ -141,7 +141,7 @@
.support-toggle:hover {
background: var(--lora-accent);
color: white;
color: var(--lora-error) !important;
transform: translateY(-2px);
}

View File

@@ -120,4 +120,63 @@
.tooltip:hover::after {
opacity: 1;
}
/* Toast Container for stacked notifications */
.toast-container {
position: fixed;
top: 0;
right: 0;
z-index: calc(var(--z-overlay) + 10);
display: flex;
flex-direction: column;
gap: 10px;
padding: 20px;
pointer-events: none; /* Allow clicking through the container */
width: 400px;
max-width: 100%;
}
/* Ensure each toast has pointer events */
.toast-container .toast {
pointer-events: auto;
position: relative; /* Override fixed positioning */
top: 0 !important; /* Let the container handle positioning */
right: 0 !important;
margin-bottom: 10px;
}
/* Add missing warning toast style */
.toast-warning {
border-left: 4px solid var(--lora-warning);
}
.toast-warning::before {
background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='%23ff9800'%3E%3Cpath d='M1 21h22L12 2 1 21zm12-3h-2v-2h2v2zm0-4h-2v-4h2v4z'/%3E%3C/svg%3E");
}
/* Improve toast animation */
.toast {
transform: translateX(120%);
opacity: 0;
transition: transform 0.3s cubic-bezier(0.4, 0, 0.2, 1),
opacity 0.3s cubic-bezier(0.4, 0, 0.2, 1);
}
.toast.show {
transform: translateX(0);
opacity: 1;
}
/* Responsive adjustments */
@media (max-width: 480px) {
.toast-container {
width: 100%;
padding: 10px;
}
.toast {
width: 100%;
max-width: none;
}
}

View File

@@ -153,56 +153,43 @@
border-top: 1px solid var(--lora-border);
margin-top: var(--space-2);
padding-top: var(--space-2);
}
/* Toggle switch styles */
.toggle-switch {
display: flex;
align-items: center;
gap: 12px;
justify-content: flex-start;
}
/* Override toggle switch styles for update preferences */
.update-preferences .toggle-switch {
position: relative;
display: inline-flex;
align-items: center;
width: auto;
height: 24px;
cursor: pointer;
user-select: none;
}
.toggle-switch input {
opacity: 0;
width: 0;
height: 0;
position: absolute;
}
.toggle-slider {
.update-preferences .toggle-slider {
position: relative;
display: inline-block;
width: 40px;
height: 20px;
background-color: var(--border-color);
border-radius: 20px;
transition: .4s;
width: 50px;
height: 24px;
flex-shrink: 0;
margin-right: 10px;
}
.toggle-slider:before {
position: absolute;
content: "";
height: 16px;
width: 16px;
left: 2px;
bottom: 2px;
background-color: white;
border-radius: 50%;
transition: .4s;
.update-preferences .toggle-label {
margin-left: 0;
white-space: nowrap;
line-height: 24px;
}
input:checked + .toggle-slider {
background-color: var(--lora-accent);
}
input:checked + .toggle-slider:before {
transform: translateX(20px);
}
.toggle-label {
font-size: 0.9em;
color: var(--text-color);
@media (max-width: 480px) {
.update-preferences {
flex-direction: row;
flex-wrap: wrap;
}
.update-preferences .toggle-label {
margin-top: 5px;
}
}

View File

@@ -1,7 +1,18 @@
.page-content {
height: calc(100vh - 48px); /* Full height minus header */
margin-top: 48px; /* Push down below header */
overflow-y: auto; /* Enable scrolling here */
width: 100%;
position: relative;
overflow-y: scroll;
}
.container {
max-width: 1400px;
margin: 20px auto;
padding: 0 15px;
position: relative;
z-index: var(--z-base);
}
.controls {
@@ -14,69 +25,101 @@
.actions {
display: flex;
align-items: center;
justify-content: space-between;
gap: var(--space-2);
flex-wrap: nowrap;
width: 100%;
}
/* Search and filter styles moved to components/search-filter.css */
/* Update corner-controls for collapsible behavior */
.corner-controls {
position: fixed;
top: 20px;
right: 20px;
z-index: var(--z-overlay);
.action-buttons {
display: flex;
flex-direction: column;
align-items: center;
transition: all 0.3s ease;
gap: var(--space-2);
flex-wrap: nowrap;
}
.corner-controls-toggle {
width: 36px;
height: 36px;
border-radius: 50%;
background: var(--card-bg);
border: 1px solid var(--border-color);
color: var(--text-color);
/* Action button styling */
.control-group {
position: relative;
}
.control-group button {
min-width: 100px;
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
gap: 4px;
border-radius: var(--border-radius-xs);
padding: 4px 10px;
border: 1px solid var(--border-color);
background: var(--card-bg);
color: var(--text-color);
font-size: 0.85em;
transition: all 0.2s ease;
z-index: 2;
margin-bottom: 10px;
cursor: pointer;
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
.corner-controls-toggle:hover {
.control-group button:hover {
border-color: var(--lora-accent);
background: var(--bg-color);
transform: translateY(-1px);
box-shadow: 0 3px 5px rgba(0, 0, 0, 0.08);
}
.control-group button:active {
transform: translateY(0);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
.control-group button i {
opacity: 0.8;
transition: opacity 0.2s ease;
}
.control-group button:hover i {
opacity: 1;
}
/* Active state for buttons that can be toggled */
.control-group button.active {
background: var(--lora-accent);
color: white;
transform: translateY(-2px);
border-color: var(--lora-accent);
}
.corner-controls-items {
display: flex;
flex-direction: column;
gap: 10px;
opacity: 0;
transform: translateY(-10px) scale(0.9);
transition: all 0.3s ease;
pointer-events: none;
/* Select dropdown styling */
.control-group select {
min-width: 100px;
padding: 4px 26px 4px 10px;
border-radius: var(--border-radius-xs);
border: 1px solid var(--border-color);
background-color: var(--card-bg);
color: var(--text-color);
font-size: 0.85em;
appearance: none;
-webkit-appearance: none;
-moz-appearance: none;
background-image: url("data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='currentColor' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e");
background-repeat: no-repeat;
background-position: right 6px center;
background-size: 14px;
cursor: pointer;
transition: all 0.2s ease;
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
/* Expanded state */
.corner-controls.expanded .corner-controls-items {
opacity: 1;
transform: translateY(0) scale(1);
pointer-events: all;
.control-group select:hover {
border-color: var(--lora-accent);
background-color: var(--bg-color);
transform: translateY(-1px);
box-shadow: 0 3px 5px rgba(0, 0, 0, 0.08);
}
/* Expanded state - only expand on hover if not already expanded by click */
.corner-controls:hover:not(.expanded) .corner-controls-items {
opacity: 1;
transform: translateY(0) scale(1);
pointer-events: all;
.control-group select:focus {
outline: none;
border-color: var(--lora-accent);
box-shadow: 0 0 0 2px oklch(var(--lora-accent) / 0.15);
}
/* Ensure hidden class works properly */
@@ -84,46 +127,6 @@
display: none !important;
}
/* Update toggle button styles */
.update-toggle {
width: 36px;
height: 36px;
border-radius: 50%;
background: var(--card-bg);
border: 1px solid var(--border-color);
color: var(--text-color); /* Changed from var(--lora-accent) to match other toggles */
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
transition: all 0.2s ease;
position: relative;
}
.update-toggle:hover {
background: var(--lora-accent);
color: white;
transform: translateY(-2px);
}
/* Update badge styles */
.update-badge {
position: absolute;
top: -3px;
right: -3px;
background-color: var(--lora-error);
width: 8px;
height: 8px;
border-radius: 50%;
box-shadow: 0 0 0 2px var(--card-bg);
}
/* Badge on corner toggle */
.corner-badge {
top: 0;
right: 0;
}
.folder-tags-container {
position: relative;
width: 100%;
@@ -131,11 +134,14 @@
}
.folder-tags {
display: flex;
gap: 4px;
padding: 2px 0;
flex-wrap: wrap;
transition: max-height 0.3s ease, opacity 0.2s ease;
max-height: 150px; /* Limit height to prevent overflow */
opacity: 1;
overflow-y: auto; /* Enable vertical scrolling */
padding-right: 40px; /* Make space for the toggle button */
margin-bottom: 5px; /* Add margin below the tags */
}
@@ -144,13 +150,15 @@
opacity: 0;
margin: 0;
padding-bottom: 0;
overflow: hidden;
}
.toggle-folders-container {
margin-left: auto;
}
/* Toggle Folders Button */
.toggle-folders-btn {
position: absolute;
bottom: 0; /* 固定在容器底部 */
right: 0; /* 固定在容器右侧 */
width: 36px;
height: 36px;
border-radius: 50%;
@@ -162,38 +170,33 @@
justify-content: center;
cursor: pointer;
transition: all 0.3s ease;
z-index: 2;
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
.toggle-folders-btn:hover {
background: var(--lora-accent);
color: white;
transform: translateY(-2px);
box-shadow: 0 3px 6px rgba(0, 0, 0, 0.1);
}
.toggle-folders-btn i {
transition: transform 0.3s ease;
}
/* 折叠状态样式 */
.folder-tags.collapsed + .toggle-folders-btn {
position: static;
margin-right: auto; /* 确保按钮在左侧 */
transform: translateY(0);
/* Icon-only button style */
.icon-only {
min-width: unset !important;
width: 32px !important;
padding: 0 !important;
height: 32px !important;
}
.folder-tags.collapsed + .toggle-folders-btn i {
/* Rotate icon when folders are collapsed */
.folder-tags.collapsed ~ .actions .toggle-folders-btn i {
transform: rotate(180deg);
}
/* 文件夹标签样式 */
.folder-tags {
display: flex;
gap: 4px;
padding: 2px 0;
flex-wrap: wrap;
}
/* Add custom scrollbar for better visibility */
.folder-tags::-webkit-scrollbar {
width: 6px;
@@ -217,16 +220,25 @@
cursor: pointer;
padding: 2px 8px;
margin: 2px;
border: 1px solid #ccc;
border: 1px solid var(--border-color);
border-radius: var(--border-radius-xs);
display: inline-block;
line-height: 1.2;
font-size: 14px;
background-color: var(--card-bg);
transition: all 0.2s ease;
}
.tag:hover {
border-color: var(--lora-accent);
background-color: oklch(var(--lora-accent) / 0.1);
transform: translateY(-1px);
}
.tag.active {
background-color: #007bff;
background-color: var(--lora-accent);
color: white;
border-color: var(--lora-accent);
}
/* Back to Top Button */
@@ -239,7 +251,7 @@
border-radius: 50%;
background: var(--card-bg);
border: 1px solid var(--border-color);
color: var (--text-color);
color: var(--text-color);
display: flex;
align-items: center;
justify-content: center;
@@ -249,6 +261,7 @@
transform: translateY(10px);
transition: all 0.3s ease;
z-index: var(--z-overlay);
box-shadow: 0 2px 5px rgba(0, 0, 0, 0.1);
}
.back-to-top.visible {
@@ -258,84 +271,10 @@
}
.back-to-top:hover {
background: var(--lora-accent);
color: white;
transform: translateY(-2px);
}
.theme-toggle {
width: 36px;
height: 36px;
border-radius: 50%;
background: var(--card-bg);
border: 1px solid var(--border-color);
color: var(--text-color);
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
transition: all 0.2s ease;
}
.theme-toggle:hover {
background: var(--lora-accent);
color: white;
transform: translateY(-2px);
}
.support-toggle {
width: 36px;
height: 36px;
border-radius: 50%;
background: var(--card-bg);
border: 1px solid var(--border-color);
color: var(--lora-error);
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
transition: all 0.2s ease;
}
.support-toggle:hover {
background: var(--lora-error);
color: white;
transform: translateY(-2px);
}
.support-toggle i {
font-size: 1.1em;
position: relative;
top: 1px;
left: -0.5px;
}
.theme-toggle img {
width: 20px;
height: 20px;
}
.theme-toggle .theme-icon {
width: 20px;
height: 20px;
position: absolute;
transition: opacity 0.2s ease;
}
.theme-toggle .light-icon {
opacity: 0;
}
.theme-toggle .dark-icon {
opacity: 1;
}
[data-theme="light"] .theme-toggle .light-icon {
opacity: 1;
}
[data-theme="light"] .theme-toggle .dark-icon {
opacity: 0;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15);
}
@media (max-width: 768px) {
@@ -343,55 +282,41 @@
flex-wrap: wrap;
gap: var(--space-1);
}
.controls {
flex-direction: column;
gap: 15px;
.action-buttons {
flex-wrap: wrap;
gap: var(--space-1);
width: 100%;
}
.toggle-folders-container {
margin-left: 0;
width: 100%;
display: flex;
justify-content: flex-end;
}
.folder-tags-container {
order: -1;
}
.toggle-folders-btn {
position: absolute;
bottom: 0;
right: 0;
transform: none; /* 移除transform防止hover时的位移 */
}
.toggle-folders-btn:hover {
transform: none; /* 移动端下禁用hover效果 */
}
.folder-tags.collapsed + .toggle-folders-btn {
position: relative;
transform: none;
transform: none; /* Disable hover effects on mobile */
}
.corner-controls {
top: 10px;
right: 10px;
.control-group button:hover {
transform: none; /* Disable hover effects on mobile */
}
.corner-controls-items {
display: none;
.control-group select:hover {
transform: none; /* Disable hover effects on mobile */
}
.corner-controls.expanded .corner-controls-items {
display: flex;
.tag:hover {
transform: none; /* Disable hover effects on mobile */
}
.back-to-top {
bottom: 60px; /* Give some extra space from bottom on mobile */
}
}
/* Standardize button widths in controls */
.control-group button {
min-width: 100px;
display: flex;
align-items: center;
justify-content: center;
gap: 6px;
}

View File

@@ -5,6 +5,7 @@
@import 'layout.css';
/* Import Components */
@import 'components/header.css';
@import 'components/card.css';
@import 'components/modal.css';
@import 'components/download-modal.css';
@@ -16,6 +17,9 @@
@import 'components/support-modal.css';
@import 'components/search-filter.css';
@import 'components/bulk.css';
@import 'components/shared.css';
@import 'components/filter-indicator.css';
@import 'components/initialization.css';
.initialization-notice {
display: flex;

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.9 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 165 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.6 MiB

View File

@@ -1 +1 @@
{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}
{"name":"","short_name":"","icons":[{"src":"/loras_static/images/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/loras_static/images/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 213 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 338 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.8 MiB

View File

@@ -0,0 +1,499 @@
// filepath: d:\Workspace\ComfyUI\custom_nodes\ComfyUI-Lora-Manager\static\js\api\baseModelApi.js
import { state, getCurrentPageState } from '../state/index.js';
import { showToast, toggleFolder } from '../utils/uiHelpers.js';
import { showDeleteModal, confirmDelete } from '../utils/modalUtils.js';
import { getSessionItem } from '../utils/storageHelpers.js';
/**
* Shared functionality for handling models (loras and checkpoints)
*/
// Generic function to load more models with pagination

/**
 * Build the query-string parameters for a model listing request.
 * Pure with respect to the DOM; reads pagination, sort, folder, search
 * and filter state from the page state object.
 *
 * @param {object} pageState - current page state (from getCurrentPageState()).
 * @param {string} modelType - 'lora' or 'checkpoint'; the two APIs encode
 *   multi-value filters differently (see comments below).
 * @returns {URLSearchParams} parameters ready to append to the endpoint.
 */
function buildModelListParams(pageState, modelType) {
    const params = new URLSearchParams({
        page: pageState.currentPage,
        page_size: pageState.pageSize || 20,
        sort_by: pageState.sortBy
    });

    if (pageState.activeFolder !== null) {
        params.append('folder', pageState.activeFolder);
    }

    // Add search parameters if there's a search term
    if (pageState.filters?.search) {
        params.append('search', pageState.filters.search);
        params.append('fuzzy', 'true');

        // Add search option parameters if available
        if (pageState.searchOptions) {
            params.append('search_filename', pageState.searchOptions.filename.toString());
            params.append('search_modelname', pageState.searchOptions.modelname.toString());
            if (pageState.searchOptions.tags !== undefined) {
                params.append('search_tags', pageState.searchOptions.tags.toString());
            }
            params.append('recursive', (pageState.searchOptions?.recursive ?? false).toString());
        }
    }

    // Add filter parameters if active
    if (pageState.filters) {
        // Checkpoints API expects individual 'tag' parameters,
        // Loras API expects comma-separated 'tags'
        if (pageState.filters.tags && pageState.filters.tags.length > 0) {
            if (modelType === 'checkpoint') {
                pageState.filters.tags.forEach(tag => {
                    params.append('tag', tag);
                });
            } else {
                params.append('tags', pageState.filters.tags.join(','));
            }
        }

        // Same split for base-model filters: repeated 'base_model' vs. 'base_models'
        if (pageState.filters.baseModel && pageState.filters.baseModel.length > 0) {
            if (modelType === 'checkpoint') {
                pageState.filters.baseModel.forEach(model => {
                    params.append('base_model', model);
                });
            } else {
                params.append('base_models', pageState.filters.baseModel.join(','));
            }
        }
    }

    // Lora-only: recipe-based hash filters handed over via session storage
    if (modelType === 'lora') {
        const filterLoraHash = getSessionItem('recipe_to_lora_filterLoraHash');
        const filterLoraHashes = getSessionItem('recipe_to_lora_filterLoraHashes');

        if (filterLoraHash) {
            params.append('lora_hash', filterLoraHash);
        } else if (filterLoraHashes) {
            try {
                if (Array.isArray(filterLoraHashes) && filterLoraHashes.length > 0) {
                    params.append('lora_hashes', filterLoraHashes.join(','));
                }
            } catch (error) {
                console.error('Error parsing lora hashes from session storage:', error);
            }
        }
    }

    return params;
}

/**
 * Load the next page of models (loras or checkpoints) and append their
 * cards to the grid. Handles pagination reset, search/filter parameters,
 * the empty-result placeholder, and optional folder-tag refresh.
 *
 * @param {object} options
 * @param {boolean} [options.resetPage=false] - restart from page 1 and clear the grid.
 * @param {boolean} [options.updateFolders=false] - rebuild folder tags from the response.
 * @param {string} [options.modelType='lora'] - 'lora' or 'checkpoint'.
 * @param {function} options.createCardFunction - builds a DOM card from a model item.
 * @param {string} [options.endpoint='/api/loras'] - listing API endpoint.
 * @returns {Promise<void>}
 */
export async function loadMoreModels(options = {}) {
    const {
        resetPage = false,
        updateFolders = false,
        modelType = 'lora', // 'lora' or 'checkpoint'
        createCardFunction,
        endpoint = '/api/loras'
    } = options;

    const pageState = getCurrentPageState();

    // Re-entrancy guard: skip if a load is in flight or there is nothing more
    // to fetch (unless the caller explicitly resets pagination).
    if (pageState.isLoading || (!pageState.hasMore && !resetPage)) return;

    pageState.isLoading = true;
    document.body.classList.add('loading');

    try {
        const gridId = modelType === 'checkpoint' ? 'checkpointGrid' : 'loraGrid';
        const grid = document.getElementById(gridId);

        // BUG FIX: the original dereferenced `grid` unconditionally further
        // down (innerHTML / appendChild), throwing when the element is absent.
        if (!grid) {
            console.warn(`Model grid #${gridId} not found; skipping load`);
            return;
        }

        // Reset to first page if requested
        if (resetPage) {
            pageState.currentPage = 1;
            grid.innerHTML = '';
        }

        const params = buildModelListParams(pageState, modelType);

        const response = await fetch(`${endpoint}?${params}`);

        if (!response.ok) {
            throw new Error(`Failed to fetch models: ${response.statusText}`);
        }

        const data = await response.json();

        if (data.items.length === 0 && pageState.currentPage === 1) {
            // First page and nothing at all: show the empty-state message.
            grid.innerHTML = `<div class="no-results">No ${modelType}s found in this folder</div>`;
            pageState.hasMore = false;
        } else if (data.items.length > 0) {
            pageState.hasMore = pageState.currentPage < data.total_pages;

            // Append model cards using the provided card creation function
            data.items.forEach(model => {
                grid.appendChild(createCardFunction(model));
            });

            // Increment the page number AFTER successful loading
            pageState.currentPage++;
        } else {
            pageState.hasMore = false;
        }

        if (updateFolders && data.folders) {
            updateFolderTags(data.folders);
        }
    } catch (error) {
        console.error(`Error loading ${modelType}s:`, error);
        showToast(`Failed to load ${modelType}s: ${error.message}`, 'error');
    } finally {
        pageState.isLoading = false;
        document.body.classList.remove('loading');
    }
}
// Update folder tags in the UI
/**
 * Rebuild the folder tag strip from the server-provided folder list,
 * preserving the currently active folder's highlight, re-wiring click
 * handlers, and scrolling the active tag into view.
 *
 * @param {string[]} folders - folder names to render as tags.
 */
export function updateFolderTags(folders) {
    const folderTagsContainer = document.querySelector('.folder-tags');
    if (!folderTagsContainer) return;

    // Keep track of currently selected folder
    const pageState = getCurrentPageState();
    const currentFolder = pageState.activeFolder;

    // Create HTML for folder tags
    const tagsHTML = folders.map(folder => {
        const isActive = folder === currentFolder;
        return `<div class="tag ${isActive ? 'active' : ''}" data-folder="${folder}">${folder}</div>`;
    }).join('');

    // Update the container (innerHTML replacement drops old handlers)
    folderTagsContainer.innerHTML = tagsHTML;

    // Reattach click handlers and ensure the active tag is visible.
    // BUG FIX: the previous `typeof toggleFolder === 'function'` guard was
    // always false because toggleFolder was never imported into this module,
    // so folder tags were silently unclickable. toggleFolder is now imported
    // from '../utils/uiHelpers.js'.
    const tags = folderTagsContainer.querySelectorAll('.tag');
    tags.forEach(tag => {
        tag.addEventListener('click', toggleFolder);
        if (tag.dataset.folder === currentFolder) {
            tag.scrollIntoView({ behavior: 'smooth', block: 'nearest' });
        }
    });
}
// Generic function to replace a model preview
/**
 * Let the user pick a replacement preview (image or mp4) via a transient
 * file input, then upload it for the given model file.
 *
 * @param {string} filePath - path of the model whose preview is replaced.
 * @param {string} [modelType='lora'] - 'lora' or 'checkpoint'.
 */
export function replaceModelPreview(filePath, modelType = 'lora') {
    // Create an off-screen file picker and trigger it immediately.
    const picker = document.createElement('input');
    picker.type = 'file';
    picker.accept = 'image/*,video/mp4';

    picker.onchange = async function() {
        if (!picker.files || !picker.files[0]) return;
        const chosenFile = picker.files[0];
        await uploadPreview(filePath, chosenFile, modelType);
    };

    picker.click();
}
// Delete a model (generic)
/**
 * Start the delete flow for a model. Checkpoints use an inline confirm
 * dialog; loras go through the dedicated delete modal.
 *
 * @param {string} filePath - path of the model file to delete.
 * @param {string} [modelType='lora'] - 'lora' or 'checkpoint'.
 */
export function deleteModel(filePath, modelType = 'lora') {
    if (modelType !== 'checkpoint') {
        showDeleteModal(filePath);
        return;
    }
    confirmDelete('Are you sure you want to delete this checkpoint?', () => {
        performDelete(filePath, modelType);
    });
}
// Reset and reload models
/**
 * Reset pagination and reload the first page of models by delegating to
 * the page-specific loader. The loader is called as
 * `loadMoreFunction(resetPage=true, updateFolders)`.
 *
 * @param {object} options
 * @param {boolean} [options.updateFolders=false] - refresh folder tags too.
 * @param {function} [options.loadMoreFunction] - page-specific loader; no-op if absent.
 * @returns {Promise<void>}
 */
export async function resetAndReload(options = {}) {
    const {
        updateFolders = false,
        loadMoreFunction
    } = options;

    // NOTE: an unused getCurrentPageState() call and unused modelType
    // destructure were removed; behavior for callers is unchanged
    // (extra option keys such as modelType are simply ignored).
    if (typeof loadMoreFunction === 'function') {
        await loadMoreFunction(true, updateFolders);
    }
}
// Generic function to refresh models
/**
 * Trigger a backend rescan of model files, then reload the listing.
 * Shows a simple loading overlay for the duration and a toast on
 * success or failure.
 *
 * @param {object} options
 * @param {string} [options.modelType='lora'] - used in user-facing messages.
 * @param {string} [options.scanEndpoint='/api/loras/scan'] - rescan API endpoint.
 * @param {function} [options.resetAndReloadFunction] - reloads the grid after the scan.
 * @returns {Promise<void>}
 */
export async function refreshModels({
    modelType = 'lora',
    scanEndpoint = '/api/loras/scan',
    resetAndReloadFunction
} = {}) {
    try {
        state.loadingManager.showSimpleLoading(`Refreshing ${modelType}s...`);

        const scanResponse = await fetch(scanEndpoint);
        if (!scanResponse.ok) {
            throw new Error(`Failed to refresh ${modelType}s: ${scanResponse.status} ${scanResponse.statusText}`);
        }

        if (typeof resetAndReloadFunction === 'function') {
            await resetAndReloadFunction();
        }

        showToast(`Refresh complete`, 'success');
    } catch (error) {
        console.error(`Refresh failed:`, error);
        showToast(`Failed to refresh ${modelType}s`, 'error');
    } finally {
        // Always dismiss the overlay, even on failure.
        state.loadingManager.hide();
        state.loadingManager.restoreProgressBar();
    }
}
// Generic fetch from Civitai
/**
 * Fetch Civitai metadata for all models of the given type. A WebSocket
 * to `/ws/fetch-progress` streams progress while an HTTP POST to
 * `fetchEndpoint` drives the actual fetch on the backend. Progress is
 * rendered through state.loadingManager's progress UI.
 *
 * @param {object} options
 * @param {string} [options.modelType='lora'] - 'lora' or 'checkpoint'.
 * @param {string} [options.fetchEndpoint='/api/fetch-all-civitai'] - POST endpoint.
 * @param {function} [options.resetAndReloadFunction] - reloads the grid on completion.
 * @returns {Promise<void>}
 */
export async function fetchCivitaiMetadata(options = {}) {
    const {
        modelType = 'lora',
        fetchEndpoint = '/api/fetch-all-civitai',
        resetAndReloadFunction
    } = options;

    let ws = null;

    await state.loadingManager.showWithProgress(async (loading) => {
        try {
            // Match ws/wss to the page protocol so this works behind HTTPS.
            const wsProtocol = window.location.protocol === 'https:' ? 'wss://' : 'ws://';
            ws = new WebSocket(`${wsProtocol}${window.location.host}/ws/fetch-progress`);

            // Settles when the backend reports 'completed' (resolve) or
            // 'error' (reject). The message handler is installed before the
            // socket opens so no progress frames are missed.
            const operationComplete = new Promise((resolve, reject) => {
                ws.onmessage = (event) => {
                    const data = JSON.parse(event.data);
                    switch(data.status) {
                        case 'started':
                            loading.setStatus('Starting metadata fetch...');
                            break;
                        case 'processing':
                            // Percentage with one decimal for the progress bar.
                            const percent = ((data.processed / data.total) * 100).toFixed(1);
                            loading.setProgress(percent);
                            loading.setStatus(
                                `Processing (${data.processed}/${data.total}) ${data.current_name}`
                            );
                            break;
                        case 'completed':
                            loading.setProgress(100);
                            loading.setStatus(
                                `Completed: Updated ${data.success} of ${data.processed} ${modelType}s`
                            );
                            resolve();
                            break;
                        case 'error':
                            reject(new Error(data.error));
                            break;
                    }
                };
                // NOTE(review): this onerror is overwritten by the
                // `ws.onerror = reject` in the connection promise below, so
                // socket errors after open reject the (already settled)
                // connection promise rather than operationComplete — confirm
                // this is intended.
                ws.onerror = (error) => {
                    reject(new Error('WebSocket error: ' + error.message));
                };
            });

            // Wait for the socket to open before kicking off the fetch, so
            // progress frames have somewhere to go.
            await new Promise((resolve, reject) => {
                ws.onopen = resolve;
                ws.onerror = reject;
            });

            // Checkpoint endpoint expects an explicit model_type; the lora
            // endpoint takes an empty body.
            const requestBody = modelType === 'checkpoint'
                ? JSON.stringify({ model_type: 'checkpoint' })
                : JSON.stringify({});

            const response = await fetch(fetchEndpoint, {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: requestBody
            });

            if (!response.ok) {
                throw new Error('Failed to fetch metadata');
            }

            // Block until the WebSocket stream reports completion.
            await operationComplete;

            if (typeof resetAndReloadFunction === 'function') {
                await resetAndReloadFunction();
            }
        } catch (error) {
            console.error('Error fetching metadata:', error);
            showToast('Failed to fetch metadata: ' + error.message, 'error');
        } finally {
            // Always close the socket, success or failure.
            if (ws) {
                ws.close();
            }
        }
    }, {
        initialMessage: 'Connecting...',
        completionMessage: 'Metadata update complete'
    });
}
// Generic function to refresh single model metadata
/**
 * Refresh Civitai metadata for one model file.
 *
 * @param {string} filePath - path of the model file.
 * @param {string} [modelType='lora'] - 'lora' or 'checkpoint'; selects the endpoint.
 * @returns {Promise<boolean>} true on success, false on any failure (a toast
 *   is shown either way; this never throws).
 */
export async function refreshSingleModelMetadata(filePath, modelType = 'lora') {
    try {
        state.loadingManager.showSimpleLoading('Refreshing metadata...');

        const endpoint = modelType === 'checkpoint'
            ? '/api/checkpoints/fetch-civitai'
            : '/api/fetch-civitai';

        const response = await fetch(endpoint, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({ file_path: filePath })
        });

        if (!response.ok) {
            throw new Error('Failed to refresh metadata');
        }

        const payload = await response.json();
        if (!payload.success) {
            // Surface the backend's error message when it provides one.
            throw new Error(payload.error || 'Failed to refresh metadata');
        }

        showToast('Metadata refreshed successfully', 'success');
        return true;
    } catch (error) {
        console.error('Error refreshing metadata:', error);
        showToast(error.message, 'error');
        return false;
    } finally {
        state.loadingManager.hide();
        state.loadingManager.restoreProgressBar();
    }
}
// Private methods

// Upload a preview image
/**
 * Upload a replacement preview (image or video) for a model and swap the
 * new preview into the model's card in the grid.
 *
 * @param {string} filePath - path of the model whose preview is replaced.
 * @param {File} file - the image/video file chosen by the user.
 * @param {string} [modelType='lora'] - 'lora' or 'checkpoint'; selects the endpoint.
 */
async function uploadPreview(filePath, file, modelType = 'lora') {
    const loadingOverlay = document.getElementById('loading-overlay');
    const loadingStatus = document.querySelector('.loading-status');

    try {
        if (loadingOverlay) loadingOverlay.style.display = 'flex';
        if (loadingStatus) loadingStatus.textContent = 'Uploading preview...';

        const formData = new FormData();

        // Use appropriate parameter names and endpoint based on model type
        // Prepare common form data
        formData.append('preview_file', file);
        formData.append('model_path', filePath);

        // Set endpoint based on model type
        const endpoint = modelType === 'checkpoint'
            ? '/api/checkpoints/replace-preview'
            : '/api/replace_preview';

        const response = await fetch(endpoint, {
            method: 'POST',
            body: formData
        });

        if (!response.ok) {
            throw new Error('Upload failed');
        }

        const data = await response.json();

        // Update the card preview in UI
        // NOTE(review): the selector targets `.lora-card` even when
        // modelType is 'checkpoint' — confirm checkpoint cards also carry
        // this class, otherwise their preview is not refreshed in place.
        const card = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
        if (card) {
            const previewContainer = card.querySelector('.card-preview');
            const oldPreview = previewContainer.querySelector('img, video');

            // For LoRA models, use timestamp to prevent caching
            if (modelType === 'lora') {
                state.previewVersions?.set(filePath, Date.now());
            }

            // Cache-busting timestamp appended to the preview URL below.
            const timestamp = Date.now();
            const previewUrl = data.preview_url ?
                `${data.preview_url}?t=${timestamp}` :
                `/api/model/preview_image?path=${encodeURIComponent(filePath)}&t=${timestamp}`;

            // Create appropriate element based on file type
            if (file.type.startsWith('video/')) {
                const video = document.createElement('video');
                video.controls = true;
                video.autoplay = true;
                video.muted = true;
                video.loop = true;
                video.src = previewUrl;
                oldPreview.replaceWith(video);
            } else {
                const img = document.createElement('img');
                img.src = previewUrl;
                oldPreview.replaceWith(img);
            }

            showToast('Preview updated successfully', 'success');
        }
    } catch (error) {
        console.error('Error uploading preview:', error);
        showToast('Failed to upload preview image', 'error');
    } finally {
        // Always dismiss the overlay, success or failure.
        if (loadingOverlay) loadingOverlay.style.display = 'none';
    }
}
// Private function to perform the delete operation
/**
 * POST the delete request to the backend and, on success, remove the
 * model's card from the grid. Errors are reported via toast; this never
 * throws to the caller.
 *
 * @param {string} filePath - path of the model file to delete.
 * @param {string} [modelType='lora'] - forwarded to the API as model_type.
 */
async function performDelete(filePath, modelType = 'lora') {
    try {
        showToast(`Deleting ${modelType}...`, 'info');

        const response = await fetch('/api/model/delete', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({
                file_path: filePath,
                model_type: modelType
            })
        });

        if (!response.ok) {
            throw new Error(`Failed to delete ${modelType}: ${response.status} ${response.statusText}`);
        }

        const result = await response.json();
        if (!result.success) {
            throw new Error(result.error || `Failed to delete ${modelType}`);
        }

        // Remove the deleted model's card from the UI, if it is rendered.
        const card = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
        card?.remove();

        showToast(`${modelType} deleted successfully`, 'success');
    } catch (error) {
        console.error(`Error deleting ${modelType}:`, error);
        showToast(`Failed to delete ${modelType}: ${error.message}`, 'error');
    }
}

View File

@@ -0,0 +1,57 @@
import { createCheckpointCard } from '../components/CheckpointCard.js';
import {
loadMoreModels,
resetAndReload as baseResetAndReload,
refreshModels as baseRefreshModels,
deleteModel as baseDeleteModel,
replaceModelPreview,
fetchCivitaiMetadata
} from './baseModelApi.js';
// Load more checkpoints with pagination
/**
 * Fetch a page of checkpoints and append their cards to the grid.
 * Always refreshes folder tags from the response.
 *
 * @param {boolean} [resetPagination=true] - restart from page 1 and clear
 *   the grid. NOTE(review): a default of true means a bare call always
 *   resets — confirm infinite-scroll callers pass false explicitly.
 */
export async function loadMoreCheckpoints(resetPagination = true) {
    const options = {
        modelType: 'checkpoint',
        endpoint: '/api/checkpoints',
        createCardFunction: createCheckpointCard,
        resetPage: resetPagination,
        updateFolders: true
    };
    return loadMoreModels(options);
}
// Reset and reload checkpoints
/**
 * Reset checkpoint pagination and reload the first page, refreshing
 * folder tags as part of the reload.
 */
export async function resetAndReload() {
    const options = {
        modelType: 'checkpoint',
        updateFolders: true,
        loadMoreFunction: loadMoreCheckpoints
    };
    return baseResetAndReload(options);
}
// Refresh checkpoints
/**
 * Trigger a backend rescan of checkpoint files, then reload the grid.
 */
export async function refreshCheckpoints() {
    const options = {
        modelType: 'checkpoint',
        scanEndpoint: '/api/checkpoints/scan',
        resetAndReloadFunction: resetAndReload
    };
    return baseRefreshModels(options);
}
// Delete a checkpoint
/**
 * Delete a checkpoint via the shared model-deletion flow (shows a
 * confirmation dialog first).
 *
 * @param {string} filePath - path of the checkpoint file to delete.
 */
export function deleteCheckpoint(filePath) {
    return baseDeleteModel(filePath, 'checkpoint');
}
// Replace checkpoint preview
/**
 * Open a file picker and upload a replacement preview for a checkpoint.
 *
 * @param {string} filePath - path of the checkpoint whose preview is replaced.
 */
export function replaceCheckpointPreview(filePath) {
    return replaceModelPreview(filePath, 'checkpoint');
}
// Fetch metadata from Civitai for checkpoints
/**
 * Fetch Civitai metadata for all checkpoints, with streamed progress,
 * then reload the grid.
 */
export async function fetchCivitai() {
    const options = {
        modelType: 'checkpoint',
        fetchEndpoint: '/api/checkpoints/fetch-all-civitai',
        resetAndReloadFunction: resetAndReload
    };
    return fetchCivitaiMetadata(options);
}

View File

@@ -1,250 +1,38 @@
import { state } from '../state/index.js';
import { showToast } from '../utils/uiHelpers.js';
import { createLoraCard } from '../components/LoraCard.js';
import { initializeInfiniteScroll } from '../utils/infiniteScroll.js';
import { showDeleteModal } from '../utils/modalUtils.js';
import { toggleFolder } from '../utils/uiHelpers.js';
import {
loadMoreModels,
resetAndReload as baseResetAndReload,
refreshModels as baseRefreshModels,
deleteModel as baseDeleteModel,
replaceModelPreview,
fetchCivitaiMetadata,
refreshSingleModelMetadata
} from './baseModelApi.js';
export async function loadMoreLoras(boolUpdateFolders = false) {
if (state.isLoading || !state.hasMore) return;
state.isLoading = true;
try {
const params = new URLSearchParams({
page: state.currentPage,
page_size: 20,
sort_by: state.sortBy
});
// 使用 state 中的 searchManager 获取递归搜索状态
const isRecursiveSearch = state.searchManager?.isRecursiveSearch ?? false;
if (state.activeFolder !== null) {
params.append('folder', state.activeFolder);
params.append('recursive', isRecursiveSearch.toString());
}
// Add search parameters if there's a search term
const searchInput = document.getElementById('searchInput');
if (searchInput && searchInput.value.trim()) {
params.append('search', searchInput.value.trim());
params.append('fuzzy', 'true');
}
// Add filter parameters if active
if (state.filters && state.filters.baseModel && state.filters.baseModel.length > 0) {
// Convert the array of base models to a comma-separated string
params.append('base_models', state.filters.baseModel.join(','));
}
console.log('Loading loras with params:', params.toString());
const response = await fetch(`/api/loras?${params}`);
if (!response.ok) {
throw new Error(`Failed to fetch loras: ${response.statusText}`);
}
const data = await response.json();
console.log('Received data:', data);
if (data.items.length === 0 && state.currentPage === 1) {
const grid = document.getElementById('loraGrid');
grid.innerHTML = '<div class="no-results">No loras found in this folder</div>';
state.hasMore = false;
} else if (data.items.length > 0) {
state.hasMore = state.currentPage < data.total_pages;
state.currentPage++;
appendLoraCards(data.items);
const sentinel = document.getElementById('scroll-sentinel');
if (sentinel && state.observer) {
state.observer.observe(sentinel);
}
} else {
state.hasMore = false;
}
if (boolUpdateFolders && data.folders) {
updateFolderTags(data.folders);
}
} catch (error) {
console.error('Error loading loras:', error);
showToast('Failed to load loras: ' + error.message, 'error');
} finally {
state.isLoading = false;
}
}
/**
 * Rebuild the folder tag bar from a list of folder names, keeping the
 * currently active folder highlighted and scrolled into view.
 *
 * Fix: the original span was truncated (the `forEach` callback and the
 * function itself were never closed), leaving a syntax error; the missing
 * closers are restored here with the visible body kept intact.
 *
 * @param {string[]} folders - folder names to render as tags.
 */
function updateFolderTags(folders) {
    const folderTagsContainer = document.querySelector('.folder-tags');
    if (!folderTagsContainer) return;
    // Keep track of currently selected folder so it stays highlighted.
    const currentFolder = state.activeFolder;
    // Create HTML for folder tags
    const tagsHTML = folders.map(folder => {
        const isActive = folder === currentFolder;
        return `<div class="tag ${isActive ? 'active' : ''}" data-folder="${folder}">${folder}</div>`;
    }).join('');
    // Update the container
    folderTagsContainer.innerHTML = tagsHTML;
    // Reattach click handlers and ensure the active tag is visible.
    // NOTE(review): toggleFolder is defined elsewhere in this module — confirm.
    const tags = folderTagsContainer.querySelectorAll('.tag');
    tags.forEach(tag => {
        tag.addEventListener('click', toggleFolder);
        if (tag.dataset.folder === currentFolder) {
            tag.scrollIntoView({ behavior: 'smooth', block: 'nearest' });
        }
    });
}
/**
 * Load the next page of lora cards via the shared model loader.
 *
 * @param {boolean} resetPage - when true, restart from page 1.
 * @param {boolean} updateFolders - when true, refresh the folder tag bar.
 */
export async function loadMoreLoras(resetPage = false, updateFolders = false) {
    const options = {
        resetPage,
        updateFolders,
        modelType: 'lora',
        createCardFunction: createLoraCard,
        endpoint: '/api/loras'
    };
    return loadMoreModels(options);
}
/**
 * Kick off a bulk Civitai metadata fetch for every lora.
 * Delegates to the shared fetchCivitaiMetadata helper, which drives the
 * progress UI (WebSocket updates) and reloads the grid when finished.
 *
 * Fix: this span was a bad diff merge — the old inline WebSocket
 * implementation was concatenated with the refactored delegation, leaving
 * an unclosed options object (syntax error). Only the refactored
 * delegation, visible at the tail of the merged span, is kept.
 *
 * @returns {Promise<void>} resolves when the fetch flow completes.
 */
export async function fetchCivitai() {
    return fetchCivitaiMetadata({
        modelType: 'lora',
        fetchEndpoint: '/api/fetch-all-civitai',
        resetAndReloadFunction: resetAndReload
    });
}
/**
 * Delete a lora model file via the shared model-delete flow.
 *
 * Fix: a stale pre-refactor `showDeleteModal(filePath)` call was left in
 * front of the delegation by a bad merge; baseDeleteModel owns the whole
 * confirmation/delete flow now, so the extra call is removed.
 *
 * @param {string} filePath - path of the lora to delete.
 */
export async function deleteModel(filePath) {
    return baseDeleteModel(filePath, 'lora');
}
/**
 * Replace the preview image/video for a lora card.
 * Delegates to the shared replaceModelPreview flow (file picker, upload,
 * preview-version bump, card refresh).
 *
 * Fix: a bad merge left the entire old inline implementation (manual file
 * input, POST to /api/replace_preview, DOM swap) in front of the
 * delegation, so the picker opened twice per click. Only the refactored
 * delegation is kept.
 *
 * @param {string} filePath - path of the lora whose preview is replaced.
 */
export async function replacePreview(filePath) {
    return replaceModelPreview(filePath, 'lora');
}
/**
 * Append freshly created lora cards to the grid.
 * Cards are inserted before the infinite-scroll sentinel when it exists, so
 * the sentinel stays the grid's last child; otherwise they are appended.
 *
 * Fixes: the span contained a stray diff hunk header (`@@ ... @@`) inside
 * the body (syntax error) and a leftover duplicate `grid.appendChild(card)`
 * after the if/else, which moved every card after the sentinel. The local
 * `grid`/`sentinel` lookups, elided by the diff, are reconstructed from the
 * element ids used elsewhere in this module ('loraGrid', 'scroll-sentinel').
 *
 * @param {Array<Object>} loras - lora records to render as cards.
 */
export function appendLoraCards(loras) {
    const grid = document.getElementById('loraGrid');
    const sentinel = document.getElementById('scroll-sentinel');
    loras.forEach(lora => {
        const card = createLoraCard(lora);
        if (sentinel) {
            grid.insertBefore(card, sentinel);
        } else {
            grid.appendChild(card);
        }
    });
}
/**
 * Reset paging state and reload the lora grid from page 1 via the shared
 * reset flow.
 *
 * Fix: a bad merge concatenated the old inline implementation with the
 * refactored delegation — two `resetAndReload` definitions, the first never
 * closed (syntax error). Only the refactored delegation is kept.
 *
 * @param {boolean} updateFolders - refresh the folder tag bar as part of
 *   the reload.
 */
export async function resetAndReload(updateFolders = false) {
    return baseResetAndReload({
        updateFolders,
        modelType: 'lora',
        loadMoreFunction: loadMoreLoras
    });
}
/**
 * Rescan the lora directories and reload the grid via the shared refresh
 * flow.
 *
 * Fix: a bad merge left the old try/catch implementation (which itself
 * called resetAndReload) in front of the delegation, causing a double
 * refresh on every call. Only the refactored delegation is kept.
 *
 * @returns {Promise<void>} resolves when the refresh flow completes.
 */
export async function refreshLoras() {
    return baseRefreshModels({
        modelType: 'lora',
        scanEndpoint: '/api/loras/scan',
        resetAndReloadFunction: resetAndReload
    });
}
/**
 * Refresh Civitai metadata for a single lora file and, on success, reload
 * the grid so the updated data becomes visible.
 *
 * @param {string} filePath - path of the lora to refresh.
 */
export async function refreshSingleLoraMetadata(filePath) {
    const refreshed = await refreshSingleModelMetadata(filePath, 'lora');
    if (!refreshed) return;
    // Re-render the current view with the fresh metadata.
    await resetAndReload();
}
/**
 * Fetch the long-form model description for a lora from the backend.
 *
 * Fix: a bad merge interleaved the old refresh-metadata implementation with
 * this function, leaving mismatched braces (syntax error). The refactored
 * body, reconstructed from the merged span's added lines, is kept.
 *
 * @param {string|number} modelId - Civitai model id.
 * @param {string} filePath - local lora file path (backend uses it for caching).
 * @returns {Promise<Object>} parsed JSON description payload.
 * @throws {Error} when the request fails; the error is logged and rethrown.
 */
export async function fetchModelDescription(modelId, filePath) {
    try {
        const response = await fetch(`/api/lora-model-description?model_id=${modelId}&file_path=${encodeURIComponent(filePath)}`);
        if (!response.ok) {
            throw new Error(`Failed to fetch model description: ${response.statusText}`);
        }
        return await response.json();
    } catch (error) {
        console.error('Error fetching model description:', error);
        throw error;
    }
}

55
static/js/checkpoints.js Normal file
View File

@@ -0,0 +1,55 @@
import { appCore } from './core.js';
import { initializeInfiniteScroll } from './utils/infiniteScroll.js';
import { confirmDelete, closeDeleteModal } from './utils/modalUtils.js';
import { createPageControls } from './components/controls/index.js';
import { loadMoreCheckpoints } from './api/checkpointApi.js';
import { CheckpointDownloadManager } from './managers/CheckpointDownloadManager.js';
// Initialize the Checkpoints page
/**
 * Drives the Checkpoints page: wires up page controls, the checkpoint
 * download manager, and the minimal set of globals legacy code still needs.
 */
class CheckpointsPageManager {
    constructor() {
        // Page-level controls (sort, folder filter, visibility toggles).
        this.pageControls = createPageControls('checkpoints');
        // The download manager is reachable globally for the download UI.
        window.checkpointDownloadManager = new CheckpointDownloadManager();
        this._registerGlobals();
    }
    _registerGlobals() {
        // Keep the global surface minimal: only what inline handlers and
        // the FilterManager still reach for.
        window.confirmDelete = confirmDelete;
        window.closeDeleteModal = closeDeleteModal;
        // FilterManager compatibility shim.
        window.checkpointManager = {
            loadCheckpoints: (reset) => loadMoreCheckpoints(reset)
        };
    }
    async initialize() {
        // Restore persisted folder-filter state, then wire up scrolling and
        // the features shared by every page.
        this.pageControls.restoreFolderFilter();
        this.pageControls.initFolderTagsVisibility();
        initializeInfiniteScroll('checkpoints');
        appCore.initializePageFeatures();
        console.log('Checkpoints Manager initialized');
    }
}
// Boot the page once the DOM is ready: core services first, then the
// checkpoints page itself.
document.addEventListener('DOMContentLoaded', async () => {
    await appCore.initialize();
    const page = new CheckpointsPageManager();
    await page.initialize();
});

View File

@@ -0,0 +1,313 @@
import { showToast } from '../utils/uiHelpers.js';
import { state } from '../state/index.js';
import { showCheckpointModal } from './checkpointModal/index.js';
import { NSFW_LEVELS } from '../utils/constants.js';
import { replaceCheckpointPreview as apiReplaceCheckpointPreview } from '../api/checkpointApi.js';
/**
 * Build and return the DOM card element for a checkpoint model.
 * Stores the checkpoint's fields on the card's dataset, renders the preview
 * (image or mp4 video, optionally blurred for NSFW content per user
 * settings), and wires up all card interactions: open-details modal, blur
 * toggle, copy name, open-on-Civitai, delete, replace preview, and optional
 * play-on-hover for video previews.
 *
 * @param {Object} checkpoint - checkpoint record (file info, optional
 *   civitai metadata, tags, preview url, NSFW level).
 * @returns {HTMLDivElement} the fully wired card element.
 */
export function createCheckpointCard(checkpoint) {
const card = document.createElement('div');
card.className = 'lora-card'; // Reuse the same class for styling
// Persist the record on the element so handlers can read it back later.
card.dataset.sha256 = checkpoint.sha256;
card.dataset.filepath = checkpoint.file_path;
card.dataset.name = checkpoint.model_name;
card.dataset.file_name = checkpoint.file_name;
card.dataset.folder = checkpoint.folder;
card.dataset.modified = checkpoint.modified;
card.dataset.file_size = checkpoint.file_size;
card.dataset.from_civitai = checkpoint.from_civitai;
card.dataset.notes = checkpoint.notes || '';
card.dataset.base_model = checkpoint.base_model || 'Unknown';
// Store metadata if available (serialized; parsed back in the click handler)
if (checkpoint.civitai) {
card.dataset.meta = JSON.stringify(checkpoint.civitai || {});
}
// Store tags if available
if (checkpoint.tags && Array.isArray(checkpoint.tags)) {
card.dataset.tags = JSON.stringify(checkpoint.tags);
}
if (checkpoint.modelDescription) {
card.dataset.modelDescription = checkpoint.modelDescription;
}
// Store NSFW level if available (0 = unrated)
const nsfwLevel = checkpoint.preview_nsfw_level !== undefined ? checkpoint.preview_nsfw_level : 0;
card.dataset.nsfwLevel = nsfwLevel;
// Determine if the preview should be blurred based on NSFW level and user settings
const shouldBlur = state.settings.blurMatureContent && nsfwLevel > NSFW_LEVELS.PG13;
if (shouldBlur) {
card.classList.add('nsfw-content');
}
// Determine preview URL; cache-bust with the stored per-file preview version
const previewUrl = checkpoint.preview_url || '/loras_static/images/no-preview.png';
const version = state.previewVersions ? state.previewVersions.get(checkpoint.file_path) : null;
const versionedPreviewUrl = version ? `${previewUrl}?t=${version}` : previewUrl;
// Determine NSFW warning text based on level
let nsfwText = "Mature Content";
if (nsfwLevel >= NSFW_LEVELS.XXX) {
nsfwText = "XXX-rated Content";
} else if (nsfwLevel >= NSFW_LEVELS.X) {
nsfwText = "X-rated Content";
} else if (nsfwLevel >= NSFW_LEVELS.R) {
nsfwText = "R-rated Content";
}
// Check if autoplayOnHover is enabled for video previews
const autoplayOnHover = state.global?.settings?.autoplayOnHover || false;
const isVideo = previewUrl.endsWith('.mp4');
// When autoplay-on-hover is on, the video starts paused (played via mouse events below).
const videoAttrs = autoplayOnHover ? 'controls muted loop' : 'controls autoplay muted loop';
card.innerHTML = `
<div class="card-preview ${shouldBlur ? 'blurred' : ''}">
${isVideo ?
`<video ${videoAttrs}>
<source src="${versionedPreviewUrl}" type="video/mp4">
</video>` :
`<img src="${versionedPreviewUrl}" alt="${checkpoint.model_name}">`
}
<div class="card-header">
${shouldBlur ?
`<button class="toggle-blur-btn" title="Toggle blur">
<i class="fas fa-eye"></i>
</button>` : ''}
<span class="base-model-label ${shouldBlur ? 'with-toggle' : ''}" title="${checkpoint.base_model}">
${checkpoint.base_model}
</span>
<div class="card-actions">
<i class="fas fa-globe"
title="${checkpoint.from_civitai ? 'View on Civitai' : 'Not available from Civitai'}"
${!checkpoint.from_civitai ? 'style="opacity: 0.5; cursor: not-allowed"' : ''}>
</i>
<i class="fas fa-copy"
title="Copy Checkpoint Name">
</i>
<i class="fas fa-trash"
title="Delete Model">
</i>
</div>
</div>
${shouldBlur ? `
<div class="nsfw-overlay">
<div class="nsfw-warning">
<p>${nsfwText}</p>
<button class="show-content-btn">Show</button>
</div>
</div>
` : ''}
<div class="card-footer">
<div class="model-info">
<span class="model-name">${checkpoint.model_name}</span>
</div>
<div class="card-actions">
<i class="fas fa-image"
title="Replace Preview Image">
</i>
</div>
</div>
</div>
`;
// Main card click event: open the details modal with data read back from
// the dataset (so later dataset edits are reflected).
card.addEventListener('click', () => {
// Show checkpoint details modal
const checkpointMeta = {
sha256: card.dataset.sha256,
file_path: card.dataset.filepath,
model_name: card.dataset.name,
file_name: card.dataset.file_name,
folder: card.dataset.folder,
modified: card.dataset.modified,
file_size: parseInt(card.dataset.file_size || '0'),
from_civitai: card.dataset.from_civitai === 'true',
base_model: card.dataset.base_model,
notes: card.dataset.notes || '',
preview_url: versionedPreviewUrl,
// Parse civitai metadata from the card's dataset
civitai: (() => {
try {
return JSON.parse(card.dataset.meta || '{}');
} catch (e) {
console.error('Failed to parse civitai metadata:', e);
return {}; // Return empty object on error
}
})(),
tags: (() => {
try {
return JSON.parse(card.dataset.tags || '[]');
} catch (e) {
console.error('Failed to parse tags:', e);
return []; // Return empty array on error
}
})(),
modelDescription: card.dataset.modelDescription || ''
};
showCheckpointModal(checkpointMeta);
});
// Toggle blur button functionality (only rendered when shouldBlur is true)
const toggleBlurBtn = card.querySelector('.toggle-blur-btn');
if (toggleBlurBtn) {
toggleBlurBtn.addEventListener('click', (e) => {
e.stopPropagation();
const preview = card.querySelector('.card-preview');
const isBlurred = preview.classList.toggle('blurred');
const icon = toggleBlurBtn.querySelector('i');
// Update the icon based on blur state
if (isBlurred) {
icon.className = 'fas fa-eye';
} else {
icon.className = 'fas fa-eye-slash';
}
// Toggle the overlay visibility
const overlay = card.querySelector('.nsfw-overlay');
if (overlay) {
overlay.style.display = isBlurred ? 'flex' : 'none';
}
});
}
// Show content button functionality (inside the NSFW overlay)
const showContentBtn = card.querySelector('.show-content-btn');
if (showContentBtn) {
showContentBtn.addEventListener('click', (e) => {
e.stopPropagation();
const preview = card.querySelector('.card-preview');
preview.classList.remove('blurred');
// Update the toggle button icon
const toggleBtn = card.querySelector('.toggle-blur-btn');
if (toggleBtn) {
toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
}
// Hide the overlay
const overlay = card.querySelector('.nsfw-overlay');
if (overlay) {
overlay.style.display = 'none';
}
});
}
// Copy button click event: copy the checkpoint file name to the clipboard
card.querySelector('.fa-copy')?.addEventListener('click', async e => {
e.stopPropagation();
const checkpointName = card.dataset.file_name;
try {
// Modern clipboard API (requires secure context)
if (navigator.clipboard && window.isSecureContext) {
await navigator.clipboard.writeText(checkpointName);
} else {
// Fallback for older browsers: off-screen textarea + execCommand
const textarea = document.createElement('textarea');
textarea.value = checkpointName;
textarea.style.position = 'absolute';
textarea.style.left = '-99999px';
document.body.appendChild(textarea);
textarea.select();
document.execCommand('copy');
document.body.removeChild(textarea);
}
showToast('Checkpoint name copied', 'success');
} catch (err) {
console.error('Copy failed:', err);
showToast('Copy failed', 'error');
}
});
// Civitai button click event (only wired when the model came from Civitai)
if (checkpoint.from_civitai) {
card.querySelector('.fa-globe')?.addEventListener('click', e => {
e.stopPropagation();
openCivitai(checkpoint.model_name);
});
}
// Delete button click event
card.querySelector('.fa-trash')?.addEventListener('click', e => {
e.stopPropagation();
deleteCheckpoint(checkpoint.file_path);
});
// Replace preview button click event
card.querySelector('.fa-image')?.addEventListener('click', e => {
e.stopPropagation();
replaceCheckpointPreview(checkpoint.file_path);
});
// Add autoplayOnHover handlers for video elements if needed
const videoElement = card.querySelector('video');
if (videoElement && autoplayOnHover) {
const cardPreview = card.querySelector('.card-preview');
// Remove autoplay attribute and pause initially
videoElement.removeAttribute('autoplay');
videoElement.pause();
// Add mouse events to trigger play/pause; rewind on leave
cardPreview.addEventListener('mouseenter', () => {
videoElement.play();
});
cardPreview.addEventListener('mouseleave', () => {
videoElement.pause();
videoElement.currentTime = 0;
});
}
return card;
}
// These functions will be implemented in checkpointApi.js
/**
 * Open the Civitai page for a checkpoint. Prefers the global handler
 * registered by PageControls; otherwise builds a model URL from the card's
 * stored Civitai metadata, falling back to a name search on Civitai.
 *
 * @param {string} modelName - checkpoint model name (matches data-name).
 */
function openCivitai(modelName) {
    if (window.openCivitai) {
        window.openCivitai(modelName);
        return;
    }
    // Fallback: locate the card and read its serialized Civitai metadata.
    const card = document.querySelector(`.lora-card[data-name="${modelName}"]`);
    if (!card) return;
    const metaData = JSON.parse(card.dataset.meta || '{}');
    const civitaiId = metaData.modelId;
    const versionId = metaData.id;
    if (!civitaiId) {
        // No model id recorded: search Civitai by name instead.
        window.open(`https://civitai.com/models?query=${encodeURIComponent(modelName)}`, '_blank');
        return;
    }
    let url = `https://civitai.com/models/${civitaiId}`;
    if (versionId) {
        url += `?modelVersionId=${versionId}`;
    }
    window.open(url, '_blank');
}
/**
 * Delete a checkpoint file. Uses the globally registered handler when one
 * exists; otherwise lazily loads the modal-based delete flow.
 *
 * @param {string} filePath - path of the checkpoint to delete.
 */
function deleteCheckpoint(filePath) {
    if (window.deleteCheckpoint) {
        window.deleteCheckpoint(filePath);
        return;
    }
    // Fallback: dynamic import keeps modalUtils out of the initial bundle.
    import('../utils/modalUtils.js').then(({ showDeleteModal }) => {
        showDeleteModal(filePath, 'checkpoint');
    });
}
/**
 * Replace a checkpoint's preview media, preferring the globally registered
 * handler and falling back to the API helper.
 *
 * @param {string} filePath - path of the checkpoint whose preview changes.
 */
function replaceCheckpointPreview(filePath) {
    if (window.replaceCheckpointPreview) {
        window.replaceCheckpointPreview(filePath);
        return;
    }
    apiReplaceCheckpointPreview(filePath);
}

View File

@@ -1,9 +1,13 @@
import { refreshSingleLoraMetadata } from '../api/loraApi.js';
import { showToast, getNSFWLevelName } from '../utils/uiHelpers.js';
import { NSFW_LEVELS } from '../utils/constants.js';
import { getStorageItem } from '../utils/storageHelpers.js';
export class LoraContextMenu {
constructor() {
// Root element of the right-click context menu.
this.menu = document.getElementById('loraContextMenu');
// Card the menu was last opened for; set by showMenu, cleared on hide.
this.currentCard = null;
// Floating panel used to pick a NSFW level for a card.
this.nsfwSelector = document.getElementById('nsfwLevelSelector');
this.init();
}
@@ -58,10 +62,274 @@ export class LoraContextMenu {
case 'refresh-metadata':
refreshSingleLoraMetadata(this.currentCard.dataset.filepath);
break;
case 'set-nsfw':
this.showNSFWLevelSelector(null, null, this.currentCard);
break;
}
this.hideMenu();
});
// Initialize NSFW Level Selector events
this.initNSFWSelector();
}
/**
 * Wire up the NSFW-level selector panel: close button, one handler per
 * level button (persists the level, updates the card's dataset and blur
 * state), and a document-level click handler that dismisses the panel when
 * clicking outside it.
 */
initNSFWSelector() {
// Close button hides the panel without saving anything.
const closeBtn = this.nsfwSelector.querySelector('.close-nsfw-selector');
closeBtn.addEventListener('click', () => {
this.nsfwSelector.style.display = 'none';
});
// Level buttons: persist the chosen level for the card recorded on the panel.
const levelButtons = this.nsfwSelector.querySelectorAll('.nsfw-level-btn');
levelButtons.forEach(btn => {
btn.addEventListener('click', async () => {
const level = parseInt(btn.dataset.level);
// cardPath was stashed on the panel by showNSFWLevelSelector.
const filePath = this.nsfwSelector.dataset.cardPath;
if (!filePath) return;
try {
await this.saveModelMetadata(filePath, { preview_nsfw_level: level });
// Update card data so the UI reflects the new level immediately.
const card = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
if (card) {
let metaData = {};
try {
metaData = JSON.parse(card.dataset.meta || '{}');
} catch (err) {
console.error('Error parsing metadata:', err);
}
metaData.preview_nsfw_level = level;
card.dataset.meta = JSON.stringify(metaData);
card.dataset.nsfwLevel = level.toString();
// Apply blur effect immediately
this.updateCardBlurEffect(card, level);
}
showToast(`Content rating set to ${getNSFWLevelName(level)}`, 'success');
this.nsfwSelector.style.display = 'none';
} catch (error) {
showToast(`Failed to set content rating: ${error.message}`, 'error');
}
});
});
// Close when clicking outside the panel (but not on the menu item that opens it).
document.addEventListener('click', (e) => {
if (this.nsfwSelector.style.display === 'block' &&
!this.nsfwSelector.contains(e.target) &&
!e.target.closest('.context-menu-item[data-action="set-nsfw"]')) {
this.nsfwSelector.style.display = 'none';
}
});
}
async saveModelMetadata(filePath, data) {
const response = await fetch('/api/loras/save-metadata', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
file_path: filePath,
...data
})
});
if (!response.ok) {
throw new Error('Failed to save metadata');
}
return await response.json();
}
/**
 * Apply or remove the NSFW blur treatment on a card after its level changed.
 * When the level meets the user's blur threshold, ensures the preview is
 * blurred and that the NSFW overlay (warning text + "Show" button) and the
 * header blur-toggle button exist and are in sync; otherwise removes the
 * blur and hides the overlay. Idempotent: reuses existing overlay/button
 * elements when present.
 *
 * @param {HTMLElement} card - the lora card element to update.
 * @param {number} level - the new NSFW level for the card's preview.
 */
updateCardBlurEffect(card, level) {
// Get user settings for blur threshold (defaults to 4 when unset).
const blurThreshold = parseInt(getStorageItem('nsfwBlurLevel') || '4');
// Get card preview container
const previewContainer = card.querySelector('.card-preview');
if (!previewContainer) return;
// Get preview media element
const previewMedia = previewContainer.querySelector('img') || previewContainer.querySelector('video');
if (!previewMedia) return;
// Check if blur should be applied
if (level >= blurThreshold) {
// Add blur class to the preview container
previewContainer.classList.add('blurred');
// Get or create the NSFW overlay
let nsfwOverlay = previewContainer.querySelector('.nsfw-overlay');
if (!nsfwOverlay) {
// Create new overlay
nsfwOverlay = document.createElement('div');
nsfwOverlay.className = 'nsfw-overlay';
// Create and configure the warning content
const warningContent = document.createElement('div');
warningContent.className = 'nsfw-warning';
// Determine NSFW warning text based on level
let nsfwText = "Mature Content";
if (level >= NSFW_LEVELS.XXX) {
nsfwText = "XXX-rated Content";
} else if (level >= NSFW_LEVELS.X) {
nsfwText = "X-rated Content";
} else if (level >= NSFW_LEVELS.R) {
nsfwText = "R-rated Content";
}
// Add warning text and show button
warningContent.innerHTML = `
<p>${nsfwText}</p>
<button class="show-content-btn">Show</button>
`;
// Add click event to the show button: unblur and hide the overlay.
const showBtn = warningContent.querySelector('.show-content-btn');
showBtn.addEventListener('click', (e) => {
e.stopPropagation();
previewContainer.classList.remove('blurred');
nsfwOverlay.style.display = 'none';
// Update toggle button icon if it exists
const toggleBtn = card.querySelector('.toggle-blur-btn');
if (toggleBtn) {
toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
}
});
nsfwOverlay.appendChild(warningContent);
previewContainer.appendChild(nsfwOverlay);
} else {
// Update existing overlay's warning text for the new level.
const warningText = nsfwOverlay.querySelector('p');
if (warningText) {
let nsfwText = "Mature Content";
if (level >= NSFW_LEVELS.XXX) {
nsfwText = "XXX-rated Content";
} else if (level >= NSFW_LEVELS.X) {
nsfwText = "X-rated Content";
} else if (level >= NSFW_LEVELS.R) {
nsfwText = "R-rated Content";
}
warningText.textContent = nsfwText;
}
nsfwOverlay.style.display = 'flex';
}
// Get or create the toggle button in the header.
// NOTE(review): this queries .card-header inside the preview container —
// assumes the header is nested inside .card-preview; confirm against markup.
const cardHeader = previewContainer.querySelector('.card-header');
if (cardHeader) {
let toggleBtn = cardHeader.querySelector('.toggle-blur-btn');
if (!toggleBtn) {
toggleBtn = document.createElement('button');
toggleBtn.className = 'toggle-blur-btn';
toggleBtn.title = 'Toggle blur';
toggleBtn.innerHTML = '<i class="fas fa-eye"></i>';
// Add click event to toggle button
toggleBtn.addEventListener('click', (e) => {
e.stopPropagation();
const isBlurred = previewContainer.classList.toggle('blurred');
const icon = toggleBtn.querySelector('i');
// Update icon and overlay visibility
if (isBlurred) {
icon.className = 'fas fa-eye';
nsfwOverlay.style.display = 'flex';
} else {
icon.className = 'fas fa-eye-slash';
nsfwOverlay.style.display = 'none';
}
});
// Add to the beginning of header
cardHeader.insertBefore(toggleBtn, cardHeader.firstChild);
// Update base model label class so it leaves room for the button.
const baseModelLabel = cardHeader.querySelector('.base-model-label');
if (baseModelLabel && !baseModelLabel.classList.contains('with-toggle')) {
baseModelLabel.classList.add('with-toggle');
}
} else {
// Update existing toggle button
toggleBtn.querySelector('i').className = 'fas fa-eye';
}
}
} else {
// Remove blur
previewContainer.classList.remove('blurred');
// Hide overlay if it exists
const overlay = previewContainer.querySelector('.nsfw-overlay');
if (overlay) overlay.style.display = 'none';
// Update or remove toggle button
const toggleBtn = card.querySelector('.toggle-blur-btn');
if (toggleBtn) {
// We'll leave the button but update the icon
toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
}
}
}
showNSFWLevelSelector(x, y, card) {
const selector = document.getElementById('nsfwLevelSelector');
const currentLevelEl = document.getElementById('currentNSFWLevel');
// Get current NSFW level
let currentLevel = 0;
try {
const metaData = JSON.parse(card.dataset.meta || '{}');
currentLevel = metaData.preview_nsfw_level || 0;
// Update if we have no recorded level but have a dataset attribute
if (!currentLevel && card.dataset.nsfwLevel) {
currentLevel = parseInt(card.dataset.nsfwLevel) || 0;
}
} catch (err) {
console.error('Error parsing metadata:', err);
}
currentLevelEl.textContent = getNSFWLevelName(currentLevel);
// Position the selector
if (x && y) {
const viewportWidth = document.documentElement.clientWidth;
const viewportHeight = document.documentElement.clientHeight;
const selectorRect = selector.getBoundingClientRect();
// Center the selector if no coordinates provided
let finalX = (viewportWidth - selectorRect.width) / 2;
let finalY = (viewportHeight - selectorRect.height) / 2;
selector.style.left = `${finalX}px`;
selector.style.top = `${finalY}px`;
}
// Highlight current level button
document.querySelectorAll('.nsfw-level-btn').forEach(btn => {
if (parseInt(btn.dataset.level) === currentLevel) {
btn.classList.add('active');
} else {
btn.classList.remove('active');
}
});
// Store reference to current card
selector.dataset.cardPath = card.dataset.filepath;
// Show selector
selector.style.display = 'block';
}
showMenu(x, y, card) {

View File

@@ -0,0 +1,82 @@
import { updateService } from '../managers/UpdateService.js';
import { toggleTheme } from '../utils/uiHelpers.js';
import { SearchManager } from '../managers/SearchManager.js';
import { FilterManager } from '../managers/FilterManager.js';
import { initPageState } from '../state/index.js';
/**
 * Header.js - Manages the application header behavior across different pages.
 * Detects the current page, seeds its state, creates the matching search and
 * filter managers, and wires up the header buttons shared by every page
 * (theme, settings, update, support).
 */
export class HeaderManager {
    constructor() {
        this.currentPage = this.detectCurrentPage();
        initPageState(this.currentPage);
        this.searchManager = null;
        this.filterManager = null;
        // Page-specific managers first, then the shared header controls.
        this.initializeManagers();
        this.initializeCommonElements();
    }
    /** Derive the logical page name from the URL path. */
    detectCurrentPage() {
        const path = window.location.pathname;
        if (path.includes('/loras/recipes')) return 'recipes';
        if (path.includes('/checkpoints')) return 'checkpoints';
        if (path.includes('/loras')) return 'loras';
        return 'unknown';
    }
    /** Create the search (always) and filter (when present) managers and expose them globally. */
    initializeManagers() {
        this.searchManager = new SearchManager({ page: this.currentPage });
        window.searchManager = this.searchManager;
        // Only pages that render a filter button get a FilterManager.
        if (document.getElementById('filterButton')) {
            this.filterManager = new FilterManager({ page: this.currentPage });
            window.filterManager = this.filterManager;
        }
    }
    /** Attach click handlers for the header buttons common to all pages. */
    initializeCommonElements() {
        // Theme toggle.
        const themeToggle = document.querySelector('.theme-toggle');
        themeToggle?.addEventListener('click', () => {
            if (typeof toggleTheme === 'function') {
                toggleTheme();
            }
        });
        // Settings panel toggle (settingsManager is registered elsewhere).
        const settingsToggle = document.querySelector('.settings-toggle');
        settingsToggle?.addEventListener('click', () => {
            window.settingsManager?.toggleSettings();
        });
        // Update-notification modal toggle.
        const updateToggle = document.getElementById('updateToggleBtn');
        updateToggle?.addEventListener('click', () => {
            updateService.toggleUpdateModal();
        });
        // Support panel toggle (logic not implemented yet).
        const supportToggle = document.getElementById('supportToggleBtn');
        supportToggle?.addEventListener('click', () => {
            // Handle support panel logic
        });
    }
}

View File

@@ -1,7 +1,9 @@
import { showToast } from '../utils/uiHelpers.js';
import { showToast, openCivitai } from '../utils/uiHelpers.js';
import { state } from '../state/index.js';
import { showLoraModal } from './LoraModal.js';
import { showLoraModal } from './loraModal/index.js';
import { bulkManager } from '../managers/BulkManager.js';
import { NSFW_LEVELS } from '../utils/constants.js';
import { replacePreview, deleteModel } from '../api/loraApi.js'
export function createLoraCard(lora) {
const card = document.createElement('div');
@@ -18,6 +20,24 @@ export function createLoraCard(lora) {
card.dataset.usage_tips = lora.usage_tips;
card.dataset.notes = lora.notes;
card.dataset.meta = JSON.stringify(lora.civitai || {});
// Store tags and model description
if (lora.tags && Array.isArray(lora.tags)) {
card.dataset.tags = JSON.stringify(lora.tags);
}
if (lora.modelDescription) {
card.dataset.modelDescription = lora.modelDescription;
}
// Store NSFW level if available
const nsfwLevel = lora.preview_nsfw_level !== undefined ? lora.preview_nsfw_level : 0;
card.dataset.nsfwLevel = nsfwLevel;
// Determine if the preview should be blurred based on NSFW level and user settings
const shouldBlur = state.settings.blurMatureContent && nsfwLevel > NSFW_LEVELS.PG13;
if (shouldBlur) {
card.classList.add('nsfw-content');
}
// Apply selection state if in bulk mode and this card is in the selected set
if (state.bulkMode && state.selectedLoras.has(lora.file_path)) {
@@ -28,16 +48,35 @@ export function createLoraCard(lora) {
const previewUrl = lora.preview_url || '/loras_static/images/no-preview.png';
const versionedPreviewUrl = version ? `${previewUrl}?t=${version}` : previewUrl;
// Determine NSFW warning text based on level
let nsfwText = "Mature Content";
if (nsfwLevel >= NSFW_LEVELS.XXX) {
nsfwText = "XXX-rated Content";
} else if (nsfwLevel >= NSFW_LEVELS.X) {
nsfwText = "X-rated Content";
} else if (nsfwLevel >= NSFW_LEVELS.R) {
nsfwText = "R-rated Content";
}
// Check if autoplayOnHover is enabled for video previews
const autoplayOnHover = state.global.settings.autoplayOnHover || false;
const isVideo = previewUrl.endsWith('.mp4');
const videoAttrs = autoplayOnHover ? 'controls muted loop' : 'controls autoplay muted loop';
card.innerHTML = `
<div class="card-preview">
${previewUrl.endsWith('.mp4') ?
`<video controls autoplay muted loop>
<div class="card-preview ${shouldBlur ? 'blurred' : ''}">
${isVideo ?
`<video ${videoAttrs}>
<source src="${versionedPreviewUrl}" type="video/mp4">
</video>` :
`<img src="${versionedPreviewUrl}" alt="${lora.model_name}">`
}
<div class="card-header">
<span class="base-model-label" title="${lora.base_model}">
${shouldBlur ?
`<button class="toggle-blur-btn" title="Toggle blur">
<i class="fas fa-eye"></i>
</button>` : ''}
<span class="base-model-label ${shouldBlur ? 'with-toggle' : ''}" title="${lora.base_model}">
${lora.base_model}
</span>
<div class="card-actions">
@@ -53,6 +92,14 @@ export function createLoraCard(lora) {
</i>
</div>
</div>
${shouldBlur ? `
<div class="nsfw-overlay">
<div class="nsfw-warning">
<p>${nsfwText}</p>
<button class="show-content-btn">Show</button>
</div>
</div>
` : ''}
<div class="card-footer">
<div class="model-info">
<span class="model-name">${lora.model_name}</span>
@@ -86,12 +133,69 @@ export function createLoraCard(lora) {
base_model: card.dataset.base_model,
usage_tips: card.dataset.usage_tips,
notes: card.dataset.notes,
civitai: JSON.parse(card.dataset.meta || '{}')
// Parse civitai metadata from the card's dataset
civitai: (() => {
try {
// Attempt to parse the JSON string
return JSON.parse(card.dataset.meta || '{}');
} catch (e) {
console.error('Failed to parse civitai metadata:', e);
return {}; // Return empty object on error
}
})(),
tags: JSON.parse(card.dataset.tags || '[]'),
modelDescription: card.dataset.modelDescription || ''
};
showLoraModal(loraMeta);
}
});
// Toggle blur button functionality
const toggleBlurBtn = card.querySelector('.toggle-blur-btn');
if (toggleBlurBtn) {
toggleBlurBtn.addEventListener('click', (e) => {
e.stopPropagation();
const preview = card.querySelector('.card-preview');
const isBlurred = preview.classList.toggle('blurred');
const icon = toggleBlurBtn.querySelector('i');
// Update the icon based on blur state
if (isBlurred) {
icon.className = 'fas fa-eye';
} else {
icon.className = 'fas fa-eye-slash';
}
// Toggle the overlay visibility
const overlay = card.querySelector('.nsfw-overlay');
if (overlay) {
overlay.style.display = isBlurred ? 'flex' : 'none';
}
});
}
// Show content button functionality
const showContentBtn = card.querySelector('.show-content-btn');
if (showContentBtn) {
showContentBtn.addEventListener('click', (e) => {
e.stopPropagation();
const preview = card.querySelector('.card-preview');
preview.classList.remove('blurred');
// Update the toggle button icon
const toggleBtn = card.querySelector('.toggle-blur-btn');
if (toggleBtn) {
toggleBtn.querySelector('i').className = 'fas fa-eye-slash';
}
// Hide the overlay
const overlay = card.querySelector('.nsfw-overlay');
if (overlay) {
overlay.style.display = 'none';
}
});
}
// Copy button click event
card.querySelector('.fa-copy')?.addEventListener('click', async e => {
e.stopPropagation();
@@ -148,6 +252,26 @@ export function createLoraCard(lora) {
actionGroup.style.display = 'none';
});
}
// Add autoplayOnHover handlers for video elements if needed
const videoElement = card.querySelector('video');
if (videoElement && autoplayOnHover) {
const cardPreview = card.querySelector('.card-preview');
// Remove autoplay attribute and pause initially
videoElement.removeAttribute('autoplay');
videoElement.pause();
// Add mouse events to trigger play/pause
cardPreview.addEventListener('mouseenter', () => {
videoElement.play();
});
cardPreview.addEventListener('mouseleave', () => {
videoElement.pause();
videoElement.currentTime = 0;
});
}
return card;
}
@@ -159,11 +283,26 @@ export function updateCardsForBulkMode(isBulkMode) {
document.body.classList.toggle('bulk-mode', isBulkMode);
document.querySelectorAll('.lora-card').forEach(card => {
// Get all lora cards
const loraCards = document.querySelectorAll('.lora-card');
loraCards.forEach(card => {
// Get all action containers for this card
const actions = card.querySelectorAll('.card-actions');
actions.forEach(actionGroup => {
actionGroup.style.display = isBulkMode ? 'none' : 'flex';
});
// Handle display property based on mode
if (isBulkMode) {
// Hide actions when entering bulk mode
actions.forEach(actionGroup => {
actionGroup.style.display = 'none';
});
} else {
// Ensure actions are visible when exiting bulk mode
actions.forEach(actionGroup => {
// We need to reset to default display style which is flex
actionGroup.style.display = 'flex';
});
}
});
// Apply selection state to cards if entering bulk mode

View File

@@ -1,561 +0,0 @@
import { showToast } from '../utils/uiHelpers.js';
import { state } from '../state/index.js';
/**
 * Render and open the details modal for a single LoRA model.
 *
 * Builds the modal HTML from the lora record, shows it through the global
 * modalManager, then wires up the editable fields and showcase scrolling.
 *
 * @param {Object} lora - Model record (model_name, file_path, file_name,
 *   base_model, file_size, usage_tips, notes, description, and an optional
 *   `civitai` metadata object with name/trainedWords/images).
 */
export function showLoraModal(lora) {
    // Escape single quotes so trigger words survive embedding in inline
    // onclick="copyTriggerWord('...')" handlers.
    const escapedWords = lora.civitai?.trainedWords?.length ?
        lora.civitai.trainedWords.map(word => word.replace(/'/g, '\\\'')) : [];
    // NOTE: use optional chaining on every `lora.civitai` access below — the
    // metadata object can be missing/empty (the card parser falls back to {}),
    // matching the `?.` already used for trainedWords above.
    const content = `
<div class="modal-content">
<button class="close" onclick="modalManager.closeModal('loraModal')">&times;</button>
<header class="modal-header">
<div class="editable-field model-name-field">
<h2 class="model-name-content" contenteditable="true" spellcheck="false">${lora.model_name}</h2>
<button class="save-btn" onclick="saveModelName('${lora.file_path}')">
<i class="fas fa-save"></i>
</button>
</div>
</header>
<div class="modal-body">
<div class="info-section">
<div class="info-grid">
<div class="info-item">
<label>Version</label>
<span>${lora.civitai?.name || 'N/A'}</span>
</div>
<div class="info-item">
<label>File Name</label>
<div class="file-name-wrapper" onclick="copyFileName('${lora.file_name}')">
<span id="file-name">${lora.file_name || 'N/A'}</span>
<i class="fas fa-copy" title="Copy file name"></i>
</div>
</div>
<div class="info-item location-size">
<div class="location-wrapper">
<label>Location</label>
<span class="file-path">${lora.file_path.replace(/[^/]+$/, '') || 'N/A'}</span>
</div>
</div>
<div class="info-item base-size">
<div class="base-wrapper">
<label>Base Model</label>
<span>${lora.base_model || 'N/A'}</span>
</div>
<div class="size-wrapper">
<label>Size</label>
<span>${formatFileSize(lora.file_size)}</span>
</div>
</div>
<div class="info-item usage-tips">
<label>Usage Tips</label>
<div class="editable-field">
<div class="preset-controls">
<select id="preset-selector">
<option value="">Add preset parameter...</option>
<option value="strength_min">Strength Min</option>
<option value="strength_max">Strength Max</option>
<option value="strength">Strength</option>
<option value="clip_skip">Clip Skip</option>
</select>
<input type="number" id="preset-value" step="0.01" placeholder="Value" style="display:none;">
<button class="add-preset-btn">Add</button>
</div>
<div class="preset-tags">
${renderPresetTags(parsePresets(lora.usage_tips))}
</div>
</div>
</div>
${renderTriggerWords(escapedWords)}
<div class="info-item notes">
<label>Additional Notes</label>
<div class="editable-field">
<div class="notes-content" contenteditable="true" spellcheck="false">${lora.notes || 'Add your notes here...'}</div>
<button class="save-btn" onclick="saveNotes('${lora.file_path}')">
<i class="fas fa-save"></i>
</button>
</div>
</div>
<div class="info-item full-width">
<label>About this version</label>
<div class="description-text">${lora.description || 'N/A'}</div>
</div>
</div>
</div>
${renderShowcaseImages(lora.civitai?.images)}
</div>
</div>
`;
    modalManager.showModal('loraModal', content);
    setupEditableFields();
    setupShowcaseScroll();
}
// Global helper (used by inline onclick) that copies a file name to the clipboard.
window.copyFileName = function(fileName) {
    return navigator.clipboard.writeText(fileName)
        .then(() => {
            showToast('File name copied', 'success');
        })
        .catch((err) => {
            console.error('Copy failed:', err);
            showToast('Copy failed', 'error');
        });
};
// Global handler (used by inline onclick) that persists an edited model name.
// Validates the contenteditable field, saves via the metadata API, syncs the
// matching card in the grid, then reloads so the list re-sorts.
window.saveModelName = async function(filePath) {
    const modelNameElement = document.querySelector('.model-name-content');
    const newModelName = modelNameElement.textContent.trim();
    // Reject empty names outright.
    if (!newModelName) {
        showToast('Model name cannot be empty', 'error');
        return;
    }
    // Enforce the 100-character limit and truncate the visible text to match.
    if (newModelName.length > 100) {
        showToast('Model name is too long (maximum 100 characters)', 'error');
        modelNameElement.textContent = newModelName.substring(0, 100);
        return;
    }
    try {
        await saveModelMetadata(filePath, { model_name: newModelName });
        // Keep the corresponding card's dataset and visible title in sync.
        const loraCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
        if (loraCard) {
            loraCard.dataset.model_name = newModelName;
            const titleElement = loraCard.querySelector('.card-title');
            if (titleElement) {
                titleElement.textContent = newModelName;
            }
        }
        showToast('Model name updated successfully', 'success');
        // Reload the page (after the toast is visible) to reflect the sorted order.
        setTimeout(() => {
            window.location.reload();
        }, 1500);
    } catch (error) {
        // Log the underlying failure instead of swallowing it silently.
        console.error('Failed to update model name:', error);
        showToast('Failed to update model name', 'error');
    }
};
/**
 * Attach edit/save behavior to the modal's contenteditable fields and the
 * usage-tips preset controls. Must run after the modal markup is in the DOM
 * (called from showLoraModal), since it queries the live modal elements.
 */
function setupEditableFields() {
const editableFields = document.querySelectorAll('.editable-field [contenteditable]');
editableFields.forEach(field => {
// Clear placeholder text when the user starts editing.
field.addEventListener('focus', function() {
if (this.textContent === 'Add your notes here...' ||
this.textContent === 'Save usage tips here..') {
this.textContent = '';
}
});
// Restore a sensible value when a field is left empty on blur.
field.addEventListener('blur', function() {
if (this.textContent.trim() === '') {
if (this.classList.contains('model-name-content')) {
// Restore original model name if empty. The file path is rebuilt from the
// displayed location + file name; assumes a '.safetensors' extension —
// TODO(review): confirm all models use that extension.
const filePath = document.querySelector('.modal-content')
.querySelector('.file-path').textContent +
document.querySelector('.modal-content')
.querySelector('#file-name').textContent + '.safetensors';
const loraCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
if (loraCard) {
this.textContent = loraCard.dataset.model_name;
}
} else if (this.classList.contains('usage-tips-content')) {
this.textContent = 'Save usage tips here..';
} else {
this.textContent = 'Add your notes here...';
}
}
});
// Extra validation and shortcuts apply only to the model-name field.
if (field.classList.contains('model-name-content')) {
field.addEventListener('input', function() {
// Hard-limit the name to 100 characters while typing.
if (this.textContent.length > 100) {
this.textContent = this.textContent.substring(0, 100);
// Truncation resets the caret; move it back to the end of the text.
const range = document.createRange();
const sel = window.getSelection();
range.setStart(this.childNodes[0], 100);
range.collapse(true);
sel.removeAllRanges();
sel.addRange(range);
showToast('Model name is limited to 100 characters', 'warning');
}
});
// Enter commits the new model name instead of inserting a newline.
field.addEventListener('keydown', function(e) {
if (e.key === 'Enter') {
e.preventDefault();
const filePath = document.querySelector('.modal-content')
.querySelector('.file-path').textContent +
document.querySelector('.modal-content')
.querySelector('#file-name').textContent + '.safetensors';
saveModelName(filePath);
}
});
}
});
// --- Usage-tips preset controls ---
const presetSelector = document.getElementById('preset-selector');
const presetValue = document.getElementById('preset-value');
const addPresetBtn = document.querySelector('.add-preset-btn');
const presetTags = document.querySelector('.preset-tags');
// Reveal the value input (with parameter-appropriate bounds) once a key is chosen.
presetSelector.addEventListener('change', function() {
const selected = this.value;
if (selected) {
presetValue.style.display = 'inline-block';
// strength-style presets take 0..1 in 0.01 steps; others (clip skip) 1..12.
presetValue.min = selected.includes('strength') ? 0 : 1;
presetValue.max = selected.includes('strength') ? 1 : 12;
presetValue.step = selected.includes('strength') ? 0.01 : 1;
if (selected === 'clip_skip') {
presetValue.type = 'number';
presetValue.step = 1;
}
// Focus after the element becomes visible (next tick).
setTimeout(() => presetValue.focus(), 0);
} else {
presetValue.style.display = 'none';
}
});
// Persist a new preset key/value pair and refresh the tag list.
addPresetBtn.addEventListener('click', async function() {
const key = presetSelector.value;
const value = presetValue.value;
if (!key || !value) return;
const filePath = document.querySelector('.modal-content')
.querySelector('.file-path').textContent +
document.querySelector('.modal-content')
.querySelector('#file-name').textContent + '.safetensors';
const loraCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
const currentPresets = parsePresets(loraCard.dataset.usage_tips);
currentPresets[key] = parseFloat(value);
const newPresetsJson = JSON.stringify(currentPresets);
await saveModelMetadata(filePath, {
usage_tips: newPresetsJson
});
loraCard.dataset.usage_tips = newPresetsJson;
presetTags.innerHTML = renderPresetTags(currentPresets);
// Reset the controls for the next entry.
presetSelector.value = '';
presetValue.value = '';
presetValue.style.display = 'none';
});
// Enter saves notes; Shift+Enter still inserts a newline.
const notesContent = document.querySelector('.notes-content');
if (notesContent) {
notesContent.addEventListener('keydown', async function(e) {
if (e.key === 'Enter') {
if (e.shiftKey) {
// Allow shift+enter for new line
return;
}
e.preventDefault();
const filePath = document.querySelector('.modal-content')
.querySelector('.file-path').textContent +
document.querySelector('.modal-content')
.querySelector('#file-name').textContent + '.safetensors';
await saveNotes(filePath);
}
});
}
// Enter in the preset value input behaves like clicking "Add".
presetValue.addEventListener('keydown', function(e) {
if (e.key === 'Enter') {
e.preventDefault();
addPresetBtn.click();
}
});
}
// Global handler (used by inline onclick) that persists the notes field and
// mirrors the new value into the matching card's dataset.
window.saveNotes = async function(filePath) {
    const noteText = document.querySelector('.notes-content').textContent;
    try {
        await saveModelMetadata(filePath, { notes: noteText });
        const matchingCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
        if (matchingCard) matchingCard.dataset.notes = noteText;
        showToast('Notes saved successfully', 'success');
    } catch (error) {
        showToast('Failed to save notes', 'error');
    }
};
/**
 * POST a partial metadata update for one model file to the backend.
 * @param {string} filePath - Absolute path identifying the model file.
 * @param {Object} data - Fields to merge (e.g. model_name, notes, usage_tips).
 * @throws {Error} When the server responds with a non-2xx status.
 */
async function saveModelMetadata(filePath, data) {
    const payload = { file_path: filePath, ...data };
    const response = await fetch('/loras/api/save-metadata', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
        },
        body: JSON.stringify(payload)
    });
    if (!response.ok) {
        throw new Error('Failed to save metadata');
    }
}
/**
 * Build the "Trigger Words" info section markup.
 * @param {string[]} words - Already-escaped trigger words; may be empty.
 * @returns {string} HTML for the section (a placeholder when no words exist).
 */
function renderTriggerWords(words) {
    if (words.length === 0) {
        return `
<div class="info-item full-width trigger-words">
<label>Trigger Words</label>
<span>No trigger word needed</span>
</div>
`;
    }
    const tagMarkup = words
        .map(word => `
<div class="trigger-word-tag" onclick="copyTriggerWord('${word}')">
<span class="trigger-word-content">${word}</span>
<span class="trigger-word-copy">
<i class="fas fa-copy"></i>
</span>
</div>
`)
        .join('');
    return `
<div class="info-item full-width trigger-words">
<label>Trigger Words</label>
<div class="trigger-words-tags">
${tagMarkup}
</div>
</div>
`;
}
/**
 * Build the collapsible showcase section for a model's example media.
 * Media elements are created with data-src only (class "lazy") and are
 * hydrated later by initLazyLoading when the carousel is expanded.
 * @param {Array<{url: string, width: number, height: number, type?: string}>} images
 *   Civitai example entries; `type === 'video'` selects a <video> element.
 * @returns {string} HTML string, or '' when there is nothing to show.
 */
function renderShowcaseImages(images) {
if (!images?.length) return '';
return `
<div class="showcase-section">
<div class="scroll-indicator" onclick="toggleShowcase(this)">
<i class="fas fa-chevron-down"></i>
<span>Scroll or click to show ${images.length} examples</span>
</div>
<div class="carousel collapsed">
<div class="carousel-container">
${images.map(img => {
// Compute a suitable display height (as a padding-bottom % of the width):
// 1. keep the media's original aspect ratio;
// 2. cap the height at 60% of the viewport height;
// 3. keep a minimum height of 40% of the container width.
const aspectRatio = (img.height / img.width) * 100;
const containerWidth = 800; // maximum width of the modal content
const minHeightPercent = 40; // floor: 40% of the container width
const maxHeightPercent = (window.innerHeight * 0.6 / containerWidth) * 100;
const heightPercent = Math.max(
minHeightPercent,
Math.min(maxHeightPercent, aspectRatio)
);
if (img.type === 'video') {
return `
<div class="media-wrapper" style="padding-bottom: ${heightPercent}%">
<video controls autoplay muted loop crossorigin="anonymous"
referrerpolicy="no-referrer" data-src="${img.url}"
class="lazy">
<source data-src="${img.url}" type="video/mp4">
Your browser does not support video playback
</video>
</div>
`;
}
return `
<div class="media-wrapper" style="padding-bottom: ${heightPercent}%">
<img data-src="${img.url}"
alt="Preview"
crossorigin="anonymous"
referrerpolicy="no-referrer"
width="${img.width}"
height="${img.height}"
class="lazy">
</div>
`;
}).join('')}
</div>
</div>
<button class="back-to-top" onclick="scrollToTop(this)">
<i class="fas fa-arrow-up"></i>
</button>
</div>
`;
}
/**
 * Expand or collapse the showcase carousel that follows the scroll indicator.
 * On expand, flips the chevron, updates the hint text, and kicks off lazy
 * loading of the example media; on collapse, restores the "show N examples"
 * hint with the current media count.
 * @param {HTMLElement} element - The .scroll-indicator element that was clicked.
 */
export function toggleShowcase(element) {
    const carousel = element.nextElementSibling;
    const wasCollapsed = carousel.classList.contains('collapsed');
    const indicator = element.querySelector('span');
    const icon = element.querySelector('i');
    carousel.classList.toggle('collapsed');
    if (wasCollapsed) {
        // Expanding: the hide-hint needs no count (dropped the unused `count` local).
        indicator.textContent = `Scroll or click to hide examples`;
        icon.classList.replace('fa-chevron-down', 'fa-chevron-up');
        initLazyLoading(carousel);
    } else {
        const count = carousel.querySelectorAll('.media-wrapper').length;
        indicator.textContent = `Scroll or click to show ${count} examples`;
        icon.classList.replace('fa-chevron-up', 'fa-chevron-down');
    }
}
/**
 * Lazily hydrate media elements marked with the "lazy" class inside container:
 * each element's real source is copied from data-src the first time it becomes
 * visible, then it is unobserved.
 * @param {HTMLElement} container - Carousel element holding the lazy media.
 */
function initLazyLoading(container) {
    const hydrate = (el) => {
        const realSrc = el.dataset.src;
        if (el.tagName.toLowerCase() === 'video') {
            // Videos carry the source on both the element and its <source> child.
            el.src = realSrc;
            el.querySelector('source').src = realSrc;
            el.load();
        } else {
            el.src = realSrc;
        }
        el.classList.remove('lazy');
    };
    const observer = new IntersectionObserver((entries) => {
        for (const entry of entries) {
            if (!entry.isIntersecting) continue;
            hydrate(entry.target);
            observer.unobserve(entry.target);
        }
    });
    for (const el of container.querySelectorAll('.lazy')) {
        observer.observe(el);
    }
}
/**
 * Install scroll-related behavior for the showcase section:
 * - wheel (document level): auto-expands the collapsed carousel when the user
 *   scrolls down near the bottom of the modal body;
 * - scroll (modal level): toggles the back-to-top button after 300px.
 * NOTE(review): the document-level wheel listener is never removed — verify
 * the modal is a singleton so repeated calls don't stack handlers.
 */
export function setupShowcaseScroll() {
// Listen at document level so the wheel event is caught even when the
// pointer is outside the modal's own scroll area.
document.addEventListener('wheel', (event) => {
const modalContent = document.querySelector('.modal-content');
if (!modalContent) return;
const showcase = modalContent.querySelector('.showcase-section');
if (!showcase) return;
const carousel = showcase.querySelector('.carousel');
const scrollIndicator = showcase.querySelector('.scroll-indicator');
// Only auto-expand when collapsed, scrolling downward, and near the bottom.
if (carousel?.classList.contains('collapsed') && event.deltaY > 0) {
const isNearBottom = modalContent.scrollHeight - modalContent.scrollTop - modalContent.clientHeight < 100;
if (isNearBottom) {
toggleShowcase(scrollIndicator);
event.preventDefault();
}
}
}, { passive: false }); // passive: false is required for preventDefault() to take effect
// Track modal scrolling to show/hide the back-to-top button.
const modalContent = document.querySelector('.modal-content');
if (modalContent) {
modalContent.addEventListener('scroll', () => {
const backToTopBtn = modalContent.querySelector('.back-to-top');
if (backToTopBtn) {
if (modalContent.scrollTop > 300) {
backToTopBtn.classList.add('visible');
} else {
backToTopBtn.classList.remove('visible');
}
}
});
}
}
/**
 * Smoothly scroll the modal that contains `button` back to the top.
 * @param {HTMLElement} button - The back-to-top button inside .modal-content.
 */
export function scrollToTop(button) {
    const container = button.closest('.modal-content');
    container?.scrollTo({ top: 0, behavior: 'smooth' });
}
/**
 * Parse the usage-tips string into a preset map.
 * @param {string|undefined} usageTips - JSON string of presets, the
 *   placeholder text, or empty/undefined.
 * @returns {Object} Parsed presets, or {} for missing/placeholder/invalid input.
 */
function parsePresets(usageTips) {
    const isEmptyOrPlaceholder = !usageTips || usageTips === 'Save usage tips here..';
    if (isEmptyOrPlaceholder) {
        return {};
    }
    try {
        return JSON.parse(usageTips);
    } catch {
        // Malformed JSON degrades gracefully to "no presets".
        return {};
    }
}
/**
 * Build the removable tag markup for a preset map.
 * @param {Object} presets - key -> numeric value map (e.g. { strength: 0.8 }).
 * @returns {string} Concatenated HTML for all preset tags.
 */
function renderPresetTags(presets) {
    const pieces = [];
    for (const [key, value] of Object.entries(presets)) {
        pieces.push(`
<div class="preset-tag" data-key="${key}">
<span>${formatPresetKey(key)}: ${value}</span>
<i class="fas fa-times" onclick="removePreset('${key}')"></i>
</div>
`);
    }
    return pieces.join('');
}
/**
 * Turn a snake_case preset key into a Title Case label.
 * @param {string} key - e.g. "strength_min".
 * @returns {string} e.g. "Strength Min".
 */
function formatPresetKey(key) {
    const parts = key.split('_');
    const capitalized = parts.map((part) => part.charAt(0).toUpperCase() + part.slice(1));
    return capitalized.join(' ');
}
// Global handler (used by inline onclick) that deletes one preset parameter,
// persists the remaining set, and re-renders the tag list.
window.removePreset = async function(key) {
    const modal = document.querySelector('.modal-content');
    const filePath = modal.querySelector('.file-path').textContent +
        modal.querySelector('#file-name').textContent + '.safetensors';
    const loraCard = document.querySelector(`.lora-card[data-filepath="${filePath}"]`);
    const presets = parsePresets(loraCard.dataset.usage_tips);
    delete presets[key];
    const serialized = JSON.stringify(presets);
    await saveModelMetadata(filePath, {
        usage_tips: serialized
    });
    loraCard.dataset.usage_tips = serialized;
    document.querySelector('.preset-tags').innerHTML = renderPresetTags(presets);
};
/**
 * Format a byte count as a human-readable size with one decimal place.
 * @param {number|undefined} bytes - Size in bytes; falsy values yield 'N/A'.
 * @returns {string} e.g. "1.5 KB"; capped at GB (larger values stay in GB).
 */
function formatFileSize(bytes) {
    if (!bytes) return 'N/A';
    const units = ['B', 'KB', 'MB', 'GB'];
    let unitIndex = 0;
    let size = bytes;
    while (unitIndex < units.length - 1 && size >= 1024) {
        size /= 1024;
        unitIndex += 1;
    }
    return `${size.toFixed(1)} ${units[unitIndex]}`;
}

Some files were not shown because too many files have changed in this diff Show More