Compare commits

453 Commits

Author SHA1 Message Date
Will Miao
76d3aa2b5b feat(version): bump version to 0.9.7 in pyproject.toml 2025-10-09 22:15:50 +08:00
Will Miao
c9a65c7347 feat(metadata): implement model data hydration and enhance metadata handling across services, fixes #547 2025-10-09 22:15:07 +08:00
Will Miao
f542ade628 feat(civitai): implement URL rewriting for Civitai previews and enhance download handling, fixes #499 2025-10-09 17:54:37 +08:00
Will Miao
d2c2bfbe6a feat(sidebar): add recursive search functionality and toggle button 2025-10-09 17:07:10 +08:00
Will Miao
2b6910bd55 feat(misc): mark model versions in library for Civitai user models 2025-10-09 15:23:42 +08:00
Will Miao
b1dd733493 feat(civitai): enhance model version handling with cache lookup 2025-10-09 14:10:00 +08:00
pixelpaws
5dcf0a1e48 Merge pull request #545 from willmiao/codex/evaluate-sqlite-cache-indexing-necessity
feat: index cached models by version id
2025-10-09 13:54:46 +08:00
pixelpaws
cf357b57fc feat(scanner): index cached models by version id 2025-10-09 13:50:44 +08:00
pixelpaws
4e1773833f Merge pull request #544 from willmiao/codex/add-endpoint-to-fetch-civitai-user-models
Add endpoint to fetch Civitai user models
2025-10-09 11:56:57 +08:00
pixelpaws
8cf762ffd3 feat(misc): add civitai user model lookup 2025-10-09 11:49:41 +08:00
pixelpaws
d997eaa429 Merge pull request #543 from willmiao/codex/refactor-get_model_version-logic-and-add-tests, fixes #540
fix: improve Civitai model version retrieval
2025-10-09 11:07:33 +08:00
pixelpaws
8e51f0f19f fix(civitai): improve model version retrieval 2025-10-09 10:56:25 +08:00
pixelpaws
f0e246b4ac Merge pull request #542 from willmiao/codex/investigate-backend-tests-modifying-settings.json
Refactor settings manager to lazy singleton
2025-10-08 16:02:11 +08:00
pixelpaws
a232997a79 fix(utils): respect metadata sync overrides 2025-10-08 15:52:15 +08:00
pixelpaws
08a449db99 fix(metadata): refresh metadata sync settings 2025-10-08 10:38:05 +08:00
pixelpaws
0c023c9888 fix(settings): lazily resolve module aliases 2025-10-08 10:10:23 +08:00
pixelpaws
0ad92d00b3 fix(settings): restore legacy settings aliases 2025-10-08 09:54:36 +08:00
pixelpaws
a726cbea1e fix(routes): pass resolved settings to metadata sync 2025-10-08 09:32:57 +08:00
pixelpaws
c53fa8692b refactor(settings): lazily initialize manager 2025-10-08 08:56:57 +08:00
Will Miao
3118f3b43c feat(graph): enhance node handling with graph identifiers and improve metadata updates, see #408, #538 2025-10-07 23:22:38 +08:00
Will Miao
9199950b74 feat(release): update version to 0.9.6 and add release notes for critical performance optimizations and new features 2025-10-07 17:41:58 +08:00
Will Miao
4c7e31687b feat(recipe_scanner): reset cached state on active library change 2025-10-07 08:07:44 +08:00
Will Miao
75e207b520 fix(banner): remove redundant community support banner registration 2025-10-06 22:39:49 +08:00
Will Miao
631289b75e feat(banner): add community support banner with Ko-fi integration and translations 2025-10-06 22:39:21 +08:00
Will Miao
1b958d0a5d feat(modal): use CSS variables for header height and improve recipe modal layout 2025-10-06 16:28:56 +08:00
Will Miao
35fdf9020d docs(README): update instructions for settings.json file creation in Portable Edition 2025-10-06 15:32:37 +08:00
Will Miao
45926b1dca feat(constants): add CHROMA model to BASE_MODELS and update categories 2025-10-06 08:48:15 +08:00
Will Miao
686ba5024d fix(tests): add loadingManager mocks in settingsManager tests 2025-10-06 08:24:58 +08:00
pixelpaws
cf375c7c86 Merge pull request #537 from willmiao/codex/implement-video-lazy-loading-with-queue
fix: throttle model card video lazy loading
2025-10-06 08:07:24 +08:00
pixelpaws
5e53d76f44 fix(model-card): throttle preview video loading 2025-10-06 07:45:51 +08:00
Will Miao
7757f72859 Enhance test for saving paths to ensure cross-platform compatibility in folder paths 2025-10-05 22:40:43 +08:00
pixelpaws
c8cc584049 Merge pull request #536 from willmiao/codex/add-unit-tests-for-config-saving-paths
Add regression tests for Config save path handling
2025-10-05 22:24:57 +08:00
pixelpaws
2cdd269bba Merge pull request #535 from willmiao/codex/add-tests-for-migration-utility-functions
test: cover example images migration flows
2025-10-05 22:24:42 +08:00
pixelpaws
d2d97ae5bb Merge pull request #534 from willmiao/codex/add-tests-for-cache-middleware
test: add cache control middleware coverage
2025-10-05 22:24:26 +08:00
pixelpaws
d08d77c555 Merge pull request #533 from willmiao/codex/add-async-test-module-for-lifecycle
test: add LoRA manager lifecycle coverage
2025-10-05 22:24:12 +08:00
pixelpaws
92f8d2139a Merge pull request #532 from willmiao/codex/create-tests-for-statsroutes
test: add coverage for stats routes endpoints
2025-10-05 22:23:59 +08:00
pixelpaws
50f2c2dfe6 test(config): cover save path migration flows 2025-10-05 22:19:36 +08:00
pixelpaws
3539c453d3 test(utils): cover example images migrations 2025-10-05 22:19:29 +08:00
pixelpaws
1631122f95 test(middleware): add cache control coverage 2025-10-05 22:19:20 +08:00
pixelpaws
8fcb979544 test(routes): cover lora manager lifecycle 2025-10-05 22:19:10 +08:00
pixelpaws
8a5af0b7f3 test(routes): add stats routes coverage 2025-10-05 22:18:59 +08:00
Will Miao
cb1f08d556 Fix low contrast on nav-item hover 2025-10-05 21:10:01 +08:00
pixelpaws
1150267765 Merge pull request #531 from willmiao/codex/-activelibrary
fix(locales): translate library selection strings
2025-10-05 21:04:10 +08:00
pixelpaws
5c1252548d fix(locales): translate library selection strings 2025-10-05 21:03:21 +08:00
Will Miao
3c7cdf5db8 feat(header): enhance navigation and search functionality with context-aware behavior 2025-10-05 20:58:14 +08:00
Will Miao
9ac4203b1c test(example-images): allow monkeypatching os.startfile on linux 2025-10-05 16:30:44 +08:00
pixelpaws
d0800510db Merge pull request #529 from willmiao/codex/update-file-url-formatting-methods
fix: route recipe previews through preview API
2025-10-05 15:53:26 +08:00
pixelpaws
f8ba551cc4 fix(recipes): use preview endpoint for recipe images 2025-10-05 15:49:18 +08:00
Will Miao
413444500e refactor(model_scanner): normalize path comparisons for model roots
fix(example_images_download_manager): re-raise specific exception on download error

refactor(usage_stats): define constants locally to avoid conditional imports

test(example_images_download_manager): update exception handling in download tests

test(example_images_file_manager): differentiate between os.startfile and subprocess.Popen in tests

test(example_images_paths): ensure valid example images root with single-library mode

test(usage_stats): use string literals for metadata payload to avoid conditional imports
2025-10-05 15:48:50 +08:00
pixelpaws
e21d5835ec Merge pull request #524 from willmiao/codex/add-async-tests-for-websocketmanager
Add websocket broadcast and usage stats tests
2025-10-05 15:19:40 +08:00
pixelpaws
f2f354e478 Merge pull request #528 from willmiao/codex/add-bulk-action-set-content-rating, fixes #428
Add bulk content rating update action
2025-10-05 15:19:19 +08:00
pixelpaws
b195d4569c test(usage-stats): allow metadata registry monkeypatch 2025-10-05 15:13:20 +08:00
pixelpaws
3b77fed72d feat(bulk): add bulk content rating action 2025-10-05 15:09:43 +08:00
pixelpaws
fc64e97f92 Merge pull request #525 from willmiao/codex/develop-tests-for-metadatasyncservice-and-modelscanner
Add coverage for metadata sync service and scanner reconciliation
2025-10-05 15:03:36 +08:00
pixelpaws
1da0434454 Merge pull request #526 from willmiao/codex/add-tests-for-utility-functions
test: add coverage for utility helpers
2025-10-05 15:03:20 +08:00
pixelpaws
cf2fe40612 Merge pull request #527 from willmiao/codex/add-unit-tests-for-metadata-components
Add metadata collector unit tests and fixtures
2025-10-05 15:03:04 +08:00
pixelpaws
8f46433ff7 Merge pull request #523 from willmiao/codex/add-tests-for-settingsmanager-migration
Add tests for settings migrations and service registry lazy loading
2025-10-05 15:02:44 +08:00
pixelpaws
f3be3ae269 Merge pull request #522 from willmiao/codex/add-tests-for-example-images-pipeline
test: add example images route and utility coverage
2025-10-05 15:02:07 +08:00
pixelpaws
cfec5447d3 test(metadata): add collector coverage 2025-10-05 14:44:17 +08:00
pixelpaws
2d36b461cf test(utils): add coverage for helper utilities 2025-10-05 14:43:21 +08:00
pixelpaws
5e23e4b13d test(metadata): cover sync service and scanner reconciliation 2025-10-05 14:42:13 +08:00
pixelpaws
badae2e8b3 test: cover websocket broadcasts and usage stats 2025-10-05 14:41:47 +08:00
pixelpaws
9e64531de6 test(settings): cover migrations and registry lazy loading 2025-10-05 14:41:24 +08:00
pixelpaws
fdec8d283c test(example-images): expand coverage for routes and utilities 2025-10-05 14:40:48 +08:00
Will Miao
9abedbf7cb fix(metadata-sync): improve error handling for deleted CivitAI models, fixes #497 2025-10-05 11:05:52 +08:00
pixelpaws
66004c1cdc Merge pull request #520 from willmiao/codex/adjust-example-images-download-to-use-library-name
fix: keep example image downloads in initial library
2025-10-05 10:08:26 +08:00
pixelpaws
5b564cd8a3 fix(example-images): pin downloads to start library 2025-10-05 09:10:25 +08:00
pixelpaws
2e79970e6e Merge pull request #519 from willmiao/codex/update-example-images-download-flow
fix: reuse migrated example image folders before download
2025-10-05 09:04:06 +08:00
pixelpaws
67c82ba6ea fix(example-images): reuse migrated folders during downloads 2025-10-05 08:37:11 +08:00
Will Miao
98425f37b8 fix(download-manager): improve handling of civitai payloads to avoid empty dictionaries 2025-10-05 07:29:11 +08:00
Will Miao
9d22dd3465 fix(model-library): update response structure to return model versions directly 2025-10-04 22:06:33 +08:00
pixelpaws
837138db49 Merge pull request #517 from willmiao/codex/determine-example_images_path-structure
feat: namespace example image storage by library
2025-10-04 20:42:15 +08:00
pixelpaws
d43d992362 feat(example-images): namespace storage by library 2025-10-04 20:29:49 +08:00
Will Miao
16b611cb7e Simplify settings file location and configuration 2025-10-04 18:42:53 +08:00
Will Miao
8dde2d5e0d feat(websocket-manager): implement caching for initialization progress and enhance broadcast functionality 2025-10-04 18:19:20 +08:00
Will Miao
22b0b2bd24 fix(model-card): correct query parameter handling in versioned preview URL 2025-10-04 17:32:16 +08:00
Will Miao
056f727bfd feat(model-scanner): enhance page type determination for model types 2025-10-04 17:08:02 +08:00
Will Miao
0aa6c53c1f feat(initialization): add support for embeddings page type and log progress updates 2025-10-04 14:11:27 +08:00
pixelpaws
d9b0660611 Merge pull request #516 from willmiao/codex/investigate-library-switching-functionality-issue
feat: serve dynamic preview assets after library switch
2025-10-04 10:51:52 +08:00
pixelpaws
d01666f4e2 feat(previews): serve dynamic library previews 2025-10-04 10:38:06 +08:00
Will Miao
51bee87cd0 fix(persistence): improve handling of lora_info attributes in recipe persistence 2025-10-04 09:52:53 +08:00
pixelpaws
3041b443e5 Merge pull request #515 from willmiao/codex/add-library-selection-in-settings-modal
Add tests for settings library switcher
2025-10-04 09:22:11 +08:00
pixelpaws
d95e6c939b test(settings): cover library switch workflow 2025-10-04 09:07:02 +08:00
pixelpaws
fd38c63b35 Merge pull request #514 from willmiao/codex/add-multi-library-endpoints-for-frontend
test: add coverage for settings library endpoints
2025-10-04 08:43:03 +08:00
pixelpaws
b69c24ae14 test(routes): cover settings library endpoints 2025-10-04 08:38:59 +08:00
pixelpaws
65a0c00e33 Merge pull request #513 from willmiao/codex/add-link-button-to-settings-modal-header
Adjust settings modal location shortcut styling
2025-10-04 07:57:43 +08:00
pixelpaws
b12a5ef133 style(settings): restyle settings location shortcut 2025-10-04 07:52:10 +08:00
pixelpaws
9e1b92c26e Merge pull request #512 from willmiao/codex/analyze-and-add-tests-for-misc-routes
test: improve misc routes coverage
2025-10-04 05:25:58 +08:00
pixelpaws
3922aec36e test(routes): extend misc routes coverage 2025-10-04 05:10:43 +08:00
pixelpaws
41cca8e56d Merge pull request #511 from willmiao/codex/refactor-misc_routes.py-and-add-tests
refactor: modularize misc route controller
2025-10-03 22:47:22 +08:00
pixelpaws
2d37a7341a fix(routes): await trained words extraction 2025-10-03 22:45:25 +08:00
pixelpaws
40e3c6134c refactor(routes): modularize misc route handling 2025-10-03 22:19:09 +08:00
pixelpaws
edddd47a1e Merge pull request #510 from willmiao/codex/update-default-library-folder-path-handling
fix: rename legacy default library to comfyui
2025-10-03 21:50:41 +08:00
pixelpaws
4ea6f38645 fix(config): rename legacy default library 2025-10-03 21:24:17 +08:00
Will Miao
40d998a026 fix(settings): use timezone-aware datetime for current timestamp
fix(tests): normalize stored paths in library upsert test
2025-10-03 20:59:47 +08:00
pixelpaws
3af8f151ac Merge pull request #509 from willmiao/codex/implement-features-from-multi-library-design
feat: add multi-library backend support
2025-10-03 20:37:59 +08:00
pixelpaws
e066fa6873 feat(settings): add multi-library backend support 2025-10-03 20:08:35 +08:00
Will Miao
6bd94269d4 refactor(model-scanner): remove unused model scanning methods 2025-10-03 18:58:02 +08:00
Will Miao
c90edec18a feat(multi-library): add design documentation for multi-library management in standalone mode 2025-10-03 18:34:52 +08:00
Will Miao
cbb302614c Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager 2025-10-03 15:23:30 +08:00
Will Miao
c54611a11b fix(metadata-service): change log level to debug for SQLite and fallback provider registration 2025-10-03 15:23:28 +08:00
pixelpaws
88f249649a Merge pull request #508 from willmiao/codex/sync-persistent-model-cache-metadata-updates
fix: sync persistent cache with metadata updates
2025-10-03 15:06:30 +08:00
pixelpaws
fe9fbdb93c fix(cache): sync persistent metadata updates 2025-10-03 14:57:44 +08:00
Will Miao
28bc966b76 feat(model-scanner): enhance cache loading with progress reporting and fallback to full scan 2025-10-03 14:31:08 +08:00
Will Miao
77bbf85b52 feat(persistent-cache): implement SQLite-based persistent model cache with loading and saving functionality 2025-10-03 11:00:51 +08:00
Will Miao
3b1990e97a feat(scanner): enhance model scanning with cache build result and progress reporting 2025-10-02 21:25:09 +08:00
Will Miao
375b5a49f3 fix(config): update standalone mode environment variable usage 2025-10-02 09:40:24 +08:00
pixelpaws
392c157cb5 Merge pull request #503 from willmiao/codex/add-hebrew-locale-to-tests
fix(i18n): add hebrew locale coverage
2025-09-30 22:07:52 +08:00
pixelpaws
6f5bf4b582 fix(i18n): add hebrew locale coverage 2025-09-30 22:03:44 +08:00
pixelpaws
2e3f48ebb7 Merge pull request #426 from start-life/main
Adding Hebrew language
2025-09-30 21:51:50 +08:00
Will Miao
e4a2c518bb fix(preset): update filePath retrieval method in removePreset function 2025-09-30 21:41:30 +08:00
pixelpaws
f19fb68b4c Merge pull request #501 from willmiao/codex/update-downloadmanager-to-handle-multiple-download-urls
feat: retry mirror downloads sequentially
2025-09-30 17:17:34 +08:00
pixelpaws
9121c12a2c feat(download): retry mirror urls sequentially 2025-09-30 17:14:59 +08:00
Will Miao
d0fe28cfe2 fix(recipe): validate modelVersionId before fetching hash from cache or Civitai 2025-09-28 09:18:59 +08:00
Will Miao
656e3e43be fix(imports): update import paths for ensure_settings_file to use relative imports 2025-09-28 08:40:09 +08:00
pixelpaws
c2c1772371 Merge pull request #496 from willmiao/codex/find-best-practices-for-settings-file-storage
feat(settings): persist settings in user config directory
2025-09-28 07:06:02 +08:00
pixelpaws
88d5caf642 feat(settings): migrate settings to user config dir 2025-09-27 22:22:15 +08:00
pixelpaws
1684978693 Merge pull request #491 from willmiao/codex/replace-spaces-in-embedding-paths
Fix embedding relative paths by replacing spaces
2025-09-26 09:06:56 +08:00
pixelpaws
8e4927600f fix(embeddings): replace spaces in relative paths 2025-09-26 09:02:46 +08:00
pixelpaws
4d72dc57e7 Merge pull request #490 from willmiao/codex/remove-comfy-field-from-images-metadata
fix: strip comfy metadata from civitai model images
2025-09-26 08:59:30 +08:00
pixelpaws
e7316b3389 fix(civitai): strip comfy metadata from images 2025-09-26 08:55:46 +08:00
start-life
e17b374606 Update zh-CN.json 2025-09-26 02:16:58 +03:00
Will Miao
141f83065f fix(locales): update language selection text in Chinese 2025-09-25 21:14:58 +08:00
pixelpaws
6381dbafc1 Merge pull request #488 from willmiao/codex/fix-bespoke-import_from-loader-issue
test: standardize backend package imports
2025-09-25 16:51:17 +08:00
pixelpaws
fc9db4510f test: fix duplicate pytest import 2025-09-25 16:44:43 +08:00
pixelpaws
66abf736c9 Merge pull request #487 from willmiao/codex/fix-lora-information-import-issue
fix: parse civitai image LoRAs from hash metadata
2025-09-25 16:00:48 +08:00
pixelpaws
af713470c1 fix(recipes): parse loras from civitai hashes 2025-09-25 15:59:44 +08:00
pixelpaws
93a51d2bcb Merge pull request #486 from willmiao/codex/enable-test-coverage-metrics-in-ci
ci: add pytest coverage reporting
2025-09-25 15:58:47 +08:00
pixelpaws
3f3e06de8a Fix pytest command in backend tests workflow 2025-09-25 15:57:59 +08:00
pixelpaws
7315aac9d8 ci: add pytest coverage reporting 2025-09-25 15:47:07 +08:00
pixelpaws
d933308a6f Merge pull request #485 from willmiao/codex/expand-vitest-coverage-for-frontend-components
test(frontend): extend coverage for comfyui widgets and helpers
2025-09-25 15:37:48 +08:00
pixelpaws
3baf93dcc5 test(frontend): extend coverage for comfyui widgets and helpers 2025-09-25 15:32:25 +08:00
pixelpaws
6ba14bd8fe Merge pull request #484 from willmiao/codex/fix-update-check-in-offline-mode
Fix offline update logging and respect update notification toggle
2025-09-25 14:54:28 +08:00
pixelpaws
7499570766 fix(updates): avoid network stack traces offline 2025-09-25 14:50:06 +08:00
start-life
003ee55a75 Merge branch 'main' into main 2025-09-25 09:28:30 +03:00
pixelpaws
b0cc42ef1f Merge pull request #483 from willmiao/codex/introduce-integration-and-contract-tests
test: add aiohttp integration coverage for ServiceRegistry
2025-09-25 14:23:53 +08:00
pixelpaws
23679ec3f5 chore(tests): clean integration route header 2025-09-25 14:17:45 +08:00
Will Miao
da52e5b9dd fix(settings): improve logic for auto-setting default root paths based on folder presence 2025-09-25 10:56:09 +08:00
Will Miao
c4e357793f Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager 2025-09-25 10:43:14 +08:00
Will Miao
6c3424029c fix(recipe image): optimize image saving and update PNG metadata handling, fixes #481 2025-09-25 10:43:10 +08:00
pixelpaws
dd9e6a5b69 Merge pull request #482 from willmiao/codex/add-pytest-modules-for-untested-services
Add backend service and route test coverage
2025-09-25 09:48:27 +08:00
pixelpaws
095320ef72 test(routes): tidy lora route test imports 2025-09-25 09:40:25 +08:00
pixelpaws
35f7674bcd Merge pull request #480 from willmiao/codex/modularize-i18n-tests-into-smaller-cases
test(i18n): modularize translation validation
2025-09-25 09:25:06 +08:00
pixelpaws
26b36c123d test(i18n): modularize translation validation 2025-09-25 09:21:08 +08:00
Will Miao
c85e694c1d docs: update repository guidelines for clarity and consistency 2025-09-25 08:36:15 +08:00
Will Miao
ec05282db6 fix(coverage): improve Vitest CLI path handling and error checking 2025-09-25 08:11:48 +08:00
pixelpaws
3d6f9b226f Merge pull request #479 from willmiao/codex/execute-phase-5-tasks-and-update-roadmap
Add frontend coverage workflow and reporting script
2025-09-25 07:00:04 +08:00
pixelpaws
eda6df4a5d chore(ci): add frontend coverage workflow 2025-09-24 23:22:32 +08:00
Will Miao
d504f89f6a Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager 2025-09-24 21:11:43 +08:00
Will Miao
14c468f2a2 feat(video): enhance video handling in model cards with lazy loading and autoplay settings, see #446 2025-09-24 21:11:36 +08:00
pixelpaws
2a99b0e46f Merge pull request #478 from willmiao/codex/execute-phase-4-tasks-from-roadmap
Add interaction-level frontend regression tests
2025-09-24 20:38:51 +08:00
pixelpaws
ae8914f5c8 test(frontend): add interaction regression suites 2025-09-24 20:33:41 +08:00
pixelpaws
0c9f8971ce Merge pull request #477 from willmiao/codex/execute-phase-3-tasks-and-update-roadmap
test: add embeddings and recipes manager suites
2025-09-24 20:18:37 +08:00
pixelpaws
d7a75ea4e5 test(frontend): cover embeddings and recipes managers 2025-09-24 20:15:38 +08:00
pixelpaws
3ad8d8b17c Merge pull request #476 from willmiao/codex/plan-next-tasks-based-on-roadmap-juork7
test(frontend): cover filtering flows for lora and checkpoints
2025-09-24 17:54:50 +08:00
pixelpaws
39225dc204 test(frontend): add filtering coverage for model pages 2025-09-24 17:50:04 +08:00
pixelpaws
4fb69f7d89 Merge pull request #475 from willmiao/codex/plan-next-tasks-based-on-roadmap-gpw3fe
Add checkpoints page smoke tests
2025-09-24 17:22:22 +08:00
pixelpaws
0890c6ad24 test(frontend): add checkpoints manager smoke tests 2025-09-24 17:18:20 +08:00
pixelpaws
dd81809589 Merge pull request #474 from willmiao/codex/plan-next-steps-for-roadmap-tasks
test(frontend): add loras page manager suite
2025-09-24 17:09:38 +08:00
pixelpaws
f0672beb46 test(frontend): add loras page manager suite 2025-09-24 16:22:17 +08:00
pixelpaws
cc5301e710 Merge pull request #473 from willmiao/codex/plan-next-tasks-based-on-roadmap
fix(frontend): validate AppCore initialization wiring
2025-09-24 16:15:05 +08:00
pixelpaws
9d5ec43c4e fix(frontend): correct AppCore example images initialization 2025-09-24 16:10:27 +08:00
pixelpaws
6d41211b07 Merge pull request #472 from willmiao/codex/plan-and-update-frontend-testing-roadmap
Add AppCore page orchestration tests
2025-09-24 15:56:17 +08:00
pixelpaws
d58b61eed5 test(frontend): cover appcore page features 2025-09-24 15:55:50 +08:00
pixelpaws
4b53d98bfc Merge pull request #471 from willmiao/codex/organize-frontend-tests-into-new-directories
test(frontend): document dom fixture workflow
2025-09-24 15:44:23 +08:00
pixelpaws
f51f354e48 test(frontend): add dom fixture helpers 2025-09-24 15:39:52 +08:00
Will Miao
59d027181d refactor: remove obsolete JSON files and add new version metadata for LORA model 2025-09-24 10:56:06 +08:00
Will Miao
0d0988c090 feat: add functionality to attach model files to version data in SQLiteModelMetadataProvider 2025-09-24 10:55:55 +08:00
Will Miao
dc2de50924 bump: update version to 0.9.5 in pyproject.toml 2025-09-24 09:16:50 +08:00
Will Miao
12c88835f2 refactor: enhance model version retrieval logic in CivitaiClient, fixes #460 2025-09-24 09:16:02 +08:00
Will Miao
6f4453aaf3 refactor: remove storage migration logic and associated tests 2025-09-24 06:04:08 +08:00
Will Miao
4b4b8fe3c1 refactor: remove unused ModelRouteUtils class and its methods 2025-09-24 05:41:30 +08:00
pixelpaws
49e7c2e9f5 Merge pull request #470 from willmiao/codex/add-tests-for-storagehelpers-and-appcore
test: add vitest coverage for storage helpers and core
2025-09-24 05:21:02 +08:00
pixelpaws
4653c273e3 test(frontend): add storage and core initialization specs 2025-09-24 05:20:39 +08:00
Will Miao
ae145de2f2 feat(tests): add frontend automated testing setup with Vitest and jsdom 2025-09-23 23:05:55 +08:00
Will Miao
dde7cf71c6 fix(locales): translate global context menu entries for downloading example images 2025-09-23 22:24:57 +08:00
pixelpaws
219cd242db Merge pull request #467 from willmiao/codex/migrate-frontend-settings-to-backend
feat(settings): centralize settings loading and snake_case keys
2025-09-23 22:16:10 +08:00
pixelpaws
e5b712c082 fix(i18n): sync language with state settings 2025-09-23 22:00:31 +08:00
pixelpaws
4d2c60d59b fix(settings): persist language preference 2025-09-23 22:00:20 +08:00
pixelpaws
1d2c1b114b Merge pull request #469 from willmiao/codex/add-download-example-images-to-global-context-menu
feat: add download example images to global context menu
2025-09-23 20:58:35 +08:00
pixelpaws
2bde936d05 Merge pull request #468 from willmiao/codex/migrate-i18n-tests-to-tests-framework
Migrate i18n test script into pytest suite
2025-09-23 20:58:14 +08:00
pixelpaws
cd3e32bf4b feat(context-menu): add example image download entry 2025-09-23 20:49:44 +08:00
pixelpaws
454536d631 test(i18n): migrate localization tests into pytest suite 2025-09-23 20:47:33 +08:00
Will Miao
656f1755fd feat: add cleanup example image folders functionality and UI integration 2025-09-23 20:35:35 +08:00
pixelpaws
8aa76ce5c1 feat(settings): centralize frontend settings on backend 2025-09-23 20:28:32 +08:00
Will Miao
49fa37f00d Merge branch 'main' of https://github.com/willmiao/ComfyUI-Lora-Manager 2025-09-23 19:31:19 +08:00
pixelpaws
9f83548cf3 Merge pull request #466 from willmiao/refactor
Refactor
2025-09-23 19:29:54 +08:00
pixelpaws
6054d95e85 Update py/services/model_query.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-09-23 19:25:12 +08:00
pixelpaws
8c9bb35824 Update tests/services/test_base_model_service.py
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-09-23 19:25:02 +08:00
Will Miao
3eacf9558a docs: remove outdated developer notes and add example image route architecture documentation 2025-09-23 15:39:56 +08:00
pixelpaws
fee37172b4 Merge pull request #465 from willmiao/codex/add-async-tests-for-concurrent-behavior
Add async tests for example image download concurrency
2025-09-23 15:00:31 +08:00
pixelpaws
e128c80eb1 test(services): add async example image download tests 2025-09-23 14:58:35 +08:00
pixelpaws
5cc735ed57 Merge pull request #464 from willmiao/codex/refactor-websocket-integration-for-downloading
refactor: align example image downloads with websocket manager
2025-09-23 14:43:37 +08:00
pixelpaws
43fcce6361 refactor(example-images): inject websocket manager 2025-09-23 14:40:43 +08:00
pixelpaws
49b7126278 Merge pull request #463 from willmiao/codex/refactor-downloadmanager-to-instance-based
refactor: convert example image download manager to service instance
2025-09-23 13:08:08 +08:00
pixelpaws
679cfb5c69 refactor(example-images): encapsulate download manager state 2025-09-23 13:07:11 +08:00
pixelpaws
50616bc680 Merge pull request #462 from willmiao/codex/wrap-long-running-flows-in-use-cases
feat(example-images): add use case orchestration
2025-09-23 11:55:06 +08:00
pixelpaws
aaad270822 feat(example-images): add use case orchestration 2025-09-23 11:47:12 +08:00
pixelpaws
bd10280736 Merge pull request #461 from willmiao/codex/refactor-example-images-routes
Add regression tests for example images routes
2025-09-23 11:31:22 +08:00
Will Miao
d477050239 feat: add global context menu with actions and integration 2025-09-23 11:19:36 +08:00
pixelpaws
85f79cd8d1 refactor(routes): introduce example images controller 2025-09-23 11:12:08 +08:00
pixelpaws
613cd81152 refactor(routes): add registrar for example images 2025-09-23 11:12:05 +08:00
pixelpaws
e0aba6c49a test(example-images): add route regression harness 2025-09-23 10:41:56 +08:00
pixelpaws
d78bcf2494 Merge pull request #459 from willmiao/codex/complete-refactor-with-tests-and-documentation
Add recipe route integration tests and update architecture docs
2025-09-22 14:19:41 +08:00
pixelpaws
f7cffd2eba test(recipes): add route smoke tests and docs 2025-09-22 14:15:24 +08:00
pixelpaws
0d0b91aa80 Merge pull request #458 from willmiao/codex/expose-first-class-operations-on-recipescanner
feat: expose recipe scanner mutation APIs
2025-09-22 13:54:49 +08:00
pixelpaws
42872e6d2d feat(recipes): expose recipe scanner mutation apis 2025-09-22 13:45:40 +08:00
Will Miao
b91f06405d feat: support clip strength in LoRA usage tips, fixes #401 2025-09-22 13:42:36 +08:00
pixelpaws
dac4c688d6 Merge pull request #456 from willmiao/codex/refactor-http-handlers-with-recipe-services
Refactor recipe handlers to use dedicated services
2025-09-22 13:30:05 +08:00
pixelpaws
097a68ad18 refactor(recipes): introduce dedicated services for handlers 2025-09-22 13:25:21 +08:00
pixelpaws
4a98710db0 Merge pull request #455 from willmiao/codex/refactor-reciperoutes-into-handler-objects
refactor: split recipe routes into dedicated handlers
2025-09-22 13:02:39 +08:00
pixelpaws
d033a374dd refactor(routes): split recipe handlers into dedicated classes 2025-09-22 12:57:37 +08:00
pixelpaws
6aa23fe36a Merge pull request #454 from willmiao/codex/migrate-recipe-http-layer-architecture
Refactor recipe routes to use registrar scaffolding
2025-09-22 12:42:37 +08:00
pixelpaws
3220cfb79c test(recipe-routes): add scaffolding baseline 2025-09-22 12:41:37 +08:00
pixelpaws
b92e7aa446 chore(routes): dedupe os import 2025-09-22 12:15:12 +08:00
Will Miao
c3b9c73541 refactor: remove ModelRouteUtils usage and implement filtering directly in services 2025-09-22 09:09:40 +08:00
pixelpaws
81c6672880 Merge pull request #453 from willmiao/codex/evaluate-need-for-further-refactoring
refactor: migrate model lifecycle handlers to dedicated service
2025-09-22 08:37:40 +08:00
pixelpaws
08baf884d3 refactor(routes): migrate lifecycle mutations to service 2025-09-22 08:28:30 +08:00
Will Miao
1c4096f3d5 test(routes): add tests for service readiness and error handling in download model 2025-09-22 06:28:30 +08:00
Will Miao
66a3f3f59a refactor(tests): enhance async test handling in pytest_pyfunc_call 2025-09-22 05:37:24 +08:00
pixelpaws
624df1328b Merge pull request #452 from willmiao/codex/create-application-level-use-case-services
feat(routes): extract orchestration use cases
2025-09-22 05:27:19 +08:00
pixelpaws
c063854b51 feat(routes): extract orchestration use cases 2025-09-22 05:25:27 +08:00
Will Miao
8cf99dd928 refactor(tests): remove deprecated test runner script 2025-09-21 23:39:21 +08:00
pixelpaws
c07e885725 Merge pull request #451 from willmiao/codex/refactor-routes_common.py-into-services
Refactor model route utilities into dedicated services
2025-09-21 23:38:38 +08:00
pixelpaws
21772feadd refactor(routes): extract route utilities into services 2025-09-21 23:34:46 +08:00
Will Miao
2d00cfdd31 refactor: enhance BaseModelService initialization and improve filtering logic 2025-09-21 23:13:30 +08:00
pixelpaws
49e03d658b Merge pull request #450 from willmiao/codex/refactor-basemodelroutes-for-better-separation
refactor(routes): extract registrar and handlers
2025-09-21 22:46:13 +08:00
Will Miao
fec85bcc08 refactor: unify standalone mode check using environment variable 2025-09-21 22:45:11 +08:00
pixelpaws
0e93a6bcb0 refactor(routes): extract registrar and handlers 2025-09-21 20:52:08 +08:00
pixelpaws
7e20f738fb Merge pull request #449 from willmiao/codex/document-and-map-basemodelroutes-contracts
docs(routes): map base model dependencies and contracts
2025-09-21 20:40:44 +08:00
pixelpaws
24090e6077 docs(routes): map base model dependencies and contracts 2025-09-21 20:34:45 +08:00
Will Miao
1022b07f64 feat: enhance model metadata provider with import error handling and mock setup for tests 2025-09-21 19:57:49 +08:00
pixelpaws
4faf912c6f Merge pull request #448 from willmiao/codex/create-documentation-and-tests-for-model-routes
test(routes): add base model route smoke coverage
2025-09-21 17:19:52 +08:00
pixelpaws
56e4b24b07 test(routes): clean smoke test module 2025-09-21 17:15:24 +08:00
Will Miao
12295d2fdc feat(docs): add comprehensive repository guidelines and project structure documentation 2025-09-21 16:40:07 +08:00
Will Miao
6261f7d18d Update LM extension wiki 2025-09-20 23:21:10 +08:00
Will Miao
9e1a2e3bb7 chore(pyproject): bump version to 0.9.4 2025-09-20 22:03:29 +08:00
Will Miao
40cbb2155c refactor(baseModelApi): comment out failure message handling in bulk metadata refresh 2025-09-20 21:43:00 +08:00
Will Miao
a8d7070832 feat(civitai): enhance metadata fetching with error handling and cache validation 2025-09-20 21:35:34 +08:00
Will Miao
ab7266f3a4 fix(download_manager): streamline output directory retrieval by using settings directly, fixes #443 2025-09-20 08:12:14 +08:00
Will Miao
3053b13fcb feat(metadata): enhance model processing with CivitAI metadata checks and new fields for archive DB status 2025-09-19 23:22:47 +08:00
Will Miao
f3544b3471 refactor(settings): replace getStorageItem with state.global.settings for default root retrieval 2025-09-19 22:57:05 +08:00
Will Miao
1610048974 refactor(metadata): update model fetching methods to return error messages alongside results 2025-09-19 16:36:34 +08:00
Will Miao
fc6f1bf95b fix(lora_loader): remove unnecessary string stripping from lora names in loaders, fixes #441 2025-09-19 11:17:19 +08:00
Will Miao
67b274c1b2 feat(settings): add 'show_only_sfw' setting to manage content visibility 2025-09-18 21:55:21 +08:00
Will Miao
fb0d6b5641 feat(docs): add comprehensive documentation for LoRA Manager Civitai Extension, including features, installation, privacy, and usage guidelines 2025-09-18 19:33:47 +08:00
Will Miao
d30fbeb286 feat(example_images): add dedicated folder check and update settings handling for example images path, see #431 2025-09-18 19:22:29 +08:00
Will Miao
46e430ebbb fix(utils): update API endpoint for fetching connected trigger words 2025-09-18 15:45:57 +08:00
Will Miao
bc4cd45fcb fix(lora_manager): rename invalid hash folder removal to orphaned folders and update logging 2025-09-18 15:09:32 +08:00
Will Miao
bdc86ddf15 Refactor API endpoints to use '/api/lm/' prefix
- Updated all relevant routes in `stats_routes.py` and `update_routes.py` to include the new '/api/lm/' prefix for consistency.
- Modified API endpoint configurations in `apiConfig.js` to reflect the new structure, ensuring all CRUD and bulk operations are correctly routed.
- Adjusted fetch calls in various components and managers to utilize the updated API paths, including recipe, model, and example image operations.
- Ensured all instances of the old API paths were replaced with the new '/api/lm/' prefix across the codebase for uniformity and to prevent broken links.
2025-09-18 14:50:40 +08:00
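
> Note: the entry above describes moving every endpoint under a shared '/api/lm/' prefix. As a rough illustration only (not the project's actual code), centralizing that prefix in one constant when registering aiohttp routes might look like the sketch below; the handler and path names are hypothetical.

```python
from aiohttp import web

API_PREFIX = "/api/lm"  # single source of truth for the endpoint prefix

async def get_stats(request: web.Request) -> web.Response:
    # Hypothetical handler standing in for a real stats endpoint.
    return web.json_response({"status": "ok"})

def register_routes(app: web.Application) -> None:
    # Building every path from one constant keeps endpoints uniform and
    # makes any future prefix change a single-line edit.
    app.router.add_get(f"{API_PREFIX}/stats", get_stats)
```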
Will Miao
ded17c1479 feat(routes): add model versions status endpoint and enhance metadata retrieval 2025-09-17 22:06:59 +08:00
Will Miao
933e2fc01d feat(routes): integrate CivitAI model version retrieval for various model types 2025-09-17 15:47:30 +08:00
Will Miao
1cddeee264 style(autocomplete): remove font styles from dropdown for consistency 2025-09-17 11:04:51 +08:00
Will Miao
183c000080 Refactor ComfyUI: Remove legacy tags widget and related dynamic imports
- Deleted the legacy tags widget implementation from legacy_tags_widget.js.
- Updated trigger_word_toggle.js to directly import the new tags widget.
- Removed unused dynamic import functions and version checks from utils.js.
- Cleaned up lora_loader.js and lora_stacker.js by removing redundant node registration code.
2025-09-16 21:48:20 +08:00
Will Miao
adf7b6d4b2 chore(version): bump version to 0.9.3 in pyproject.toml 2025-09-16 18:55:59 +08:00
Will Miao
0566d50346 feat(middleware): add .mp4 to image extensions for cache control 2025-09-16 15:39:12 +08:00
Will Miao
4275dc3003 refactor(middleware): reorganize cache middleware into py directory and update import paths 2025-09-16 15:16:53 +08:00
Will Miao
30956aeefc feat(middleware): add cache control middleware to manage response caching for image files 2025-09-16 15:05:31 +08:00
Will Miao
64e1dd3dd6 chore(release): update release notes for v0.9.3 with new features and bug fixes 2025-09-15 21:35:24 +08:00
Will Miao
0dc4b6f728 refactor(showcase): improve custom image identification logic in renderMediaItem and findLocalFile functions 2025-09-15 20:18:39 +08:00
Will Miao
86074c87d7 refactor(downloader): update download_to_memory calls to include response headers 2025-09-15 19:24:09 +08:00
Will Miao
6f9245df01 refactor(downloader): enhance download_to_memory to return response headers and improve error handling 2025-09-15 18:53:04 +08:00
Will Miao
4540e47055 refactor(baseModelApi): update example images path retrieval to use state settings 2025-09-15 18:07:22 +08:00
Will Miao
4bb8981e78 refactor(routes): update API endpoints for settings to use '/api/lm/settings', see #435 2025-09-15 16:22:59 +08:00
Will Miao
c49be91aa0 refactor(update_routes): exclude civitai folder from plugin update process 2025-09-15 16:04:20 +08:00
Will Miao
2b847039d4 refactor(settings-modal): adjust font size for path template preview 2025-09-15 15:38:01 +08:00
Will Miao
1147725fd7 feat(settings): add base model, author, and first tag option to download path templates
refactor(constants): reorder preset tag suggestions for consistency
2025-09-15 12:23:46 +08:00
Will Miao
26891e12a4 refactor(ExampleImagesManager): enhance path input handling with Enter key and blur events 2025-09-15 11:34:39 +08:00
Will Miao
2f7e44a76f refactor(settings): Update synchronization logic 2025-09-15 10:30:06 +08:00
Will Miao
9366d3d2d0 feat: add API endpoint for fetching application settings and update frontend settings management 2025-09-14 22:57:17 +08:00
Will Miao
6b606a5cc8 refactor(CivArchiveModelMetadataProvider): remove session management and use downloader for HTTP requests 2025-09-13 20:04:41 +08:00
Will Miao
e5339c178a fix: increase max-height for expanded sidebar tree children to improve visibility, fixes #403 2025-09-13 16:36:01 +08:00
Will Miao
1a76f74482 refactor(BaseModelRoutes): temporary comment out model description and creator checks 2025-09-13 13:07:25 +08:00
Will Miao
13f13eb095 fix: update preview versions keys for consistency in state management, fixes #406 2025-09-13 09:20:55 +08:00
Will Miao
125fdecd61 fix: handle missing download URL for primary file in metadata 2025-09-13 09:03:34 +08:00
Will Miao
d05076d258 feat: add CivArchive metadata provider and support for optional source parameter in downloads 2025-09-12 21:13:15 +08:00
Will Miao
00b77581fc refactor(Downloader): change logger info statements to debug level for proxy usage 2025-09-12 15:20:34 +08:00
Will Miao
897787d17c refactor(AutoComplete): simplify search term extraction and insertion logic 2025-09-12 14:35:25 +08:00
Will Miao
d5a280cf2b fix: increase maxItems for autocomplete to improve user experience 2025-09-12 14:01:52 +08:00
Will Miao
a0c2d9b5ad refactor: change logger info statements to debug level for improved logging granularity 2025-09-12 11:48:59 +08:00
Will Miao
e713bd1ca2 feat: add app-level proxy settings with UI integration and session management, fixes #382 2025-09-12 11:22:45 +08:00
start-life
a3c28c1003 Update zh-TW.json 2025-09-12 03:33:34 +03:00
start-life
f4b7c9a138 Update zh-CN.json 2025-09-12 03:33:09 +03:00
start-life
6b860b5f29 Update ru.json 2025-09-12 03:32:15 +03:00
start-life
37dfcd6abd Update ko.json 2025-09-12 03:31:57 +03:00
start-life
bc2fca3a4f Update ja.json 2025-09-12 03:31:35 +03:00
start-life
f8ef159656 Update fr.json 2025-09-12 03:31:12 +03:00
start-life
b2b8a9d37e Update es.json 2025-09-12 03:30:56 +03:00
start-life
15ae4031b7 Update de.json 2025-09-12 03:30:39 +03:00
start-life
688976ce3b Update en.json 2025-09-12 03:30:14 +03:00
start-life
a548af01dc Update settings_modal.html
Adding Hebrew language
2025-09-12 03:28:45 +03:00
start-life
0dd52eceb3 Update index.js
Adding Hebrew language
2025-09-12 03:26:08 +03:00
start-life
b8c6cf4ac1 Add files via upload
Adding Hebrew language
2025-09-12 03:20:38 +03:00
Will Miao
beb8ff1dd1 refactor(ModelFileService): enhance auto-organize logic to track source directories for cleanup, see #407 2025-09-11 23:02:30 +08:00
Will Miao
6a8f0867d9 refactor: migrate auto_organize_models logic to service layer with dependency injection 2025-09-11 22:37:46 +08:00
Will Miao
51ad1c9a33 refactor(MetadataProcessor): comment out guidance parameter in generation params, fixes #425 2025-09-11 16:55:41 +08:00
pixelpaws
34872eb612 Merge pull request #411 from gaoqi125/main
__init__.py register WanVideoLoraSelectFromText
2025-09-11 16:31:57 +08:00
Will Miao
8b4e3128ff feat: add functionality to open file location from model modal and update translations, fixes #405 2025-09-11 15:54:32 +08:00
Will Miao
c66cbc800b refactor: remove clear cache functionality and associated modal from settings manager 2025-09-11 15:21:06 +08:00
Will Miao
21941521a0 fix(sidebar): increase max-height for expanded sidebar tree children, see #403 2025-09-11 12:53:58 +08:00
Will Miao
0d33884052 refactor(ModelScanner): remove unused metadata fetching logic from model processing 2025-09-11 12:33:34 +08:00
Will Miao
415df49377 fix(SearchManager): update search options handling to modify relevant fields instead of replacing the entire object, see #415 2025-09-11 12:30:01 +08:00
Will Miao
f5f45002c7 fix(routes): skip tag check in model validation to allow empty tags 2025-09-11 12:10:00 +08:00
Will Miao
1edf7126bb fix(routes): add support for metadata archive settings in model processing 2025-09-11 09:31:58 +08:00
Will Miao
a1a55a1002 feat(node_extractors): add PCTextEncode extractor to NODE_EXTRACTORS registry, fixes #424 2025-09-11 06:45:22 +08:00
Will Miao
45f5cb46bd fix(utils): update base model retrieval to use model_data for consistency, fixes #423 2025-09-10 23:44:42 +08:00
Will Miao
1b5e608a27 fix(routes): enhance model processing to include checks for missing tags, description, and creator 2025-09-10 23:30:08 +08:00
Will Miao
a7df8ae15c feat(civitai_client): enrich model version info with additional metadata 2025-09-10 23:28:19 +08:00
Will Miao
47ce0d0fe2 fix(model_scanner): comment out fetch missing metadata call to prevent potential issues 2025-09-10 22:08:44 +08:00
pixelpaws
b220e288d0 Merge pull request #422 from willmiao/ca
Civitai metadata archive db
2025-09-10 20:35:16 +08:00
Will Miao
1fc8b45b68 feat(dependencies): add GitPython and aiosqlite to project dependencies 2025-09-10 20:33:45 +08:00
Will Miao
62f06302f0 refactor(routes): replace ModelMetadataProviderManager with get_default_metadata_provider in checkpoint, embedding, and lora routes 2025-09-10 20:29:26 +08:00
Will Miao
3e5cb223f3 refactor(metadata): remove outdated metadata provider summary documentation 2025-09-10 20:09:05 +08:00
Will Miao
4ee5b7481c fix(downloader): set socket read timeout to 5 minutes for improved stability during large downloads 2025-09-10 18:49:35 +08:00
gaoqi125
e104b78c01 Merge branch 'willmiao:main' into main 2025-09-10 18:02:51 +08:00
Will Miao
ba1ac58721 feat(metadata): trigger metadata provider update when enabling metadata archive database 2025-09-10 16:18:04 +08:00
Will Miao
a4fbeb6295 feat(metadata): update metadata archive management and remove provider priority settings 2025-09-10 15:55:29 +08:00
Will Miao
68f8871403 feat(metadata): add source tracking for SQLite metadata and implement Civitai API metadata validation 2025-09-10 11:20:58 +08:00
Will Miao
6fd74952b7 Refactor metadata handling to use unified provider system
- Replaced direct usage of Civitai client with a fallback metadata provider across all recipe parsers.
- Updated metadata service to improve initialization and error handling.
- Enhanced download manager to utilize a downloader service for file operations.
- Improved recipe scanner to fetch model information through the new metadata provider.
- Updated utility functions to streamline image downloading and processing.
- Added comprehensive logging and error handling for better debugging and reliability.
- Introduced `get_default_metadata_provider()` for simplified access to the default provider.
- Ensured backward compatibility with existing APIs and workflows.
2025-09-09 20:57:45 +08:00
Will Miao
1ea468cfc4 feat(metadata): enhance metadata archive management with download progress and status updates 2025-09-09 15:24:28 +08:00
Will Miao
14721c265f Refactor download logic to use unified downloader service
- Introduced a new `Downloader` class to centralize HTTP/HTTPS download management.
- Replaced direct `aiohttp` session handling with the unified downloader in `MetadataArchiveManager`, `DownloadManager`, and `ExampleImagesProcessor`.
- Added support for resumable downloads, progress tracking, and error handling in the new downloader.
- Updated methods to utilize the downloader's capabilities for downloading files and images, improving code maintainability and readability.
2025-09-09 10:34:14 +08:00
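
> Note: the Downloader commit above centralizes session handling, resumable downloads, and progress tracking. Below is a minimal sketch of that idea, assuming aiohttp and HTTP Range-based resumption; the class and method names are illustrative guesses, not the repository's real API.

```python
import os
import aiohttp

class Downloader:
    CHUNK = 1 << 16  # 64 KiB read size

    async def download_file(self, url: str, dest: str, progress_cb=None) -> str:
        # Resume from an existing partial file by requesting the remaining bytes.
        start = os.path.getsize(dest) if os.path.exists(dest) else 0
        headers = {"Range": f"bytes={start}-"} if start else {}
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=headers) as resp:
                resp.raise_for_status()
                # 206 Partial Content means the server honored the Range header;
                # anything else means we must rewrite the file from scratch.
                resuming = resp.status == 206
                written = start if resuming else 0
                total = written + int(resp.headers.get("Content-Length", 0))
                with open(dest, "ab" if resuming else "wb") as fh:
                    async for chunk in resp.content.iter_chunked(self.CHUNK):
                        fh.write(chunk)
                        written += len(chunk)
                        if progress_cb and total:
                            progress_cb(100.0 * written / total)
        return dest
```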
Will Miao
821827a375 feat(metadata): implement metadata archive management and update settings for metadata providers 2025-09-08 22:41:17 +08:00
Will Miao
9ba3e2c204 feat(metadata): implement metadata providers and initialize metadata service
- Added ModelMetadataProvider and CivitaiModelMetadataProvider for handling model metadata.
- Introduced SQLiteModelMetadataProvider for SQLite database integration.
- Created metadata_service.py to initialize and configure metadata providers.
- Updated CivitaiClient to register as a metadata provider.
- Refactored download_manager to use the new download_file method.
- Added SQL schema for models, model_versions, and model_files.
- Updated requirements.txt to include aiosqlite.
2025-09-08 22:41:17 +08:00
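
> Note: this entry mentions an SQLiteModelMetadataProvider backed by aiosqlite with models, model_versions, and model_files tables. A hedged sketch of what such a lookup could look like follows; the provider interface and column names are assumptions for illustration only.

```python
import sqlite3
import aiosqlite

class SQLiteMetadataProvider:
    """Minimal read-only lookup over an SQLite metadata archive (illustrative)."""

    def __init__(self, db_path: str):
        self.db_path = db_path

    async def get_model_version(self, version_id: int) -> dict | None:
        async with aiosqlite.connect(self.db_path) as db:
            db.row_factory = sqlite3.Row  # dict-like access by column name
            cursor = await db.execute(
                "SELECT * FROM model_versions WHERE id = ?",
                (version_id,),
            )
            row = await cursor.fetchone()
            return dict(row) if row else None
```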
Will Miao
d287883671 refactor(civitai): remove legacy get_model_description and _get_hash_from_civitai methods 2025-09-08 22:41:17 +08:00
Will Miao
ead34818db feat(utils): implement forwardMiddleMouseToCanvas function and integrate it into JSON, LoRA, and Tags widgets, see #417 2025-09-08 21:49:16 +08:00
Will Miao
a060010b96 feat(loras_widget): add delayed preview tooltip for LoRA names, see #416 2025-09-08 21:03:22 +08:00
gaoqi125
76a92ac847 Update wanvideo_lora_select_from_text.py 2025-09-07 23:21:33 +08:00
gaoqi125
74bc490383 Update __init__.py 2025-09-07 19:51:19 +08:00
Will Miao
510d476323 feat(civitai): enhance LoRA matching by extracting hashes from metadata 2025-09-07 10:05:30 +08:00
Will Miao
1e7257fd53 fix(download): temporarily disable delay to speed up downloads 2025-09-06 18:47:18 +08:00
Will Miao
4ff1f51b1c fix(docs): update portable package download link to version 0.9.2 2025-09-06 18:02:57 +08:00
Will Miao
74507cef05 feat(settings): add validation for settings.json to ensure required configuration is present
fix(usage_stats): handle initialization errors for usage statistics when no valid paths are configured, fixes #375
2025-09-06 17:39:51 +08:00
Will Miao
c23ab04d90 chore(release): update version to 0.9.2 and add release notes for bulk auto-organization feature 2025-09-06 14:38:00 +08:00
Will Miao
d50dde6cf6 refactor(i18n): remove legacy migration summary and transition to JSON format 2025-09-06 10:07:43 +08:00
Will Miao
fcb1fb39be feat(controls): add toggleBulkMode functionality for Checkpoints and Embeddings pages 2025-09-06 08:15:18 +08:00
Will Miao
b0ef74f802 feat(LoraManager): add example images cleanup functionality to remove invalid or empty folders, see #402 2025-09-06 07:59:33 +08:00
Will Miao
f332aef41d fix(BulkManager): prevent initialization on recipes page to avoid unnecessary processing 2025-09-05 22:45:23 +08:00
Will Miao
1f91a3da8e fix(BulkManager): streamline cleanupBulkBaseModelModal to clear base model select options 2025-09-05 21:00:54 +08:00
Will Miao
16840c321d feat(api): enhance fetchModelDescription to improve error handling and response parsing 2025-09-05 20:57:36 +08:00
Will Miao
c109e392ad feat(auto-organize): add auto-organize functionality for selected models and update context menu 2025-09-05 20:51:30 +08:00
pixelpaws
5e69671366 Merge pull request #398 from gaoqi125/gaoqi125-patch-1
Create wanvideo_lora_select_from_text.py
2025-09-05 19:55:40 +08:00
Will Miao
52d23d9b75 feat(constants): update model tags to include 'realistic', 'anime', 'toon', and 'furry' 2025-09-05 19:53:29 +08:00
Will Miao
4c4e6d7a7b feat(release-notes): update version to 0.9.1 and enhance bulk operations documentation 2025-09-05 18:15:08 +08:00
Will Miao
03b6e78705 feat(locales): add bulk base model functionality in multiple languages and update toast messages 2025-09-05 18:00:21 +08:00
pixelpaws
24c01141d7 Merge pull request #400 from willmiao/bulk-menu
Bulk menu
2025-09-05 17:44:40 +08:00
Will Miao
6dc2811af4 feat(bulk-modal): refactor bulk base model modal for improved UI and functionality, fixes #352 2025-09-05 17:36:54 +08:00
Will Miao
e6425dce32 feat(bulk-manager): enhance bulk mode handling by skipping actions when a modal is open 2025-09-05 17:07:57 +08:00
Will Miao
95e2ff5f1e Implement centralized event management system with priority handling and state tracking
- Enhanced EventManager class to support priority-based event handling, conditional execution, and automatic cleanup.
- Integrated event management into BulkManager for global keyboard shortcuts and marquee selection events.
- Migrated mouse tracking and node selector events to UIHelpers for better coordination.
- Established global event handlers for context menu interactions and modal state management.
- Added comprehensive documentation for event management implementation and usage.
- Implemented initialization logic for event management, including error handling and cleanup on page unload.
2025-09-05 16:56:26 +08:00
Will Miao
92ac487128 feat(bulk-base-model): implement bulk base model setting functionality with UI and context menu integration 2025-09-05 14:07:03 +08:00
Will Miao
3250fa89cb feat(selection): implement marquee selection for bulk operations 2025-09-05 11:24:48 +08:00
Will Miao
7475de366b feat(context-menu): enhance bulk workflow options with append and replace actions 2025-09-05 11:24:48 +08:00
Will Miao
affb507b37 feat(sync): enhance translation key synchronization to remove obsolete keys 2025-09-05 11:24:48 +08:00
pixelpaws
3320b80150 Merge pull request #399 from willmiao/bulk-menu
Bulk context menu
2025-09-05 09:31:48 +08:00
Will Miao
fb2b69b787 feat(tags): refactor preset tags to constants for better maintainability 2025-09-05 09:27:45 +08:00
Will Miao
29a05f6533 Move test_i18n.py to scripts folder 2025-09-05 08:48:20 +08:00
Will Miao
9fa3fac973 feat(locales): add bulk tag management translations for multiple languages 2025-09-05 08:43:01 +08:00
Will Miao
904b0d104a feat(sync): add translation key synchronization script for locale management 2025-09-05 08:35:20 +08:00
Will Miao
1d31dae110 feat(tags): implement bulk tag addition and replacement functionality 2025-09-05 07:18:24 +08:00
Will Miao
476ecb7423 fix(banner): ensure href attribute defaults to '#' for actions without a URL 2025-09-04 22:09:15 +08:00
Will Miao
4eb67cf6da feat(bulk-tags): add bulk tag management modal and context menu integration 2025-09-04 22:08:55 +08:00
Will Miao
a5a9f7ed83 fix(banner): ensure href attribute defaults to '#' for actions without a URL 2025-09-04 22:07:07 +08:00
Will Miao
c0b029e228 feat(context-menu): refactor context menu initialization and coordination for improved bulk operations 2025-09-04 16:34:05 +08:00
Will Miao
9bebcc9a4b feat(bulk): implement bulk context menu for model operations and remove bulk operations panel 2025-09-04 15:24:54 +08:00
Will Miao
ac7d23011c chore(release): update version to 0.9.0 and add release notes for UI overhaul and new features 2025-09-04 00:04:25 +08:00
pixelpaws
491e09b7b5 Merge pull request #395 from willmiao/ot
Onboarding Tutorial
2025-09-03 23:25:31 +08:00
Will Miao
192bc237bf fix(onboarding): update language selection button text and remove skip option from translations 2025-09-03 23:04:06 +08:00
Will Miao
f041f4a114 feat(onboarding): prevent onboarding from starting if version-mismatch banner is visible 2025-09-03 22:48:29 +08:00
Will Miao
2546580377 fix(localization): update French translations for "recipe" to ensure consistency in terminology 2025-09-03 22:23:35 +08:00
Will Miao
8fbf2ab56d feat(onboarding): add multilingual support for onboarding steps and language selection 2025-09-03 22:17:48 +08:00
Will Miao
ea727aad2e feat(onboarding): enhance target highlighting with mask and pulsing effect 2025-09-03 21:44:23 +08:00
Will Miao
5520aecbba fix(onboarding): adjust language selection logic to skip if already set and update prompt text 2025-09-03 19:22:53 +08:00
Will Miao
6b738a4769 fix(onboarding): update language handling and selection logic in onboarding process 2025-09-03 19:15:55 +08:00
Will Miao
903a8050b3 Add SVG flags for France, Hong Kong, Japan, South Korea, Russia, and the United States
- Added France flag (fr.svg) with three vertical stripes: blue, white, and red.
- Added Hong Kong flag (hk.svg) featuring a red background with a white flower emblem.
- Added Japan flag (jp.svg) with a white field and a red circle in the center.
- Added South Korea flag (kr.svg) showcasing a white background with a central yin-yang symbol and four black trigrams.
- Added Russia flag (ru.svg) with three horizontal stripes: white, blue, and red.
- Added United States flag (us.svg) with red and white stripes and a blue canton featuring stars.
2025-09-03 18:19:34 +08:00
Will Miao
31b032429d fix(sidebar): change default pinned state to true for sidebar restoration 2025-09-03 15:46:33 +08:00
Will Miao
2bcf341f04 feat(onboarding): implement onboarding tutorial with language selection and step guidance 2025-09-03 15:42:36 +08:00
Will Miao
ca6f45b359 fix(download-manager): temporarily disable delay to speed up downloads 2025-09-02 22:36:36 +08:00
Will Miao
2a67cec16b fix(sidebar): update tree selection logic and improve breadcrumb and header state handling 2025-09-02 18:19:01 +08:00
Will Miao
1800afe31b feat(sidebar): implement display mode toggle and update sidebar actions for improved navigation. See #389 2025-09-02 17:42:21 +08:00
gaoqi125
8c6311355d Create wanvideo_lora_select_from_text.py
Stacking new LoRA nodes via lora_syntax text input
2025-09-02 17:18:48 +08:00
Will Miao
91801dff85 feat(localization): add new workflow-related messages for LoRA and recipe actions in multiple languages 2025-09-02 11:50:20 +08:00
Will Miao
be594133f0 feat(localization): update app title from "oRA Manager" to "LoRA Manager" across all locale files 2025-09-02 10:29:29 +08:00
Will Miao
8a538d117e feat(localization): simplify language selection labels and update app title across all locale files 2025-09-02 10:11:55 +08:00
Will Miao
8d9118cbee feat(localization): update control labels and actions for improved clarity in multiple languages 2025-09-01 22:00:19 +08:00
Will Miao
b67464ea13 feat(trigger-word-toggle): update existing tags' active state based on default_active widget value 2025-09-01 20:55:50 +08:00
Will Miao
33334da0bb feat(i18n): add structural consistency tests for locale files and enhance existing tests 2025-09-01 19:29:50 +08:00
pixelpaws
40ce2baa7b Merge pull request #388 from willmiao/i18n
I18n
2025-09-01 08:57:39 +08:00
Will Miao
1134466cc0 feat(i18n): complete locale files for all languages 2025-09-01 08:48:34 +08:00
Will Miao
92341111ad feat(localization): enhance import modal and related components with new labels, descriptions, and error messages for improved user experience 2025-08-31 22:41:35 +08:00
Will Miao
4956d6781f feat(localization): enhance download modal with new labels and error messages for improved user experience 2025-08-31 22:06:59 +08:00
Will Miao
63562240c4 feat(localization): enhance English and Chinese translations for update notifications and support modal 2025-08-31 21:54:54 +08:00
Will Miao
84d801cf14 feat(localization): enhance settings modal with new sections and translations for improved user experience 2025-08-31 21:27:59 +08:00
Will Miao
b56fe4ca68 Implement code changes to enhance functionality and improve performance 2025-08-31 20:55:08 +08:00
Will Miao
6c83c65e02 feat(localization): add custom filter message and update toast keys for recipe actions 2025-08-31 20:32:37 +08:00
Will Miao
a83f020fcc feat(localization): add file size labels and enhance search placeholders in UI components 2025-08-31 20:26:13 +08:00
Will Miao
7f9a3bf272 feat(i18n): enhance translation key extraction to optionally include container nodes 2025-08-31 19:01:23 +08:00
Will Miao
f80e266d02 feat(localization): update toast messages for consistency and improved error handling across various components 2025-08-31 18:38:42 +08:00
Will Miao
7bef562541 feat(localization): update toast messages for improved user feedback and localization support across various components 2025-08-31 16:52:58 +08:00
Will Miao
b2428f607c feat(localization): add trigger words functionality with localization support for UI elements and messages 2025-08-31 15:13:12 +08:00
Will Miao
8303196b57 feat(localization): enhance toast messages for context menu actions, model tags, and download management with improved error handling and user feedback 2025-08-31 14:27:33 +08:00
Will Miao
987b8c8742 feat(localization): enhance toast messages for recipes and example images with improved error handling and success feedback 2025-08-31 13:51:37 +08:00
Will Miao
e60a579b85 feat(localization): enhance toast messages for API actions and model management with i18n support
refactor(localization): update toast messages in various components and managers for better user feedback
2025-08-31 12:25:08 +08:00
Will Miao
be8edafed0 feat(localization): enhance toast messages for better user feedback and localization support 2025-08-31 11:51:28 +08:00
Will Miao
a258a18fa4 refactor(preload): remove unnecessary preload blocks from multiple templates 2025-08-31 11:28:49 +08:00
Will Miao
59010ca431 Refactor localization handling and improve i18n support across the application
- Replaced `safeTranslate` with `translate` in various components for consistent translation handling.
- Updated Chinese (Simplified and Traditional) localization files to include new keys and improved translations for model card actions, metadata, and usage tips.
- Enhanced the ModelCard, ModelDescription, ModelMetadata, ModelModal, and ModelTags components to utilize the new translation functions.
- Improved user feedback messages for actions like copying to clipboard, saving notes, and updating tags with localized strings.
- Ensured all UI elements reflect the correct translations based on the user's language preference.
2025-08-31 11:19:06 +08:00
Will Miao
75f3764e6c refactor(i18n): optimize safeTranslate usage by removing unnecessary await calls 2025-08-31 10:32:15 +08:00
Will Miao
867ffd1163 feat(localization): add model description translations and enhance UI text across multiple languages 2025-08-31 10:12:54 +08:00
Will Miao
6acccbbb94 fix(localization): update language labels to use English and native scripts for consistency 2025-08-31 09:16:26 +08:00
Will Miao
b2c4efab45 refactor(i18n): streamline i18n initialization and update translation methods 2025-08-31 09:03:06 +08:00
Will Miao
408a435b71 Add copilot instructions to enforce English for comments 2025-08-31 09:02:51 +08:00
Will Miao
36d3cd93d5 Enhance localization and UI for model management features
- Added new localization keys for usage statistics, collection analysis, storage efficiency, and insights in English and Chinese.
- Updated modal templates to utilize localization for delete, exclude, and bulk delete confirmations.
- Improved download modal with localized labels and placeholders.
- Enhanced example access modal with localized titles and descriptions.
- Updated help modal to include localized content for update vlogs and documentation sections.
- Refactored move modal to use localization for labels and buttons.
- Implemented localization in relink Civitai modal for warnings and help text.
- Updated update modal to reflect localized text for actions and progress messages.
- Enhanced statistics template with localized titles for charts and lists.
2025-08-30 23:20:13 +08:00
Will Miao
b36fea002e Add localization support for new features and update existing translations
- Added "unknown" status to model states in English and Chinese locales.
- Introduced new actions for checking updates and support in both locales.
- Added settings for Civitai API key with help text in both locales.
- Updated context menus and control components to use localized strings.
- Enhanced help and support modals with localization.
- Updated update modal to reflect current and new version information in localized format.
- Refactored various templates to utilize the translation function for better internationalization.
2025-08-30 22:32:44 +08:00
Will Miao
52acbd954a Add Chinese (Simplified and Traditional) localization files and implement i18n tests
- Created zh-CN.json and zh-TW.json for Simplified and Traditional Chinese translations respectively.
- Added comprehensive test suite in test_i18n.py to validate JSON structure, server-side i18n functionality, and translation completeness across multiple languages.
2025-08-30 21:41:48 +08:00
Will Miao
f6709a55c3 refactor(i18n): Remove server_i18n references from routes and update translations in zh-CN and zh-TW locales 2025-08-30 19:02:37 +08:00
Will Miao
7b374d747b cleanup 2025-08-30 18:44:33 +08:00
Will Miao
fd480a9360 refactor(i18n): Remove language setting endpoints and related logic from MiscRoutes 2025-08-30 17:48:32 +08:00
Will Miao
ec8b228867 fix(statistics): Add margin-top to metrics grid for improved spacing 2025-08-30 17:30:49 +08:00
Will Miao
401200050b feat(i18n): Enhance internationalization support by updating storage retrieval and translation handling 2025-08-30 17:29:04 +08:00
Will Miao
29160bd6e5 feat(i18n): Implement server-side internationalization support
- Added ServerI18nManager to handle translations and locale settings on the server.
- Integrated server-side translations into templates, reducing language flashing on initial load.
- Created API endpoints for setting and getting user language preferences.
- Enhanced client-side i18n handling to work seamlessly with server-rendered content.
- Updated various templates to utilize the new translation system.
- Added mixed i18n handler to coordinate server and client translations, improving user experience.
- Expanded translation files to include initialization messages for various components.
2025-08-30 16:56:56 +08:00
Will Miao
3c9e402bc0 Add Korean, Russian, and Traditional Chinese translations for LoRA Manager 2025-08-30 11:32:39 +08:00
Will Miao
ff4d0f0208 feat: Update Simplified Chinese translations for LoRA Manager to improve clarity and consistency 2025-08-29 21:32:48 +08:00
Will Miao
f82908221c Implement internationalization (i18n) system for LoRA Manager
- Added i18n support with automatic language detection based on browser settings.
- Implemented translations for English (en) and Simplified Chinese (zh-CN).
- Created utility functions for text replacement in HTML templates and JavaScript.
- Developed a comprehensive translation key structure for various application components.
- Added formatting functions for numbers, dates, and file sizes according to locale.
- Included RTL language support and dynamic updates for DOM elements.
- Created tests to verify the functionality of the i18n system.
2025-08-29 21:32:48 +08:00
Will Miao
4246908f2e feat: Add updateContainerMargin method and integrate it into sidebar state management for improved layout handling 2025-08-29 21:28:19 +08:00
Will Miao
f64597afd2 feat: Update restoreSelectedFolder to ensure activeFolder is a string before assignment and reset selectedPath if not 2025-08-29 17:46:43 +08:00
Will Miao
975ff2672d feat: Add new Flux model 'FLUX_1_KREA' and update Video Models list for enhanced model support 2025-08-28 16:24:01 +08:00
Will Miao
e90ba31784 feat: Update filter_civitai_data to include 'id' and 'modelId' fields for improved data retrieval 2025-08-28 15:21:04 +08:00
Will Miao
a4074c93bc feat: Improve folder filtering logic to ensure exact matches and handle root folder case 2025-08-28 05:33:53 +08:00
Will Miao
7a8b7598c7 feat: Enhance deepMerge function to only update existing keys in target for improved merging logic 2025-08-27 20:42:57 +08:00
Will Miao
cd0d832f14 feat: Refactor showModelModal to fetch complete metadata and update related functions for improved data handling 2025-08-27 19:42:34 +08:00
Will Miao
5b0becaaf2 feat: Implement model description retrieval and update related API endpoints 2025-08-27 18:22:56 +08:00
Will Miao
9817bac2fe feat: Add metadata endpoint and implement model metadata retrieval functionality 2025-08-27 17:44:29 +08:00
Will Miao
f6bd48cfcd feat: Update box-shadow for header and adjust controls styling for improved layout 2025-08-27 15:43:44 +08:00
Will Miao
01843b8f2b feat: Update media query breakpoints from 2000px to 2150px for improved responsiveness across components 2025-08-27 09:54:08 +08:00
Will Miao
94ed81de5e feat: Update tooltip positioning comments for clarity and consistency 2025-08-27 09:11:19 +08:00
Will Miao
0700b8f399 feat: Adjust sidebar position to align with viewport edges for improved layout consistency 2025-08-27 09:11:05 +08:00
Will Miao
d62cff9841 feat: Refactor SidebarManager integration and cleanup methods for improved state management 2025-08-26 21:38:33 +08:00
Will Miao
083f4805b2 feat: Enhance get_preview_static_url to find the longest matching route for static URLs 2025-08-26 20:41:01 +08:00
Will Miao
8e5bfd379e feat: Add closeDropdown method to manage dropdown state in SidebarManager 2025-08-26 19:26:05 +08:00
pixelpaws
2366f143d8 Merge pull request #377 from willmiao/sidebar, See #257 #52
Sidebar
2025-08-26 19:10:30 +08:00
Will Miao
e997f5bc1b feat: Update activeFolder state initialization to load from localStorage for each model type 2025-08-26 19:04:23 +08:00
Will Miao
842beec7cc feat: Update recursive search option to default to true and remove related UI elements 2025-08-26 18:14:43 +08:00
Will Miao
d2268fc9e0 feat: Implement initial hidden state for sidebar and enhance visibility handling 2025-08-26 18:02:52 +08:00
Will Miao
a98e26139f feat: Implement auto-hide functionality for sidebar and update controls layout 2025-08-26 17:57:59 +08:00
Will Miao
522a3ea88b feat: Update sidebar breadcrumb styles and enhance dropdown functionality 2025-08-26 17:13:04 +08:00
Will Miao
d7949fbc30 feat: Enhance sidebar navigation with dropdowns and refactor breadcrumb structure 2025-08-26 16:44:01 +08:00
Will Miao
6df083a1d5 feat: Refactor sidebar components for improved structure and styling 2025-08-26 15:26:45 +08:00
Will Miao
4dc80e7f6e feat: Implement sidebar navigation with folder tree and controls 2025-08-26 10:33:46 +08:00
Will Miao
c2a8508513 feat: Add get_preview_extension function to retrieve complete preview file extensions 2025-08-26 10:19:17 +08:00
Will Miao
159193ef43 feat: Implement unique filename generation with conflict resolution using metadata hash 2025-08-25 15:33:46 +08:00
Will Miao
1f37ffb105 feat: Refactor unique filename generation to use a hash provider for improved flexibility 2025-08-25 14:52:44 +08:00
Will Miao
919fed05c5 feat: Enhance model moving functionality with improved error handling and unique filename generation 2025-08-25 13:08:35 +08:00
Will Miao
1814f83bee feat: Implement post-initialization tasks and backup file cleanup in LoraManager 2025-08-25 09:03:40 +08:00
Will Miao
1823840456 feat: Disable image optimization in find_preview_file function for future configuration 2025-08-25 09:03:28 +08:00
Will Miao
623c28bfc3 feat: Remove backup creation from metadata saving functions for streamlined operations 2025-08-24 22:30:53 +08:00
342 changed files with 53342 additions and 13241 deletions

1
.github/copilot-instructions.md vendored Normal file

@@ -0,0 +1 @@
Always use English for comments.

69
.github/workflows/backend-tests.yml vendored Normal file

@@ -0,0 +1,69 @@
name: Backend Tests

on:
  push:
    branches:
      - main
      - master
    paths:
      - 'py/**'
      - 'standalone.py'
      - 'tests/**'
      - 'requirements.txt'
      - 'requirements-dev.txt'
      - 'pyproject.toml'
      - 'pytest.ini'
      - '.github/workflows/backend-tests.yml'
  pull_request:
    paths:
      - 'py/**'
      - 'standalone.py'
      - 'tests/**'
      - 'requirements.txt'
      - 'requirements-dev.txt'
      - 'pyproject.toml'
      - 'pytest.ini'
      - '.github/workflows/backend-tests.yml'

jobs:
  pytest:
    name: Run pytest with coverage
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          cache: 'pip'
          cache-dependency-path: |
            requirements.txt
            requirements-dev.txt
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements-dev.txt
      - name: Run pytest with coverage
        env:
          COVERAGE_FILE: coverage/backend/.coverage
        run: |
          mkdir -p coverage/backend
          python -m pytest \
            --cov=py \
            --cov=standalone \
            --cov-report=term-missing \
            --cov-report=xml:coverage/backend/coverage.xml \
            --cov-report=html:coverage/backend/html \
            --cov-report=json:coverage/backend/coverage.json
      - name: Upload coverage artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: backend-coverage
          path: coverage/backend
          if-no-files-found: warn

52
.github/workflows/frontend-tests.yml vendored Normal file

@@ -0,0 +1,52 @@
name: Frontend Tests

on:
  push:
    branches:
      - main
      - master
    paths:
      - 'package.json'
      - 'package-lock.json'
      - 'vitest.config.js'
      - 'tests/frontend/**'
      - 'static/js/**'
      - 'scripts/run_frontend_coverage.js'
      - '.github/workflows/frontend-tests.yml'
  pull_request:
    paths:
      - 'package.json'
      - 'package-lock.json'
      - 'vitest.config.js'
      - 'tests/frontend/**'
      - 'static/js/**'
      - 'scripts/run_frontend_coverage.js'
      - '.github/workflows/frontend-tests.yml'

jobs:
  vitest:
    name: Run Vitest with coverage
    runs-on: ubuntu-latest
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
      - name: Use Node.js 20
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'
      - name: Install dependencies
        run: npm ci
      - name: Run frontend tests with coverage
        run: npm run test:coverage
      - name: Upload coverage artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: frontend-coverage
          path: coverage/frontend
          if-no-files-found: warn

4
.gitignore vendored

@@ -5,3 +5,7 @@ output/*
py/run_test.py
.vscode/
cache/
civitai/
node_modules/
coverage/
.coverage

22
AGENTS.md Normal file

@@ -0,0 +1,22 @@
# Repository Guidelines
## Project Structure & Module Organization
ComfyUI LoRA Manager pairs a Python backend with browser-side widgets. Backend modules live in `py/` with HTTP entry points in `py/routes/`, feature logic in `py/services/`, shared helpers in `py/utils/`, and custom nodes in `py/nodes/`. UI scripts extend ComfyUI from `web/comfyui/`, while deploy-ready assets remain in `static/` and `templates/`. Localization files live in `locales/`, example workflows in `example_workflows/`, and interim tests such as `test_i18n.py` sit beside their source until a dedicated `tests/` tree lands.

## Build, Test, and Development Commands
- `pip install -r requirements.txt` installs backend dependencies.
- `python standalone.py --port 8188` launches the standalone server for iterative development.
- `python -m pytest test_i18n.py` runs the current regression suite; target new files explicitly, e.g. `python -m pytest tests/test_recipes.py`.
- `python scripts/sync_translation_keys.py` synchronizes locale keys after UI string updates.

## Coding Style & Naming Conventions
Follow PEP 8 with four-space indentation and descriptive snake_case file and function names such as `settings_manager.py`. Classes stay PascalCase, constants in UPPER_SNAKE_CASE, and loggers retrieved via `logging.getLogger(__name__)`. Prefer explicit type hints and docstrings on public APIs. JavaScript under `web/comfyui/` uses ES modules with camelCase helpers and the `_widget.js` suffix for UI components.

## Testing Guidelines
Pytest powers backend tests. Name modules `test_<feature>.py` and keep them near the code or in a future `tests/` package. Mock ComfyUI dependencies through helpers in `standalone.py`, keep filesystem fixtures deterministic, and ensure translations are covered. Run `python -m pytest` before submitting changes.

## Commit & Pull Request Guidelines
Commits follow the conventional format, e.g. `feat(settings): add default model path`, and should stay focused on a single concern. Pull requests must outline the problem, summarize the solution, list manual verification steps (server run, targeted pytest), and link related issues. Include screenshots or GIFs for UI or locale updates and call out migration steps such as `settings.json` adjustments.

## Configuration & Localization Tips
Copy `settings.json.example` to `settings.json` and adapt model directories before running the standalone server. Store reference assets in `civitai/` or `docs/` to keep runtime directories deploy-ready. Whenever UI text changes, update every `locales/<lang>.json` file and rerun the translation sync script so ComfyUI surfaces localized strings.
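For illustration only, a minimal test module that follows these conventions might look like the sketch below; the file name and the settings key are hypothetical, not part of the repository:

```python
# tests/test_settings_round_trip.py - hypothetical example following the
# test_<feature>.py naming convention; the settings key is illustrative only.
import json
from pathlib import Path


def test_settings_round_trip(tmp_path: Path):
    """Keep filesystem fixtures deterministic: write then re-read a settings file."""
    settings_file = tmp_path / "settings.json"
    settings_file.write_text(json.dumps({"default_lora_root": "loras"}))

    loaded = json.loads(settings_file.read_text())

    assert loaded["default_lora_root"] == "loras"
```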

118
README.md

@@ -34,6 +34,34 @@ Enhance your Civitai browsing experience with our companion browser extension! S
## Release Notes
### v0.9.6
* **Critical Performance Optimization** - Introduced persistent model cache that dramatically accelerates initialization after startup and significantly reduces Python backend memory footprint for improved application performance.
* **Cross-Browser Settings Synchronization** - Migrated nearly all settings to the backend, ensuring your preferences sync automatically across all browsers for a seamless multi-browser experience.
* **Protected User Settings Location** - Relocated user settings (settings.json) to the user config directory (accessible via the link icon in Settings), preventing accidental deletion during reinstalls or updates.
* **Global Context Menu** - Added a new global context menu accessible by right-clicking on empty page areas, providing quick access to global operations with more features coming in future updates.
* **Multi-Library Support** - Introduced support for managing multiple libraries, allowing you to easily switch between different model collections (advanced usage, documentation in progress).
* **Bug Fixes & Stability Improvements** - Various bug fixes and enhancements for improved stability and reliability.
### v0.9.3
* **Metadata Archive Database Support** - Added the ability to download and utilize a metadata archive database, enabling access to metadata for models that have been deleted from CivitAI.
* **App-Level Proxy Settings** - Introduced support for configuring a global proxy within the application, making it easier to use the manager behind network restrictions.
* **Bug Fixes** - Various bug fixes for improved stability and reliability.
### v0.9.2
* **Bulk Auto-Organization Action** - Added a new bulk auto-organization feature. You can now select multiple models and automatically organize them according to your current path template settings for streamlined management.
* **Bug Fixes** - Addressed several bugs to improve stability and reliability.
### v0.9.1
* **Enhanced Bulk Operations** - Improved bulk operations with Marquee Selection and a bulk operation context menu, providing a more intuitive, desktop-application-like user experience.
* **New Bulk Actions** - Added bulk operations for adding tags and setting base models to multiple models simultaneously.
### v0.9.0
* **UI Overhaul for Enhanced Navigation** - Replaced the top flat folder tags with a new folder sidebar and breadcrumb navigation system for a more intuitive folder browsing and selection experience.
* **Dual-Mode Folder Sidebar** - The new folder sidebar offers two display modes: 'List Mode,' which mirrors the classic folder view, and 'Tree Mode,' which presents a hierarchical folder structure for effortless navigation through nested directories.
* **Internationalization Support** - Introduced multi-language support, now available in English, Simplified Chinese, Traditional Chinese, Spanish, Japanese, Korean, French, Russian, and German. Feedback from native speakers is welcome to improve the translations.
* **Automatic Filename Conflict Resolution** - Implemented automatic file renaming (`original name + short hash`) to prevent conflicts when downloading or moving models.
* **Performance Optimizations & Bug Fixes** - Various performance improvements and bug fixes for a more stable and responsive experience.
### v0.8.30
* **Automatic Model Path Correction** - Added auto-correction for model paths in built-in nodes such as Load Checkpoint, Load Diffusion Model, Load LoRA, and other custom nodes with similar functionality. Workflows containing outdated or incorrect model paths will now be automatically updated to reflect the current location of your models.
* **Node UI Enhancements** - Improved node interface for a smoother and more intuitive user experience.
@@ -62,61 +90,6 @@ Enhance your Civitai browsing experience with our companion browser extension! S
* **Enhanced Node Usability** - Improved user experience for Lora Loader, Lora Stacker, and WanVideo Lora Select nodes by fixing the maximum height of the text input area. Users can now freely and conveniently adjust the LoRA region within these nodes.
* **Compatibility Fixes** - Resolved compatibility issues with ComfyUI and certain custom nodes, including ComfyUI-Custom-Scripts, ensuring smoother integration and operation.
### v0.8.25
* **LoRA List Reordering**
- Drag & Drop: Easily rearrange LoRA entries using the drag handle.
- Keyboard Shortcuts:
- Arrow keys: Navigate between LoRAs
- Ctrl/Cmd + Arrow: Move selected LoRA up/down
- Ctrl/Cmd + Home/End: Move selected LoRA to top/bottom
- Delete/Backspace: Remove selected LoRA
- Context Menu: Right-click for quick actions like Move Up, Move Down, Move to Top, Move to Bottom.
* **Bulk Operations for Checkpoints & Embeddings**
- Bulk Mode: Select multiple checkpoints or embeddings for batch actions.
- Bulk Refresh: Update Civitai metadata for selected models.
- Bulk Delete: Remove multiple models at once.
- Bulk Move (Embeddings): Move selected embeddings to a different folder.
* **New Setting: Auto Download Example Images**
- Automatically fetch example images for models missing previews (requires download location to be set). Enabled by default.
* **General Improvements**
- Various user experience enhancements and stability fixes.
### v0.8.22
* **Embeddings Management** - Added Embeddings page for comprehensive embedding model management.
* **Advanced Sorting Options** - Introduced flexible sorting controls, allowing sorting by name, added date, or file size in both ascending and descending order.
* **Custom Download Path Templates & Base Model Mapping** - Implemented UI settings for configuring download path templates and base model path mappings, allowing customized model organization and storage location when downloading models via LM Civitai Extension.
* **LM Civitai Extension Enhancements** - Improved concurrent download performance and stability, with new support for canceling active downloads directly from the extension interface.
* **Update Feature** - Added update functionality, allowing users to update LoRA Manager to the latest release version directly from the LoRA Manager UI.
* **Bulk Operations: Refresh All** - Added bulk refresh functionality, allowing users to update Civitai metadata across multiple LoRAs.
### v0.8.20
* **LM Civitai Extension** - Released [browser extension through Chrome Web Store](https://chromewebstore.google.com/detail/lm-civitai-extension/capigligggeijgmocnaflanlbghnamgm?utm_source=item-share-cb) that works seamlessly with LoRA Manager to enhance Civitai browsing experience, showing which models are already in your local library, enabling one-click downloads, and providing queue and parallel download support
* **Enhanced Lora Loader** - Added support for nunchaku, improving convenience when working with ComfyUI-nunchaku workflows, plus new template workflows for quick onboarding
* **WanVideo Integration** - Introduced WanVideo Lora Select (LoraManager) node compatible with ComfyUI-WanVideoWrapper for streamlined lora usage in video workflows, including a template workflow to help you get started quickly
### v0.8.19
* **Analytics Dashboard** - Added new Statistics page providing comprehensive visual analysis of model collection and usage patterns for better library insights
* **Target Node Selection** - Enhanced workflow integration with intelligent target choosing when sending LoRAs/recipes to workflows with multiple loader/stacker nodes; a visual selector now appears showing node color, type, ID, and title for precise targeting
* **Enhanced NSFW Controls** - Added support for setting NSFW levels on recipes with automatic content blurring based on user preferences
* **Customizable Card Display** - New display settings allowing users to choose whether card information and action buttons are always visible or only revealed on hover
* **Expanded Compatibility** - Added support for efficiency-nodes-comfyui in Save Recipe and Save Image nodes, plus fixed compatibility with ComfyUI_Custom_Nodes_AlekPet
### v0.8.18
* **Custom Example Images** - Added ability to import your own example images for LoRAs and checkpoints with automatic metadata extraction from embedded information
* **Enhanced Example Management** - New action buttons to set specific examples as previews or delete custom examples
* **Improved Duplicate Detection** - Enhanced "Find Duplicates" with hash verification feature to eliminate false positives when identifying duplicate models
* **Tag Management** - Added tag editing functionality allowing users to customize and manage model tags
* **Advanced Selection Controls** - Implemented Ctrl+A shortcut for quickly selecting all filtered LoRAs, automatically entering bulk mode when needed
* **Note**: Cache file functionality temporarily disabled pending rework
### v0.8.17
* **Duplicate Model Detection** - Added "Find Duplicates" functionality for LoRAs and checkpoints using model file hash detection, enabling convenient viewing and batch deletion of duplicate models
* **Enhanced URL Recipe Imports** - Optimized import recipe via URL functionality using CivitAI API calls instead of web scraping, now supporting all rated images (including NSFW) for recipe imports
* **Improved TriggerWord Control** - Enhanced TriggerWord Toggle node with new default_active switch to set the initial state (active/inactive) when trigger words are added
* **Centralized Example Management** - Added "Migrate Existing Example Images" feature to consolidate downloaded example images from model folders into central storage with customizable naming patterns
* **Intelligent Word Suggestions** - Implemented smart trigger word suggestions by reading class tokens and tag frequency from safetensors files, displaying recommendations when editing trigger words
* **Model Version Management** - Added "Re-link to CivitAI" context menu option for connecting models to different CivitAI versions when needed
[View Update History](./update_logs.md)
---
@@ -174,9 +147,9 @@ Enhance your Civitai browsing experience with our companion browser extension! S
### Option 2: **Portable Standalone Edition** (No ComfyUI required)
1. Download the [Portable Package](https://github.com/willmiao/ComfyUI-Lora-Manager/releases/download/v0.8.26/lora_manager_portable.7z)
1. Download the [Portable Package](https://github.com/willmiao/ComfyUI-Lora-Manager/releases/download/v0.9.2/lora_manager_portable.7z)
2. Copy the provided `settings.json.example` file to create a new file named `settings.json` in `comfyui-lora-manager` folder
3. Edit `settings.json` to include your correct model folder paths and CivitAI API key
3. Edit the new `settings.json` to include your correct model folder paths and CivitAI API key
4. Run run.bat
- To change the startup port, edit `run.bat` and modify the parameter (e.g. `--port 9001`)
@@ -244,7 +217,7 @@ You can combine multiple patterns to create detailed, organized filenames for yo
You can now run LoRA Manager independently from ComfyUI:
1. **For ComfyUI users**:
- Launch ComfyUI with LoRA Manager at least once to initialize the necessary path information in the `settings.json` file.
- Launch ComfyUI with LoRA Manager at least once to initialize the necessary path information in the `settings.json` file located in your user settings folder (see paths above).
- Make sure dependencies are installed: `pip install -r requirements.txt`
- From your ComfyUI root directory, run:
```bash
@@ -266,8 +239,37 @@ You can now run LoRA Manager independently from ComfyUI:
```
- Access the interface through your browser at: `http://localhost:8188/loras`
> **Note:** Existing installations automatically migrate the legacy `settings.json` from the plugin folder to the user settings directory the first time you launch this version.
This standalone mode provides a lightweight option for managing your model and recipe collection without needing to run the full ComfyUI environment, making it useful even for users who primarily use other stable diffusion interfaces.
## Testing & Coverage
### Backend
Install the development dependencies and run pytest with coverage reports:
```bash
pip install -r requirements-dev.txt
COVERAGE_FILE=coverage/backend/.coverage pytest \
--cov=py \
--cov=standalone \
--cov-report=term-missing \
--cov-report=html:coverage/backend/html \
--cov-report=xml:coverage/backend/coverage.xml \
--cov-report=json:coverage/backend/coverage.json
```
HTML, XML, and JSON artifacts are stored under `coverage/backend/` so you can inspect hot spots locally or from CI artifacts.
### Frontend
Run the Vitest coverage suite to analyze widget hot spots:
```bash
npm run test:coverage
```
---
## Contributing


@@ -1,12 +1,32 @@
from .py.lora_manager import LoraManager
from .py.nodes.lora_loader import LoraManagerLoader, LoraManagerTextLoader
from .py.nodes.trigger_word_toggle import TriggerWordToggle
from .py.nodes.lora_stacker import LoraStacker
from .py.nodes.save_image import SaveImage
from .py.nodes.debug_metadata import DebugMetadata
from .py.nodes.wanvideo_lora_select import WanVideoLoraSelect
# Import metadata collector to install hooks on startup
from .py.metadata_collector import init as init_metadata_collector
try:  # pragma: no cover - import fallback for pytest collection
    from .py.lora_manager import LoraManager
    from .py.nodes.lora_loader import LoraManagerLoader, LoraManagerTextLoader
    from .py.nodes.trigger_word_toggle import TriggerWordToggle
    from .py.nodes.lora_stacker import LoraStacker
    from .py.nodes.save_image import SaveImage
    from .py.nodes.debug_metadata import DebugMetadata
    from .py.nodes.wanvideo_lora_select import WanVideoLoraSelect
    from .py.nodes.wanvideo_lora_select_from_text import WanVideoLoraSelectFromText
    from .py.metadata_collector import init as init_metadata_collector
except ImportError:  # pragma: no cover - allows running under pytest without package install
    import importlib
    import pathlib
    import sys

    package_root = pathlib.Path(__file__).resolve().parent
    if str(package_root) not in sys.path:
        sys.path.append(str(package_root))

    LoraManager = importlib.import_module("py.lora_manager").LoraManager
    LoraManagerLoader = importlib.import_module("py.nodes.lora_loader").LoraManagerLoader
    LoraManagerTextLoader = importlib.import_module("py.nodes.lora_loader").LoraManagerTextLoader
    TriggerWordToggle = importlib.import_module("py.nodes.trigger_word_toggle").TriggerWordToggle
    LoraStacker = importlib.import_module("py.nodes.lora_stacker").LoraStacker
    SaveImage = importlib.import_module("py.nodes.save_image").SaveImage
    DebugMetadata = importlib.import_module("py.nodes.debug_metadata").DebugMetadata
    WanVideoLoraSelect = importlib.import_module("py.nodes.wanvideo_lora_select").WanVideoLoraSelect
    WanVideoLoraSelectFromText = importlib.import_module("py.nodes.wanvideo_lora_select_from_text").WanVideoLoraSelectFromText
    init_metadata_collector = importlib.import_module("py.metadata_collector").init

NODE_CLASS_MAPPINGS = {
    LoraManagerLoader.NAME: LoraManagerLoader,
@@ -15,7 +35,8 @@ NODE_CLASS_MAPPINGS = {
    LoraStacker.NAME: LoraStacker,
    SaveImage.NAME: SaveImage,
    DebugMetadata.NAME: DebugMetadata,
    WanVideoLoraSelect.NAME: WanVideoLoraSelect
    WanVideoLoraSelect.NAME: WanVideoLoraSelect,
    WanVideoLoraSelectFromText.NAME: WanVideoLoraSelectFromText
}
WEB_DIRECTORY = "./web/comfyui"

180
docs/LM-Extension-Wiki.md Normal file

@@ -0,0 +1,180 @@
## Overview
The **LoRA Manager Civitai Extension** is a browser extension designed to work seamlessly with [LoRA Manager](https://github.com/willmiao/ComfyUI-Lora-Manager) to significantly enhance your browsing experience on [Civitai](https://civitai.com).
It also supports browsing on [CivArchive](https://civarchive.com/) (formerly CivitaiArchive).
With this extension, you can:
✅ Instantly see which models are already present in your local library
✅ Download new models with a single click
✅ Manage downloads efficiently with queue and parallel download support
✅ Keep your downloaded models automatically organized according to your custom settings
![Civitai Models page](https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/wiki-images/civitai-models-page.png)
![CivArchive Models page](https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/wiki-images/civarchive-models-page.png)
---
## Why Are All Features for Supporters Only?
I love building tools for the Stable Diffusion and ComfyUI communities, and LoRA Manager is a passion project that I've poured countless hours into. When I created this companion extension, my hope was to offer its core features for free, as a thank-you to all of you.
Unfortunately, I've reached a point where I need to be realistic. The level of support from the free model has been far lower than what's needed to justify the continuous development and maintenance for both projects. It was a difficult decision, but I've chosen to make the extension's features exclusive to supporters.
This change is crucial for me to be able to continue dedicating my time to improving the free and open-source LoRA Manager, which I'm committed to keeping available for everyone.
Your support does more than just unlock a few features—it allows me to keep innovating and ensures the core LoRA Manager project thrives. I'm incredibly grateful for your understanding and any support you can offer. ❤️
(_For those who previously supported me on Ko-fi with a one-time donation, I'll be sending out license keys individually as a thank-you._)
---
## Installation
### Supported Browsers & Installation Methods
| Browser | Installation Method |
|--------------------|-------------------------------------------------------------------------------------|
| **Google Chrome** | [Chrome Web Store link](https://chromewebstore.google.com/detail/capigligggeijgmocnaflanlbghnamgm?utm_source=item-share-cb) |
| **Microsoft Edge** | Install via Chrome Web Store (compatible) |
| **Brave Browser** | Install via Chrome Web Store (compatible) |
| **Opera** | Install via Chrome Web Store (compatible) |
| **Firefox** | <div id="firefox-install" class="install-ok"><a href="https://github.com/willmiao/lm-civitai-extension-firefox/releases/latest/download/extension.xpi">📦 Install Firefox Extension (reviewed and verified by Mozilla)</a></div> |
For non-Chrome browsers (e.g., Microsoft Edge), you can typically install extensions from the Chrome Web Store by following these steps: open the extension's Chrome Web Store page, click 'Get extension', then click 'Allow' when prompted to enable installations from other stores, and finally click 'Add extension' to complete the installation.
---
## Privacy & Security
I understand concerns around browser extensions and privacy, and I want to be fully transparent about how the **LM Civitai Extension** works:
- **Reviewed and Verified**
This extension has been **manually reviewed and approved by the Chrome Web Store**. The Firefox version uses the **exact same code** (only the packaging format differs) and has passed **Mozilla's Add-on review**.
- **Minimal Network Access**
The only external server this extension connects to is:
**`https://willmiao.shop`** — used solely for **license validation**.
It does **not collect, transmit, or store any personal or usage data**.
No browsing history, no user IDs, no analytics, no hidden trackers.
- **Local-Only Model Detection**
Model detection and LoRA Manager communication all happen **locally** within your browser, directly interacting with your local LoRA Manager backend.
I value your trust and am committed to keeping your local setup private and secure. If you have any questions, feel free to reach out!
---
## How to Use
After installing the extension, you'll automatically receive a **7-day trial** to explore all features.
When the extension is correctly installed and your license is valid:
- Open **Civitai**, and you'll see visual indicators added by the extension on model cards, showing:
- ✅ Models already present in your local library
- ⬇️ A download button for models not in your library
Clicking the download button adds the corresponding model version to the download queue. Up to **5 models can download simultaneously**.
### Visual Indicators Appear On:
- **Home Page** — Featured models
- **Models Page**
- **Creator Profiles** — If the creator has set their models to be visible
- **Recommended Resources** — On individual model pages
### Version Buttons on Model Pages
On a specific model page, visual indicators also appear on version buttons, showing which versions are already in your local library.
When switching to a specific version by clicking a version button:
- Clicking the download button will open a dropdown:
- Download via **LoRA Manager**
- Download via **Original Download** (browser download)
You can check **Remember my choice** to set your preferred default. You can change this setting anytime in the extension's settings.
![Civitai Model Page](https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/wiki-images/civitai-model-page.png)
### Resources on Image Pages (2025-08-05) — now shows in-library indicators for image resources. Import image as recipe coming soon!
![Civitai Image Page](https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/wiki-images/civitai-image-page.jpg)
---
## Model Download Location & LoRA Manager Settings
To use the **one-click download function**, you must first set:
- Your **Default LoRAs Root**
- Your **Default Checkpoints Root**
These are set within LoRA Manager's settings.
When everything is configured, downloaded model files will be placed in:
`<Default_Models_Root>/<Base_Model_of_the_Model>/<First_Tag_of_the_Model>`
### Update: Default Path Customization (2025-07-21)
A new setting to customize the default download path has been added in the nightly version. You can now personalize where models are saved when downloading via the LM Civitai Extension.
![Default Path Customization](https://github.com/willmiao/ComfyUI-Lora-Manager/blob/main/wiki-images/default-path-customization.png)
The previous YAML path mapping file will be deprecated—settings will now be unified in settings.json to simplify configuration.
---
## Backend Port Configuration
If your **ComfyUI** or **LoRA Manager** backend is running on a port **other than the default 8188**, you must configure the backend port in the extension's settings.
After correctly setting and saving the port, you'll see in the extension's header area:
- A **Healthy** status with the tooltip: `Connected to LoRA Manager on port xxxx`
---
## Advanced Usage
### Connecting to a Remote LoRA Manager
If your LoRA Manager is running on another computer, you can still connect from your browser using port forwarding.
> **Why can't you set a remote IP directly?**
>
> For privacy and security, the extension only requests access to `http://127.0.0.1/*`. Supporting remote IPs would require much broader permissions, which may be rejected by browser stores and could raise user concerns.
**Solution: Port Forwarding with `socat`**
On your browser computer, run:
`socat TCP-LISTEN:8188,bind=127.0.0.1,fork TCP:REMOTE.IP.ADDRESS.HERE:8188`
- Replace `REMOTE.IP.ADDRESS.HERE` with the IP of the machine running LoRA Manager.
- Adjust the port if needed.
This lets the extension connect to `127.0.0.1:8188` as usual, with traffic forwarded to your remote server.
_Thanks to user **Temikus** for sharing this solution!_
---
## Roadmap
The extension will evolve alongside **LoRA Manager** improvements. Planned features include:
- [x] Support for **additional model types** (e.g., embeddings)
- [ ] One-click **Recipe Import**
- [x] Display of in-library status for all resources in the **Resources Used** section of the image page
- [x] One-click **Auto-organize Models**
**Stay tuned — and thank you for your support!**
---


@@ -0,0 +1,93 @@
# Example image route architecture
The example image routing stack mirrors the layered model route stack described in
[`docs/architecture/model_routes.md`](model_routes.md). HTTP wiring, controller setup,
handler orchestration, and long-running workflows now live in clearly separated modules so
we can extend download/import behaviour without touching the entire feature surface.
```mermaid
graph TD
subgraph HTTP
A[ExampleImagesRouteRegistrar] -->|binds| B[ExampleImagesRoutes controller]
end
subgraph Application
B --> C[ExampleImagesHandlerSet]
C --> D1[Handlers]
D1 --> E1[Use cases]
E1 --> F1[Download manager / processor / file manager]
end
subgraph Side Effects
F1 --> G1[Filesystem]
F1 --> G2[Model metadata]
F1 --> G3[WebSocket progress]
end
```
## Layer responsibilities
| Layer | Module(s) | Responsibility |
| --- | --- | --- |
| Registrar | `py/routes/example_images_route_registrar.py` | Declarative catalogue of every example image endpoint plus helpers that bind them to an `aiohttp` router. Keeps HTTP concerns symmetrical with the model registrar. |
| Controller | `py/routes/example_images_routes.py` | Lazily constructs `ExampleImagesHandlerSet`, injects defaults for the download manager, processor, and file manager, and exposes the registrar-ready mapping just like `BaseModelRoutes`. |
| Handler set | `py/routes/handlers/example_images_handlers.py` | Groups HTTP adapters by concern (downloads, imports/deletes, filesystem access). Each handler translates domain errors into HTTP responses and defers to a use case or utility service. |
| Use cases | `py/services/use_cases/example_images/*.py` | Encapsulate orchestration for downloads and imports. They validate input, translate concurrency/configuration errors, and keep handler logic declarative. |
| Supporting services | `py/utils/example_images_download_manager.py`, `py/utils/example_images_processor.py`, `py/utils/example_images_file_manager.py` | Execute long-running work: pull assets from Civitai, persist uploads, clean metadata, expose filesystem actions with guardrails, and broadcast progress snapshots. |
## Handler responsibilities & invariants
`ExampleImagesHandlerSet` flattens the handler objects into the `{"handler_name": coroutine}`
mapping consumed by the registrar. The table below outlines how each handler collaborates
with the use cases and utilities.
| Handler | Key endpoints | Collaborators | Contracts |
| --- | --- | --- | --- |
| `ExampleImagesDownloadHandler` | `/api/lm/download-example-images`, `/api/lm/example-images-status`, `/api/lm/pause-example-images`, `/api/lm/resume-example-images`, `/api/lm/force-download-example-images` | `DownloadExampleImagesUseCase`, `DownloadManager` | Delegates payload validation and concurrency checks to the use case; progress/status endpoints expose the same snapshot used for WebSocket broadcasts; pause/resume surface `DownloadNotRunningError` as HTTP 400 instead of 500. |
| `ExampleImagesManagementHandler` | `/api/lm/import-example-images`, `/api/lm/delete-example-image` | `ImportExampleImagesUseCase`, `ExampleImagesProcessor` | Multipart uploads are streamed to disk via the use case; validation failures return HTTP 400 with no filesystem side effects; deletion funnels through the processor to prune metadata and cached images consistently. |
| `ExampleImagesFileHandler` | `/api/lm/open-example-images-folder`, `/api/lm/example-image-files`, `/api/lm/has-example-images` | `ExampleImagesFileManager` | Centralises filesystem access, enforcing settings-based root paths and returning HTTP 400/404 for missing configuration or folders; responses always include `success`/`has_images` booleans for UI consumption. |
## Use case boundaries
| Use case | Entry point | Dependencies | Guarantees |
| --- | --- | --- | --- |
| `DownloadExampleImagesUseCase` | `execute(payload)` | `DownloadManager.start_download`, download configuration errors | Raises `DownloadExampleImagesInProgressError` when the manager reports an active job, rewraps configuration errors into `DownloadExampleImagesConfigurationError`, and lets `ExampleImagesDownloadError` bubble as 500s so handlers do not duplicate logging. |
| `ImportExampleImagesUseCase` | `execute(request)` | `ExampleImagesProcessor.import_images`, temporary file helpers | Supports multipart or JSON payloads, normalises file paths into a single list, cleans up temp files even on failure, and maps validation issues to `ImportExampleImagesValidationError` for HTTP 400 responses. |
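As a concrete reading of the first row, the sketch below shows how the concurrency guard and error rewrapping could fit together; it assumes a `DownloadManager` exposing an `is_running()` check and an async `start_download`, which are illustrative names rather than the shipped interface:

```python
# Minimal sketch of DownloadExampleImagesUseCase's guard rails; is_running()
# and the constructor signature are assumptions, not the shipped API.

class DownloadExampleImagesInProgressError(Exception):
    """Raised before starting work when a download job is already active."""


class DownloadExampleImagesConfigurationError(Exception):
    """Configuration problems rewrapped for an HTTP 400 response."""


class DownloadExampleImagesUseCase:
    def __init__(self, download_manager):
        self._download_manager = download_manager  # injected for test seams

    async def execute(self, payload: dict) -> dict:
        # Refuse to start a second job while one is active.
        if self._download_manager.is_running():
            raise DownloadExampleImagesInProgressError("download already in progress")
        try:
            return await self._download_manager.start_download(payload)
        except ValueError as exc:
            # Rewrap configuration errors; ExampleImagesDownloadError would
            # bubble through untouched so handlers log it once as a 500.
            raise DownloadExampleImagesConfigurationError(str(exc)) from exc
```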
## Maintaining critical invariants
* **Shared progress snapshots** - The download handler returns the same snapshot built by
`DownloadManager`, guaranteeing parity between HTTP polling endpoints and WebSocket
progress events.
* **Safe filesystem access** - All folder/file actions flow through
`ExampleImagesFileManager`, which validates the configured example image root and ensures
responses never leak absolute paths outside the allowed directory.
* **Metadata hygiene** - Import/delete operations run through `ExampleImagesProcessor`,
which updates model metadata via `MetadataManager` and notifies the relevant scanners so
cache state stays in sync.
## Migration notes
The refactor brings the example image stack in line with the model/recipe stacks:
1. `ExampleImagesRouteRegistrar` now owns the declarative route list. Downstream projects
should rely on `ExampleImagesRoutes.to_route_mapping()` instead of manually wiring
handler callables.
2. `ExampleImagesRoutes` caches its `ExampleImagesHandlerSet` just like
`BaseModelRoutes`. If you previously instantiated handlers directly, inject custom
collaborators via the controller constructor (`download_manager`, `processor`,
`file_manager`) to keep test seams predictable.
3. Tests that mocked `ExampleImagesRoutes.setup_routes` should switch to patching
`DownloadExampleImagesUseCase`/`ImportExampleImagesUseCase` at import time. The handlers
expect those abstractions to surface validation/concurrency errors, and bypassing them
will skip the HTTP-friendly error mapping.
## Extending the stack
1. Add the endpoint to `ROUTE_DEFINITIONS` with a unique `handler_name`.
2. Expose the coroutine on an existing handler class (or create a new handler and extend
`ExampleImagesHandlerSet`).
3. Wire additional services or factories inside `_build_handler_set` on
`ExampleImagesRoutes`, mirroring how the model stack introduces new use cases.
`tests/routes/test_example_images_routes.py` exercises registrar binding, download pause
flows, and import validations. Use it as a template when introducing new handler
collaborators or error mappings.
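For orientation, the sketch below illustrates the registrar pattern those steps extend; the exact tuple shape of `ROUTE_DEFINITIONS` and the binding helper are assumptions, not the shipped code in `py/routes/example_images_route_registrar.py`:

```python
# Hypothetical slice of the registrar pattern; tuple shape and helper name
# are illustrative assumptions.
from aiohttp import web

ROUTE_DEFINITIONS = [
    # (HTTP method, path, unique handler_name): step 1 adds a new tuple here.
    ("POST", "/api/lm/download-example-images", "download_example_images"),
    ("GET", "/api/lm/example-images-status", "get_example_images_status"),
]


def bind_routes(app: web.Application, handler_mapping: dict) -> None:
    """Bind every declared route to the aiohttp router (registrar helper)."""
    for method, path, handler_name in ROUTE_DEFINITIONS:
        # handler_mapping comes from ExampleImagesRoutes.to_route_mapping().
        app.router.add_route(method, path, handler_mapping[handler_name])
```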


@@ -0,0 +1,100 @@
# Base model route architecture
The model routing stack now splits HTTP wiring, orchestration logic, and
business rules into discrete layers. The goal is to make it obvious where a
new collaborator should live and which contract it must honour. The diagram
below captures the end-to-end flow for a typical request:
```mermaid
graph TD
subgraph HTTP
A[ModelRouteRegistrar] -->|binds| B[BaseModelRoutes handler proxy]
end
subgraph Application
B --> C[ModelHandlerSet]
C --> D1[Handlers]
D1 --> E1[Use cases]
E1 --> F1[Services / scanners]
end
subgraph Side Effects
F1 --> G1[Cache & metadata]
F1 --> G2[Filesystem]
F1 --> G3[WebSocket state]
end
```
Every box maps to a concrete module:
| Layer | Module(s) | Responsibility |
| --- | --- | --- |
| Registrar | `py/routes/model_route_registrar.py` | Declarative list of routes shared by every model type and helper methods for binding them to an `aiohttp` application. |
| Route controller | `py/routes/base_model_routes.py` | Constructs the handler graph, injects shared services, exposes proxies that surface `503 Service not ready` when the model service has not been attached. |
| Handler set | `py/routes/handlers/model_handlers.py` | Thin HTTP adapters grouped by concern (page rendering, listings, mutations, queries, downloads, CivitAI integration, move operations, auto-organize). |
| Use cases | `py/services/use_cases/*.py` | Encapsulate long-running flows (`DownloadModelUseCase`, `BulkMetadataRefreshUseCase`, `AutoOrganizeUseCase`). They normalise validation errors and concurrency constraints before returning control to the handlers. |
| Services | `py/services/*.py` | Existing services and scanners that mutate caches, write metadata, move files, and broadcast WebSocket updates. |
## Handler responsibilities & contracts
`ModelHandlerSet` flattens the handler objects into the exact callables used by
the registrar. The table below highlights the separation of concerns within
the set and the invariants that must hold after each handler returns.
| Handler | Key endpoints | Collaborators | Contracts |
| --- | --- | --- | --- |
| `ModelPageView` | `/{prefix}` | `SettingsManager`, `server_i18n`, Jinja environment, `service.scanner` | Template is rendered with `is_initializing` flag when caches are cold; i18n filter is registered exactly once per environment instance. |
| `ModelListingHandler` | `/api/lm/{prefix}/list` | `service.get_paginated_data`, `service.format_response` | Listings respect pagination query parameters and cap `page_size` at 100; every item is formatted before response. |
| `ModelManagementHandler` | Mutations (delete, exclude, metadata, preview, tags, rename, bulk delete, duplicate verification) | `ModelLifecycleService`, `MetadataSyncService`, `PreviewAssetService`, `TagUpdateService`, scanner cache/index | Cache state mirrors filesystem changes: deletes prune cache & hash index, preview replacements synchronise metadata and cache NSFW levels, metadata saves trigger cache resort when names change. |
| `ModelQueryHandler` | Read-only queries (top tags, folders, duplicates, metadata, URLs) | Service query helpers & scanner cache | Outputs always wrapped in `{"success": True}` when no error; duplicate/filename grouping omits empty entries; invalid parameters (e.g. missing `model_root`) return HTTP 400. |
| `ModelDownloadHandler` | `/api/lm/download-model`, `/download-model-get`, `/download-progress/{id}`, `/cancel-download-get` | `DownloadModelUseCase`, `DownloadCoordinator`, `WebSocketManager` | Payload validation errors become HTTP 400 without mutating download progress cache; early-access failures surface as HTTP 401; successful downloads cache progress snapshots that back both WebSocket broadcasts and polling endpoints. |
| `ModelCivitaiHandler` | CivitAI metadata routes | `MetadataSyncService`, metadata provider factory, `BulkMetadataRefreshUseCase` | `fetch_all_civitai` streams progress via `WebSocketBroadcastCallback`; version lookups validate model type before returning; local availability fields derive from hash lookups without mutating cache state. |
| `ModelMoveHandler` | `move_model`, `move_models_bulk` | `ModelMoveService` | Moves execute atomically per request; bulk operations aggregate success/failure per file set. |
| `ModelAutoOrganizeHandler` | `/api/lm/{prefix}/auto-organize` (GET/POST), `/auto-organize-progress` | `AutoOrganizeUseCase`, `WebSocketProgressCallback`, `WebSocketManager` | Enforces single-flight execution using the shared lock; progress broadcasts remain available to polling clients until explicitly cleared; conflicts return HTTP 409 with a descriptive error. |
## Use case boundaries
Each use case exposes a narrow asynchronous API that hides the underlying
services. Their error mapping is essential for predictable HTTP responses.
| Use case | Entry point | Dependencies | Guarantees |
| --- | --- | --- | --- |
| `DownloadModelUseCase` | `execute(payload)` | `DownloadCoordinator.schedule_download` | Translates `ValueError` into `DownloadModelValidationError` for HTTP 400, recognises early-access errors (`"401"` in message) and surfaces them as `DownloadModelEarlyAccessError`, forwards success dictionaries untouched. |
| `AutoOrganizeUseCase` | `execute(file_paths, progress_callback)` | `ModelFileService.auto_organize_models`, `WebSocketManager` lock | Guarded by `ws_manager` lock + status checks; raises `AutoOrganizeInProgressError` before invoking the file service when another run is already active. |
| `BulkMetadataRefreshUseCase` | `execute_with_error_handling(progress_callback)` | `MetadataSyncService`, `SettingsManager`, `WebSocketBroadcastCallback` | Iterates through cached models, applies metadata sync, emits progress snapshots that handlers broadcast unchanged. |
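To make the first row concrete, here is a minimal sketch of the error translation in `DownloadModelUseCase`; it assumes early-access failures also surface as `ValueError`s whose message contains "401", which is an assumption about the shipped control flow:

```python
# Sketch of the error mapping described above; the "401" check location
# is an assumption, not a confirmed detail of the implementation.

class DownloadModelValidationError(Exception):
    """Mapped to HTTP 400 by ModelDownloadHandler."""


class DownloadModelEarlyAccessError(Exception):
    """Mapped to HTTP 401 by ModelDownloadHandler."""


class DownloadModelUseCase:
    def __init__(self, coordinator):
        self._coordinator = coordinator

    async def execute(self, payload: dict) -> dict:
        try:
            # Success dictionaries are forwarded untouched.
            return await self._coordinator.schedule_download(payload)
        except ValueError as exc:
            if "401" in str(exc):
                # Early-access failures surface as HTTP 401.
                raise DownloadModelEarlyAccessError(str(exc)) from exc
            raise DownloadModelValidationError(str(exc)) from exc
```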
## Maintaining legacy contracts
The refactor preserves the invariants called out in the previous architecture
notes. The most critical ones are reiterated here to emphasise the
collaboration points:
1. **Cache mutations** - Delete, exclude, rename, and bulk delete operations are
channelled through `ModelManagementHandler`. The handler delegates to
`ModelLifecycleService` or `MetadataSyncService`, and the scanner cache is
mutated in-place before the handler returns. The accompanying tests assert
that `scanner._cache.raw_data` and `scanner._hash_index` stay in sync after
each mutation.
2. **Preview updates** - `PreviewAssetService.replace_preview` writes the new
asset, `MetadataSyncService` persists the JSON metadata, and
`scanner.update_preview_in_cache` mirrors the change. The handler returns
the static URL produced by `config.get_preview_static_url`, keeping browser
clients in lockstep with disk state.
3. **Download progress** - `DownloadCoordinator.schedule_download` generates the
download identifier, registers a WebSocket progress callback, and caches the
latest numeric progress via `WebSocketManager`. Both `download_model`
responses and `/download-progress/{id}` polling read from the same cache to
guarantee consistent progress reporting across transports.
## Extending the stack
To add a new shared route:
1. Declare it in `COMMON_ROUTE_DEFINITIONS` using a unique handler name.
2. Implement the corresponding coroutine on one of the handlers inside
`ModelHandlerSet` (or introduce a new handler class when the concern does not
fit existing ones).
3. Inject additional dependencies in `BaseModelRoutes._create_handler_set` by
wiring services or use cases through the constructor parameters.
Model-specific routes should continue to be registered inside the subclass
implementation of `setup_specific_routes`, reusing the shared registrar where
possible.


@@ -0,0 +1,34 @@
# Multi-Library Management for Standalone Mode
## Requirements Summary
- **Independent libraries**: In standalone mode, users can maintain multiple libraries, where each library represents a distinct set of model folders (LoRAs, checkpoints, embeddings, etc.). Only one library is active at any given time, but users need a fast way to switch between them.
- **Library-specific settings**: The fields that vary per library are `folder_paths`, `default_lora_root`, `default_checkpoint_root`, and `default_embedding_root` inside `settings.json`.
- **Persistent caches**: Every library must have its own SQLite persistent model cache so that metadata generated for one library does not leak into another.
- **Backward compatibility**: Existing single-library setups should continue to work. When no multi-library configuration is provided, the application should behave exactly as before.
## Proposed Design
1. **Library registry**
- Extend the standalone configuration to hold a list of libraries, each identified by a unique name.
- Each entry stores the folder path configuration plus any library-scoped metadata (e.g. creation time, display name).
- The active library key is stored separately to allow quick switching without rewriting the full config.
2. **Settings management**
- Update `settings_manager` to load and persist the library registry. When a library is activated, hydrate the in-memory settings object with that library's folder configuration.
- Provide helper methods for creating, renaming, and deleting libraries, ensuring validation for duplicate names and path collisions.
- Continue writing the active library settings to `settings.json` for compatibility, while storing the registry in a new section such as `libraries`.
3. **Persistent model cache**
- Derive the SQLite file path from the active library, e.g. `model_cache_<library>.sqlite` or a nested directory structure like `model_cache/<library>/models.sqlite` (a sketch of this derivation follows the list).
- Update `PersistentModelCache` so it resolves the database path dynamically whenever the active library changes. Ensure connections are closed before switching to avoid locking issues.
- Migrate existing single cache files by treating them as the default library's cache.
4. **Model scanning workflow**
- Modify `ModelScanner` and related services to react to library switches by clearing in-memory caches, re-reading folder paths, and rehydrating metadata from the library-specific SQLite cache.
- Provide API endpoints in standalone mode to list libraries, activate one, and trigger a rescan.
5. **UI/UX considerations**
- In the standalone UI, introduce a library selector component that surfaces available libraries and offers quick switching.
- Offer feedback when switching libraries (e.g. spinner while rescanning) and guard destructive actions with confirmation prompts.
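A sketch of the library-scoped cache path resolution from item 3, assuming the nested directory layout and treating the method names as illustrative:
```python
import sqlite3
from pathlib import Path

class PersistentModelCache:
    """Illustrative subset: dynamic per-library database resolution."""

    def __init__(self, cache_root):
        self._cache_root = Path(cache_root)
        self._conn = None
        self._active_library = None

    def _db_path(self, library):
        # Nested layout from the design note: model_cache/<library>/models.sqlite
        return self._cache_root / "model_cache" / library / "models.sqlite"

    def switch_library(self, library):
        if library == self._active_library:
            return
        if self._conn is not None:
            # Close before switching to avoid SQLite locking issues.
            self._conn.close()
            self._conn = None
        db_path = self._db_path(library)
        db_path.parent.mkdir(parents=True, exist_ok=True)
        self._conn = sqlite3.connect(db_path)
        self._active_library = library
```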
## Implementation Notes
- **Data migration**: On startup, detect if the old `settings.json` structure is present. If so, create a default library entry using the current folder paths and point the active library to it (see the sketch after this list).
- **Thread safety**: Ensure that any long-running scans are cancelled or awaited before switching libraries to prevent race conditions in cache writes.
- **Testing**: Add unit tests for the settings manager to cover library CRUD operations and cache path resolution. Include integration tests that simulate switching libraries and verifying that the correct models are loaded.
- **Documentation**: Update user guides to explain how to define libraries, switch between them, and where the new cache files are stored.
- **Extensibility**: Keep the design open to future per-library settings (e.g. auto-refresh intervals, metadata overrides) by storing library data as objects instead of flat maps.
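A minimal sketch of that startup migration, assuming the registry lives under a `libraries` key with the active name stored separately:
```python
import json
from pathlib import Path

LIBRARY_FIELDS = (
    "folder_paths",
    "default_lora_root",
    "default_checkpoint_root",
    "default_embedding_root",
)

def migrate_legacy_settings(settings_path):
    """Wrap an old single-library settings.json in a default library entry."""
    path = Path(settings_path)
    settings = json.loads(path.read_text(encoding="utf-8"))
    if "libraries" not in settings:
        settings["libraries"] = {
            "default": {field: settings.get(field) for field in LIBRARY_FIELDS}
        }
        settings["active_library"] = "default"
        path.write_text(json.dumps(settings, indent=2), encoding="utf-8")
    return settings
```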

View File

@@ -0,0 +1,89 @@
# Recipe route architecture
The recipe routing stack now mirrors the modular model route design. HTTP
bindings, controller wiring, handler orchestration, and business rules live in
separate layers so new behaviours can be added without re-threading the entire
feature. The diagram below outlines the flow for a typical request:
```mermaid
graph TD
subgraph HTTP
A[RecipeRouteRegistrar] -->|binds| B[RecipeRoutes controller]
end
subgraph Application
B --> C[RecipeHandlerSet]
C --> D1[Handlers]
D1 --> E1[Use cases]
E1 --> F1[Services / scanners]
end
subgraph Side Effects
F1 --> G1[Cache & fingerprint index]
F1 --> G2[Metadata files]
F1 --> G3[Temporary shares]
end
```
## Layer responsibilities
| Layer | Module(s) | Responsibility |
| --- | --- | --- |
| Registrar | `py/routes/recipe_route_registrar.py` | Declarative list of every recipe endpoint and helper methods that bind them to an `aiohttp` application. |
| Controller | `py/routes/base_recipe_routes.py`, `py/routes/recipe_routes.py` | Lazily resolves scanners/clients from the service registry, wires shared templates/i18n, instantiates `RecipeHandlerSet`, and exposes a `{handler_name: coroutine}` mapping for the registrar. |
| Handler set | `py/routes/handlers/recipe_handlers.py` | Thin HTTP adapters grouped by concern (page view, listings, queries, mutations, sharing). They normalise responses and translate service exceptions into HTTP status codes. |
| Services & scanners | `py/services/recipes/*.py`, `py/services/recipe_scanner.py`, `py/services/service_registry.py` | Concrete business logic: metadata parsing, persistence, sharing, fingerprint/index maintenance, and cache refresh. |
## Handler responsibilities & invariants
`RecipeHandlerSet` flattens purpose-built handler objects into the callables the
registrar binds. Each handler is responsible for a narrow concern and enforces a
set of invariants before returning:
| Handler | Key endpoints | Collaborators | Contracts |
| --- | --- | --- | --- |
| `RecipePageView` | `/loras/recipes` | `SettingsManager`, `server_i18n`, Jinja environment, recipe scanner getter | Template rendered with `is_initializing` flag when caches are still warming; i18n filter registered exactly once per environment instance. |
| `RecipeListingHandler` | `/api/lm/recipes`, `/api/lm/recipe/{id}` | `recipe_scanner.get_paginated_data`, `recipe_scanner.get_recipe_by_id` | Listings respect pagination and search filters; every item receives a `file_url` fallback even when metadata is incomplete; missing recipes become HTTP 404. |
| `RecipeQueryHandler` | Tag/base-model stats, syntax, LoRA lookups | Recipe scanner cache, `format_recipe_file_url` helper | Cache snapshots are reused without forcing refresh; duplicate lookups collapse groups by fingerprint; syntax lookups return helpful errors when LoRAs are absent. |
| `RecipeManagementHandler` | Save, update, reconnect, bulk delete, widget ingest | `RecipePersistenceService`, `RecipeAnalysisService`, recipe scanner | Persistence results propagate HTTP status codes; fingerprint/index updates flow through the scanner before returning; validation errors surface as HTTP 400 without touching disk. |
| `RecipeAnalysisHandler` | Uploaded/local/remote analysis | `RecipeAnalysisService`, `civitai_client`, recipe scanner | Unsupported content types map to HTTP 400; download errors (`RecipeDownloadError`) are not retried; every response includes a `loras` array for client compatibility. |
| `RecipeSharingHandler` | Share + download | `RecipeSharingService`, recipe scanner | Share responses provide a stable download URL and filename; expired shares surface as HTTP 404; downloads stream via `web.FileResponse` with attachment headers. |
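As a rough sketch of how a handler honours these contracts, translating service exceptions into HTTP status codes (the two error classes are stand-ins for the real types named in the tables below):
```python
from aiohttp import web

class RecipeNotFoundError(Exception):
    """Stand-in for the scanner's real error type."""

class RecipeValidationError(Exception):
    """Stand-in for the persistence layer's real error type."""

class RecipeListingHandler:
    def __init__(self, recipe_scanner):
        self._scanner = recipe_scanner

    async def get_recipe(self, request):
        try:
            recipe = await self._scanner.get_recipe_by_id(request.match_info["id"])
        except RecipeNotFoundError:
            # Missing recipes become HTTP 404, per the contract above.
            return web.json_response({"error": "recipe not found"}, status=404)
        except RecipeValidationError as exc:
            # Validation errors surface as HTTP 400 without touching disk.
            return web.json_response({"error": str(exc)}, status=400)
        return web.json_response(recipe)
```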
## Use case boundaries
The dedicated services encapsulate long-running work so handlers stay thin.
| Use case | Entry point | Dependencies | Guarantees |
| --- | --- | --- | --- |
| `RecipeAnalysisService` | `analyze_uploaded_image`, `analyze_remote_image`, `analyze_local_image`, `analyze_widget_metadata` | `ExifUtils`, `RecipeParserFactory`, downloader factory, optional metadata collector/processor | Normalises missing/invalid payloads into `RecipeValidationError`; generates consistent fingerprint data to keep duplicate detection stable; temporary files are cleaned up after every analysis path. |
| `RecipePersistenceService` | `save_recipe`, `delete_recipe`, `update_recipe`, `reconnect_lora`, `bulk_delete`, `save_recipe_from_widget` | `ExifUtils`, recipe scanner, card preview sizing constants | Writes images/JSON metadata atomically; updates scanner caches and hash indices before returning; recalculates fingerprints whenever LoRA assignments change. |
| `RecipeSharingService` | `share_recipe`, `prepare_download` | `tempfile`, recipe scanner | Copies originals to TTL-managed temp files; metadata lookups re-use the scanner; expired shares trigger cleanup and `RecipeNotFoundError`. |
## Maintaining critical invariants
* **Cache updates** Mutations (`save`, `delete`, `bulk_delete`, `update`) call
back into the recipe scanner to mutate the in-memory cache and fingerprint
index before returning a response. Tests assert that these methods are invoked
even when stubbing persistence.
* **Fingerprint management** `RecipePersistenceService` recomputes
fingerprints whenever LoRA metadata changes and duplicate lookups use those
fingerprints to group recipes. Handlers bubble the resulting IDs so clients
can merge duplicates without an extra fetch.
* **Metadata synchronisation** Saving or reconnecting a recipe updates the
JSON sidecar, refreshes embedded metadata via `ExifUtils`, and instructs the
scanner to re-sort its cache. Sharing relies on this metadata to generate
filenames and ensure downloads stay in sync with on-disk state.
## Extending the stack
1. Declare the new endpoint in `ROUTE_DEFINITIONS` with a unique handler name.
2. Implement the coroutine on an existing handler or introduce a new handler
class inside `py/routes/handlers/recipe_handlers.py` when the concern does
not fit existing ones.
3. Wire additional collaborators inside
`BaseRecipeRoutes._create_handler_set` (inject new services or factories) and
expose helper getters on the handler owner if the handler needs to share
utilities.
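A hedged sketch of steps 2 and 3 together, threading a new collaborator through `_create_handler_set` (every `Export*` name here is hypothetical):
```python
from aiohttp import web

class RecipeExportService:  # hypothetical new use case (step 3 collaborator)
    def __init__(self, recipe_scanner):
        self._scanner = recipe_scanner

    async def export(self, recipe_id):
        recipe = await self._scanner.get_recipe_by_id(recipe_id)
        return {"id": recipe_id, "recipe": recipe}


class RecipeExportHandler:  # hypothetical handler grouped by concern (step 2)
    def __init__(self, export_service):
        self._export_service = export_service

    async def export_recipe(self, request):
        payload = await self._export_service.export(request.match_info["id"])
        return web.json_response(payload)
```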
Integration tests in `tests/routes/test_recipe_routes.py` exercise the listing,
mutation, analysis-error, and sharing paths end-to-end, ensuring the controller
and handler wiring remains valid as new capabilities are added.

View File

@@ -0,0 +1,51 @@
# Frontend DOM Fixture Strategy
This guide outlines how to reproduce the markup emitted by the Jinja templates while running Vitest in jsdom. The aim is to make it straightforward to write integration-style unit tests for managers and UI helpers without having to duplicate template fragments inline.
## Loading Template Markup
Vitest executes inside Node, so we can read the same HTML templates that ship with the extension:
1. Use the helper utilities from `tests/frontend/utils/domFixtures.js` to read files under the `templates/` directory.
2. Mount the returned markup into `document.body` (or any custom container) before importing the module under test so its query selectors resolve correctly.
```js
import { renderTemplate } from '../utils/domFixtures.js'; // adjust the relative path to your spec
beforeEach(() => {
renderTemplate('loras.html', {
dataset: { page: 'loras' }
});
});
```
The helper ensures the dataset is applied to the container, which mirrors how the server-rendered template sets `data-page` in production.
## Working with Partial Components
Many features are implemented as template partials located under `templates/components/`. When a test only needs a fragment (for example, the progress panel or context menu markup), load the component file directly:
```js
const container = renderTemplate('components/progress_panel.html');
const progressPanel = container.querySelector('#progress-panel');
```
This pattern avoids hand-written fixture strings and keeps the tests aligned with the actual markup.
## Resetting Between Tests
The shared Vitest setup clears `document.body` and storage APIs before each test. If a suite adds additional DOM nodes outside of the body or needs to reset custom attributes mid-test, use `resetDom()` exported from `domFixtures.js`.
```js
import { resetDom } from '../utils/domFixtures.js';
afterEach(() => {
resetDom();
});
```
## Future Enhancements
- Provide typed helpers for injecting mock script tags (e.g., replicating ComfyUI globals).
- Compose higher-level fixtures that mimic specific pages (loras, checkpoints, recipes) once those managers receive dedicated suites.

View File

@@ -0,0 +1,44 @@
# LoRA & Checkpoints Filtering/Sorting Test Matrix
This matrix captures the scenarios that Phase 3 frontend tests should cover for the LoRA and Checkpoint managers. It focuses on how search, filter, sort, and duplicate badge toggles interact so future specs can share fixtures and expectations.
## Scope
- **Components**: `PageControls`, `FilterManager`, `SearchManager`, and `ModelDuplicatesManager` wiring invoked through `CheckpointsPageManager` and `LorasPageManager`.
- **Templates**: `templates/loras.html` and `templates/checkpoints.html` along with shared filter panel and toolbar partials.
- **APIs**: Requests issued through `baseModelApi.fetchModels` (via `resetAndReload`/`refreshModels`) and duplicates badge updates.
## Shared Setup Considerations
1. Render full page templates using `renderLorasPage` / `renderCheckpointsPage` helpers before importing modules so DOM queries resolve.
2. Stub storage helpers (`getStorageItem`, `setStorageItem`, `getSessionItem`, `setSessionItem`) to observe persistence behavior without mutating real storage.
3. Mock `sidebarManager` to capture refresh calls triggered after sort/filter actions.
4. Provide fake API implementations exposing `resetAndReload`, `refreshModels`, `fetchFromCivitai`, `toggleBulkMode`, and `clearCustomFilter` so control events remain asynchronous but deterministic.
5. Supply a minimal `ModelDuplicatesManager` mock exposing `toggleDuplicateMode`, `checkDuplicatesCount`, and `updateDuplicatesBadgeAfterRefresh` to validate duplicate badge wiring.
## Scenario Matrix
| ID | Feature | Scenario | LoRAs Expectations | Checkpoints Expectations | Notes |
| --- | --- | --- | --- | --- | --- |
| F-01 | Search filter | Typing a query updates `pageState.filters.search`, persists to session, and triggers `resetAndReload` on submit | Validate `SearchManager` writes query and reloads via API stub; confirm LoRA cards pass query downstream | Same as LoRAs | Cover `enter` press and clicking search icon |
| F-02 | Tag filter | Selecting a tag chip adds it to filters, applies active styling, and reloads results | Tag stored under `filters.tags`; `FilterManager.applyFilters` persists and triggers `resetAndReload(true)` | Same; ensure base model tag set is scoped to checkpoints dataset | Include removal path |
| F-03 | Base model filter | Toggling base model checkboxes updates `filters.baseModel`, persists, and reloads | Ensure only LoRA-supported models show; toggle multi-select | Ensure SDXL/Flux base models appear as expected | Capture UI state restored from storage on next init |
| F-04 | Favorites-only | Clicking favorites toggle updates session flag and calls `resetAndReload(true)` | Button gains `.active` class and API called | Same | Verify duplicates badge refresh when active |
| F-05 | Sort selection | Changing sort select saves preference (legacy + new format) and reloads | Confirm `PageControls.saveSortPreference` invoked with option and API called | Same with checkpoints-specific defaults | Cover `convertLegacySortFormat` branch |
| F-06 | Filter persistence | Re-initializing manager loads stored filters/sort and updates DOM | Filters pre-populate chips/checkboxes; favorites state restored | Same | Requires simulating repeated construction |
| F-07 | Combined filters | Applying search + tag + base model yields aggregated query params for fetch | Assert API receives merged filter payload | Same | Validate toast messaging for active filters |
| F-08 | Clearing filters | Using "Clear filters" resets state, storage, and reloads list | `FilterManager.clearFilters` empties `filters`, removes active class, shows toast | Same | Ensure favorites-only toggle unaffected |
| F-09 | Duplicate badge toggle | Pressing "Find duplicates" toggles duplicate mode and updates badge counts post-refresh | `ModelDuplicatesManager.toggleDuplicateMode` invoked and badge refresh called after API rebuild | Same plus checkpoint-specific duplicate badge dataset | Connects to future duplicate-specific specs |
| F-10 | Bulk actions menu | Opening bulk dropdown keeps filters intact and closes on outside click | Validate dropdown class toggling and no unintended reload | Same | Guard against regression when dropdown interacts with filters |
## Automation Coverage Status
- ✅ F-01 Search filter, F-02 Tag filter, F-03 Base model filter, F-04 Favorites-only toggle, F-05 Sort selection, and F-09 Duplicate badge toggle are covered by `tests/frontend/components/pageControls.filtering.test.js` for both LoRA and checkpoint pages.
- ⏳ F-06 Filter persistence, F-07 Combined filters, F-08 Clearing filters, and F-10 Bulk actions remain to be automated alongside upcoming bulk mode refinements.
## Coverage Gaps & Follow-Ups
- Write Vitest suites that exercise the matrix for both managers, sharing fixtures through page helpers to avoid duplication.
- Capture API parameter assertions by inspecting `baseModelApi.fetchModels` mocks rather than relying solely on state mutations.
- Add regression cases for legacy storage migrations (old filter keys) once fixtures exist for older payloads.
- Extend duplicate badge coverage with scenarios where `checkDuplicatesCount` signals zero duplicates versus pending calculations.

View File

@@ -0,0 +1,33 @@
# Frontend Automation Testing Roadmap
This roadmap tracks the planned rollout of automated testing for the ComfyUI LoRA Manager frontend. Each phase builds on the infrastructure introduced in this change set and records progress so future contributors can quickly identify the next tasks.
## Phase Overview
| Phase | Goal | Primary Focus | Status | Notes |
| --- | --- | --- | --- | --- |
| Phase 0 | Establish baseline tooling | Add Node test runner, jsdom environment, and seed smoke tests | ✅ Complete | Vitest + jsdom configured, example state tests committed |
| Phase 1 | Cover state management logic | Unit test selectors, derived data helpers, and storage utilities under `static/js/state` and `static/js/utils` | ✅ Complete | Storage helpers and state selectors now exercised via deterministic suites |
| Phase 2 | Test AppCore orchestration | Simulate page bootstrapping, infinite scroll hooks, and manager registration using JSDOM DOM fixtures | ✅ Complete | AppCore initialization + page feature suites now validate manager wiring, infinite scroll hooks, and onboarding gating |
| Phase 3 | Validate page-specific managers | Add focused suites for `loras`, `checkpoints`, `embeddings`, and `recipes` managers covering filtering, sorting, and bulk actions | ✅ Complete | LoRA/checkpoint suites expanded; embeddings + recipes managers now covered with initialization, filtering, and duplicate workflows |
| Phase 4 | Interaction-level regression tests | Exercise template fragments, modals, and menus to ensure UI wiring remains intact | ✅ Complete | Vitest DOM suites cover NSFW selector, recipe modal editing, and global context menus |
| Phase 5 | Continuous integration & coverage | Integrate frontend tests into CI workflow and track coverage metrics | ✅ Complete | CI workflow runs Vitest and aggregates V8 coverage into `coverage/frontend` via a dedicated script |
## Next Steps Checklist
- [x] Expand unit tests for `storageHelpers` covering migrations and namespace behavior.
- [x] Document DOM fixture strategy for reproducing template structures in tests.
- [x] Prototype AppCore initialization test that verifies manager bootstrapping with stubbed dependencies.
- [x] Add AppCore page feature suite exercising context menu creation and infinite scroll registration via DOM fixtures.
- [x] Extend AppCore orchestration tests to cover manager wiring, bulk menu setup, and onboarding gating scenarios.
- [x] Add interaction regression suites for context menus and recipe modals to complete Phase 4.
- [x] Evaluate integrating coverage reporting once test surface grows (> 20 specs).
- [x] Create shared fixtures for the loras and checkpoints pages once dedicated manager suites are added.
- [x] Draft focused test matrix for loras/checkpoints manager filtering and sorting paths ahead of Phase 3.
- [x] Implement LoRAs manager filtering/sorting specs for scenarios F-01 to F-05 & F-09; queue remaining edge cases after duplicate/bulk flows stabilize.
- [x] Implement checkpoints manager filtering/sorting specs for scenarios F-01 to F-05 & F-09; cover remaining paths alongside bulk action work.
- [x] Implement checkpoints page manager smoke tests covering initialization and duplicate badge wiring.
- [x] Outline focused checkpoints scenarios (filtering, sorting, duplicate badge toggles) to feed into the shared test matrix.
- [ ] Add duplicate badge regression coverage for zero/pending states after API refreshes.
Maintaining this roadmap alongside code changes will make it easier to append new automated test tasks and update their progress.

28
docs/library-switching.md Normal file
View File

@@ -0,0 +1,28 @@
# Library Switching and Preview Routes
Library switching no longer requires restarting the backend. The preview
thumbnails shown in the UI are now served through a dynamic endpoint that
resolves files against the folders registered for the active library at request
time. This allows the multi-library flow to update model roots without touching
the aiohttp router, so previews remain available immediately after a switch.
## How the dynamic preview endpoint works
* `config.get_preview_static_url()` now returns `/api/lm/previews?path=<encoded>`
for any preview path. The raw filesystem location is URL encoded so that it
can be passed through the query string without leaking directory structure in
the route itself (`py/config.py`, lines 398-404).
* `PreviewRoutes` exposes the `/api/lm/previews` handler, which validates the
decoded path against the directories registered for the current library. The
request is rejected if it falls outside those roots or if the file does not
exist (`py/routes/preview_routes.py`, lines 5-21; `py/routes/handlers/preview_handlers.py`, lines 9-48).
* `Config` keeps an up-to-date cache of allowed preview roots. Every time a
library is applied the cache is rebuilt using the declared LoRA, checkpoint,
and embedding directories (including symlink targets). The validation logic
checks preview requests against this cache (`py/config.py`, lines 51-68, 180-248, and 332-346).
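For illustration, the URL construction described in the first bullet reduces to a normalise-then-quote step (the path below is hypothetical):
```python
import urllib.parse

preview = "C:/models/loras/example/preview.png"  # hypothetical preview file
encoded = urllib.parse.quote(preview, safe="")
print(f"/api/lm/previews?path={encoded}")
# /api/lm/previews?path=C%3A%2Fmodels%2Floras%2Fexample%2Fpreview.png
```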
Both the ComfyUI runtime (`LoraManager.add_routes`) and the standalone launcher
(`StandaloneLoraManager.add_routes`) register the new preview routes instead of
mounting a static directory per root. Switching libraries therefore works
without restarting the application, and preview URLs generated before or after a
switch continue to resolve correctly (`py/lora_manager.py`, lines 21-82; `standalone.py`, lines 302-315).

1270
locales/de.json Normal file

File diff suppressed because it is too large Load Diff

1270
locales/en.json Normal file

File diff suppressed because it is too large Load Diff

1270
locales/es.json Normal file

File diff suppressed because it is too large Load Diff

1270
locales/fr.json Normal file

File diff suppressed because it is too large Load Diff

1270
locales/he.json Normal file

File diff suppressed because it is too large Load Diff

1270
locales/ja.json Normal file

File diff suppressed because it is too large Load Diff

1270
locales/ko.json Normal file

File diff suppressed because it is too large Load Diff

1270
locales/ru.json Normal file

File diff suppressed because it is too large Load Diff

1276
locales/zh-CN.json Normal file

File diff suppressed because it is too large Load Diff

1270
locales/zh-TW.json Normal file

File diff suppressed because it is too large Load Diff

2572
package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

15
package.json Normal file
View File

@@ -0,0 +1,15 @@
{
"name": "comfyui-lora-manager-frontend",
"version": "0.1.0",
"private": true,
"type": "module",
"scripts": {
"test": "vitest run",
"test:watch": "vitest",
"test:coverage": "node scripts/run_frontend_coverage.js"
},
"devDependencies": {
"jsdom": "^24.0.0",
"vitest": "^1.6.0"
}
}

View File

@@ -0,0 +1,12 @@
"""Project namespace package."""
# pytest's internal compatibility layer still imports ``py.path.local`` from the
# historical ``py`` dependency. Because this project reuses the ``py`` package
# name, we expose a minimal shim so ``py.path.local`` resolves to ``pathlib.Path``
# during test runs without pulling in the external dependency.
from pathlib import Path
from types import SimpleNamespace
path = SimpleNamespace(local=Path)
__all__ = ["path"]

View File

@@ -1,27 +1,61 @@
import os
import platform
from pathlib import Path
import folder_paths # type: ignore
from typing import List
from typing import Dict, Iterable, List, Mapping, Set
import logging
import sys
import json
import urllib.parse
# Check if running in standalone mode
standalone_mode = 'nodes' not in sys.modules
from .utils.settings_paths import ensure_settings_file
# Use an environment variable to control standalone mode
standalone_mode = os.environ.get("LORA_MANAGER_STANDALONE", "0") == "1" or os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "0"
logger = logging.getLogger(__name__)
def _normalize_folder_paths_for_comparison(
folder_paths: Mapping[str, Iterable[str]]
) -> Dict[str, Set[str]]:
"""Normalize folder paths for comparison across libraries."""
normalized: Dict[str, Set[str]] = {}
for key, values in folder_paths.items():
if isinstance(values, str):
candidate_values: Iterable[str] = [values]
else:
try:
candidate_values = iter(values)
except TypeError:
continue
normalized_values: Set[str] = set()
for value in candidate_values:
if not isinstance(value, str):
continue
stripped = value.strip()
if not stripped:
continue
normalized_values.add(os.path.normcase(os.path.normpath(stripped)))
if normalized_values:
normalized[key] = normalized_values
return normalized
class Config:
"""Global configuration for LoRA Manager"""
def __init__(self):
self.templates_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'templates')
self.static_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'static')
self.i18n_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'locales')
# Path mapping dictionary, target to link mapping
self._path_mappings = {}
# Static route mapping dictionary, target to route mapping
self._route_mappings = {}
self._path_mappings: Dict[str, str] = {}
# Normalized preview root directories used to validate preview access
self._preview_root_paths: Set[Path] = set()
self.loras_roots = self._init_lora_paths()
self.checkpoints_roots = None
self.unet_roots = None
@@ -30,45 +64,74 @@ class Config:
self.embeddings_roots = self._init_embedding_paths()
# Scan symbolic links during initialization
self._scan_symbolic_links()
self._rebuild_preview_roots()
if not standalone_mode:
# Save the paths to settings.json when running in ComfyUI mode
self.save_folder_paths_to_settings()
def save_folder_paths_to_settings(self):
"""Save folder paths to settings.json for standalone mode to use later"""
"""Persist ComfyUI-derived folder paths to the multi-library settings."""
try:
# Check if we're running in ComfyUI mode (not standalone)
# Load existing settings
settings_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'settings.json')
settings = {}
if os.path.exists(settings_path):
with open(settings_path, 'r', encoding='utf-8') as f:
settings = json.load(f)
# Update settings with paths
settings['folder_paths'] = {
'loras': self.loras_roots,
'checkpoints': self.checkpoints_roots,
'unet': self.unet_roots,
'embeddings': self.embeddings_roots,
}
# Add default roots if there's only one item and key doesn't exist
if len(self.loras_roots) == 1 and "default_lora_root" not in settings:
settings["default_lora_root"] = self.loras_roots[0]
if self.checkpoints_roots and len(self.checkpoints_roots) == 1 and "default_checkpoint_root" not in settings:
settings["default_checkpoint_root"] = self.checkpoints_roots[0]
ensure_settings_file(logger)
from .services.settings_manager import get_settings_manager
if self.embeddings_roots and len(self.embeddings_roots) == 1 and "default_embedding_root" not in settings:
settings["default_embedding_root"] = self.embeddings_roots[0]
# Save settings
with open(settings_path, 'w', encoding='utf-8') as f:
json.dump(settings, f, indent=2)
logger.info("Saved folder paths to settings.json")
settings_service = get_settings_manager()
libraries = settings_service.get_libraries()
comfy_library = libraries.get("comfyui", {})
default_library = libraries.get("default", {})
target_folder_paths = {
'loras': list(self.loras_roots),
'checkpoints': list(self.checkpoints_roots or []),
'unet': list(self.unet_roots or []),
'embeddings': list(self.embeddings_roots or []),
}
normalized_target_paths = _normalize_folder_paths_for_comparison(target_folder_paths)
if (not comfy_library and default_library and normalized_target_paths and
_normalize_folder_paths_for_comparison(default_library.get("folder_paths", {})) ==
normalized_target_paths):
try:
settings_service.rename_library("default", "comfyui")
logger.info("Renamed legacy 'default' library to 'comfyui'")
libraries = settings_service.get_libraries()
comfy_library = libraries.get("comfyui", {})
except Exception as rename_error:
logger.debug(
"Failed to rename legacy 'default' library: %s", rename_error
)
default_lora_root = comfy_library.get("default_lora_root", "")
if not default_lora_root and len(self.loras_roots) == 1:
default_lora_root = self.loras_roots[0]
default_checkpoint_root = comfy_library.get("default_checkpoint_root", "")
if (not default_checkpoint_root and self.checkpoints_roots and
len(self.checkpoints_roots) == 1):
default_checkpoint_root = self.checkpoints_roots[0]
default_embedding_root = comfy_library.get("default_embedding_root", "")
if (not default_embedding_root and self.embeddings_roots and
len(self.embeddings_roots) == 1):
default_embedding_root = self.embeddings_roots[0]
metadata = dict(comfy_library.get("metadata", {}))
metadata.setdefault("display_name", "ComfyUI")
metadata["source"] = "comfyui"
settings_service.upsert_library(
"comfyui",
folder_paths=target_folder_paths,
default_lora_root=default_lora_root,
default_checkpoint_root=default_checkpoint_root,
default_embedding_root=default_embedding_root,
metadata=metadata,
activate=True,
)
logger.info("Updated 'comfyui' library with current folder paths")
except Exception as e:
logger.warning(f"Failed to save folder paths: {e}")
@@ -125,12 +188,65 @@ class Config:
# Keep the original mapping: target path -> link path
self._path_mappings[normalized_target] = normalized_link
logger.info(f"Added path mapping: {normalized_target} -> {normalized_link}")
self._preview_root_paths.update(self._expand_preview_root(normalized_target))
self._preview_root_paths.update(self._expand_preview_root(normalized_link))
def add_route_mapping(self, path: str, route: str):
"""Add a static route mapping"""
normalized_path = os.path.normpath(path).replace(os.sep, '/')
self._route_mappings[normalized_path] = route
# logger.info(f"Added route mapping: {normalized_path} -> {route}")
def _expand_preview_root(self, path: str) -> Set[Path]:
"""Return normalized ``Path`` objects representing a preview root."""
roots: Set[Path] = set()
if not path:
return roots
try:
raw_path = Path(path).expanduser()
except Exception:
return roots
if raw_path.is_absolute():
roots.add(raw_path)
try:
resolved = raw_path.resolve(strict=False)
except RuntimeError:
resolved = raw_path.absolute()
roots.add(resolved)
try:
real_path = raw_path.resolve()
except (FileNotFoundError, RuntimeError):
real_path = resolved
roots.add(real_path)
normalized: Set[Path] = set()
for candidate in roots:
if candidate.is_absolute():
normalized.add(candidate)
else:
try:
normalized.add(candidate.resolve(strict=False))
except RuntimeError:
normalized.add(candidate.absolute())
return normalized
def _rebuild_preview_roots(self) -> None:
"""Recompute the cache of directories permitted for previews."""
preview_roots: Set[Path] = set()
for root in self.loras_roots or []:
preview_roots.update(self._expand_preview_root(root))
for root in self.base_models_roots or []:
preview_roots.update(self._expand_preview_root(root))
for root in self.embeddings_roots or []:
preview_roots.update(self._expand_preview_root(root))
for target, link in self._path_mappings.items():
preview_roots.update(self._expand_preview_root(target))
preview_roots.update(self._expand_preview_root(link))
self._preview_root_paths = {path for path in preview_roots if path.is_absolute()}
def map_path_to_link(self, path: str) -> str:
"""Map a target path back to its symbolic link path"""
@@ -154,31 +270,93 @@ class Config:
return mapped_path
return link_path
def _dedupe_existing_paths(self, raw_paths: Iterable[str]) -> Dict[str, str]:
dedup: Dict[str, str] = {}
for path in raw_paths:
if not isinstance(path, str):
continue
if not os.path.exists(path):
continue
real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
normalized = os.path.normpath(path).replace(os.sep, '/')
if real_path not in dedup:
dedup[real_path] = normalized
return dedup
def _prepare_lora_paths(self, raw_paths: Iterable[str]) -> List[str]:
path_map = self._dedupe_existing_paths(raw_paths)
unique_paths = sorted(path_map.values(), key=lambda p: p.lower())
for original_path in unique_paths:
real_path = os.path.normpath(os.path.realpath(original_path)).replace(os.sep, '/')
if real_path != original_path:
self.add_path_mapping(original_path, real_path)
return unique_paths
def _prepare_checkpoint_paths(
self, checkpoint_paths: Iterable[str], unet_paths: Iterable[str]
) -> List[str]:
checkpoint_map = self._dedupe_existing_paths(checkpoint_paths)
unet_map = self._dedupe_existing_paths(unet_paths)
merged_map: Dict[str, str] = {}
for real_path, original in {**checkpoint_map, **unet_map}.items():
if real_path not in merged_map:
merged_map[real_path] = original
unique_paths = sorted(merged_map.values(), key=lambda p: p.lower())
checkpoint_values = set(checkpoint_map.values())
unet_values = set(unet_map.values())
self.checkpoints_roots = [p for p in unique_paths if p in checkpoint_values]
self.unet_roots = [p for p in unique_paths if p in unet_values]
for original_path in unique_paths:
real_path = os.path.normpath(os.path.realpath(original_path)).replace(os.sep, '/')
if real_path != original_path:
self.add_path_mapping(original_path, real_path)
return unique_paths
def _prepare_embedding_paths(self, raw_paths: Iterable[str]) -> List[str]:
path_map = self._dedupe_existing_paths(raw_paths)
unique_paths = sorted(path_map.values(), key=lambda p: p.lower())
for original_path in unique_paths:
real_path = os.path.normpath(os.path.realpath(original_path)).replace(os.sep, '/')
if real_path != original_path:
self.add_path_mapping(original_path, real_path)
return unique_paths
def _apply_library_paths(self, folder_paths: Mapping[str, Iterable[str]]) -> None:
self._path_mappings.clear()
self._preview_root_paths = set()
lora_paths = folder_paths.get('loras', []) or []
checkpoint_paths = folder_paths.get('checkpoints', []) or []
unet_paths = folder_paths.get('unet', []) or []
embedding_paths = folder_paths.get('embeddings', []) or []
self.loras_roots = self._prepare_lora_paths(lora_paths)
self.base_models_roots = self._prepare_checkpoint_paths(checkpoint_paths, unet_paths)
self.embeddings_roots = self._prepare_embedding_paths(embedding_paths)
self._scan_symbolic_links()
self._rebuild_preview_roots()
def _init_lora_paths(self) -> List[str]:
"""Initialize and validate LoRA paths from ComfyUI settings"""
try:
raw_paths = folder_paths.get_folder_paths("loras")
# Normalize and resolve symlinks, store mapping from resolved -> original
path_map = {}
for path in raw_paths:
if os.path.exists(path):
real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
path_map[real_path] = path_map.get(real_path, path.replace(os.sep, "/")) # preserve first seen
# Now sort and use only the deduplicated real paths
unique_paths = sorted(path_map.values(), key=lambda p: p.lower())
unique_paths = self._prepare_lora_paths(raw_paths)
logger.info("Found LoRA roots:" + ("\n - " + "\n - ".join(unique_paths) if unique_paths else "[]"))
if not unique_paths:
logger.warning("No valid loras folders found in ComfyUI configuration")
return []
for original_path in unique_paths:
real_path = os.path.normpath(os.path.realpath(original_path)).replace(os.sep, '/')
if real_path != original_path:
self.add_path_mapping(original_path, real_path)
return unique_paths
except Exception as e:
logger.warning(f"Error initializing LoRA paths: {e}")
@@ -187,52 +365,17 @@ class Config:
def _init_checkpoint_paths(self) -> List[str]:
"""Initialize and validate checkpoint paths from ComfyUI settings"""
try:
# Get checkpoint paths from folder_paths
raw_checkpoint_paths = folder_paths.get_folder_paths("checkpoints")
raw_unet_paths = folder_paths.get_folder_paths("unet")
# Normalize and resolve symlinks for checkpoints, store mapping from resolved -> original
checkpoint_map = {}
for path in raw_checkpoint_paths:
if os.path.exists(path):
real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
checkpoint_map[real_path] = checkpoint_map.get(real_path, path.replace(os.sep, "/")) # preserve first seen
# Normalize and resolve symlinks for unet, store mapping from resolved -> original
unet_map = {}
for path in raw_unet_paths:
if os.path.exists(path):
real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
unet_map[real_path] = unet_map.get(real_path, path.replace(os.sep, "/")) # preserve first seen
# Merge both maps and deduplicate by real path
merged_map = {}
for real_path, orig_path in {**checkpoint_map, **unet_map}.items():
if real_path not in merged_map:
merged_map[real_path] = orig_path
unique_paths = self._prepare_checkpoint_paths(raw_checkpoint_paths, raw_unet_paths)
# Now sort and use only the deduplicated real paths
unique_paths = sorted(merged_map.values(), key=lambda p: p.lower())
# Split back into checkpoints and unet roots for class properties
self.checkpoints_roots = [p for p in unique_paths if p in checkpoint_map.values()]
self.unet_roots = [p for p in unique_paths if p in unet_map.values()]
all_paths = unique_paths
logger.info("Found checkpoint roots:" + ("\n - " + "\n - ".join(all_paths) if all_paths else "[]"))
if not all_paths:
logger.info("Found checkpoint roots:" + ("\n - " + "\n - ".join(unique_paths) if unique_paths else "[]"))
if not unique_paths:
logger.warning("No valid checkpoint folders found in ComfyUI configuration")
return []
# Initialize path mappings
for original_path in all_paths:
real_path = os.path.normpath(os.path.realpath(original_path)).replace(os.sep, '/')
if real_path != original_path:
self.add_path_mapping(original_path, real_path)
return all_paths
return unique_paths
except Exception as e:
logger.warning(f"Error initializing checkpoint paths: {e}")
return []
@@ -241,47 +384,77 @@ class Config:
"""Initialize and validate embedding paths from ComfyUI settings"""
try:
raw_paths = folder_paths.get_folder_paths("embeddings")
# Normalize and resolve symlinks, store mapping from resolved -> original
path_map = {}
for path in raw_paths:
if os.path.exists(path):
real_path = os.path.normpath(os.path.realpath(path)).replace(os.sep, '/')
path_map[real_path] = path_map.get(real_path, path.replace(os.sep, "/")) # preserve first seen
# Now sort and use only the deduplicated real paths
unique_paths = sorted(path_map.values(), key=lambda p: p.lower())
unique_paths = self._prepare_embedding_paths(raw_paths)
logger.info("Found embedding roots:" + ("\n - " + "\n - ".join(unique_paths) if unique_paths else "[]"))
if not unique_paths:
logger.warning("No valid embeddings folders found in ComfyUI configuration")
return []
for original_path in unique_paths:
real_path = os.path.normpath(os.path.realpath(original_path)).replace(os.sep, '/')
if real_path != original_path:
self.add_path_mapping(original_path, real_path)
return unique_paths
except Exception as e:
logger.warning(f"Error initializing embedding paths: {e}")
return []
def get_preview_static_url(self, preview_path: str) -> str:
"""Convert local preview path to static URL"""
if not preview_path:
return ""
real_path = os.path.realpath(preview_path).replace(os.sep, '/')
for path, route in self._route_mappings.items():
if real_path.startswith(path):
relative_path = os.path.relpath(real_path, path).replace(os.sep, '/')
safe_parts = [urllib.parse.quote(part) for part in relative_path.split('/')]
safe_path = '/'.join(safe_parts)
return f'{route}/{safe_path}'
normalized = os.path.normpath(preview_path).replace(os.sep, '/')
encoded_path = urllib.parse.quote(normalized, safe='')
return f'/api/lm/previews?path={encoded_path}'
return ""
def is_preview_path_allowed(self, preview_path: str) -> bool:
"""Return ``True`` if ``preview_path`` is within an allowed directory."""
if not preview_path:
return False
try:
candidate = Path(preview_path).expanduser().resolve(strict=False)
except Exception:
return False
for root in self._preview_root_paths:
try:
candidate.relative_to(root)
return True
except ValueError:
continue
return False
def apply_library_settings(self, library_config: Mapping[str, object]) -> None:
"""Update runtime paths to match the provided library configuration."""
folder_paths = library_config.get('folder_paths') if isinstance(library_config, Mapping) else {}
if not isinstance(folder_paths, Mapping):
folder_paths = {}
self._apply_library_paths(folder_paths)
logger.info(
"Applied library settings with %d lora roots, %d checkpoint roots, and %d embedding roots",
len(self.loras_roots or []),
len(self.base_models_roots or []),
len(self.embeddings_roots or []),
)
def get_library_registry_snapshot(self) -> Dict[str, object]:
"""Return the current library registry and active library name."""
try:
from .services.settings_manager import get_settings_manager
settings_service = get_settings_manager()
libraries = settings_service.get_libraries()
active_library = settings_service.get_active_library_name()
return {
"active_library": active_library,
"libraries": libraries,
}
except Exception as exc: # pragma: no cover - defensive logging
logger.debug("Failed to collect library registry snapshot: %s", exc)
return {"active_library": "", "libraries": {}}
# Global config instance
config = Config()

View File

@@ -2,7 +2,6 @@ import asyncio
import sys
import os
import logging
from pathlib import Path
from server import PromptServer # type: ignore
from .config import config
@@ -11,17 +10,38 @@ from .routes.recipe_routes import RecipeRoutes
from .routes.stats_routes import StatsRoutes
from .routes.update_routes import UpdateRoutes
from .routes.misc_routes import MiscRoutes
from .routes.preview_routes import PreviewRoutes
from .routes.example_images_routes import ExampleImagesRoutes
from .services.service_registry import ServiceRegistry
from .services.settings_manager import settings
from .services.settings_manager import get_settings_manager
from .utils.example_images_migration import ExampleImagesMigration
from .services.websocket_manager import ws_manager
from .services.example_images_cleanup_service import ExampleImagesCleanupService
logger = logging.getLogger(__name__)
# Check if we're in standalone mode
STANDALONE_MODE = 'nodes' not in sys.modules
class _SettingsProxy:
def __init__(self):
self._manager = None
def _resolve(self):
if self._manager is None:
self._manager = get_settings_manager()
return self._manager
def get(self, *args, **kwargs):
return self._resolve().get(*args, **kwargs)
def __getattr__(self, item):
return getattr(self._resolve(), item)
settings = _SettingsProxy()
class LoraManager:
"""Main entry point for LoRA Manager plugin"""
@@ -49,103 +69,18 @@ class LoraManager:
asyncio_logger = logging.getLogger("asyncio")
asyncio_logger.addFilter(ConnectionResetFilter())
added_targets = set() # Track already added target paths
# Add static route for example images if the path exists in settings
example_images_path = settings.get('example_images_path')
logger.info(f"Example images path: {example_images_path}")
if example_images_path and os.path.exists(example_images_path):
app.router.add_static('/example_images_static', example_images_path)
logger.info(f"Added static route for example images: /example_images_static -> {example_images_path}")
# Add static routes for each lora root
for idx, root in enumerate(config.loras_roots, start=1):
preview_path = f'/loras_static/root{idx}/preview'
real_root = root
if root in config._path_mappings.values():
for target, link in config._path_mappings.items():
if link == root:
real_root = target
break
# Add static route for original path
app.router.add_static(preview_path, real_root)
logger.info(f"Added static route {preview_path} -> {real_root}")
# Record route mapping
config.add_route_mapping(real_root, preview_path)
added_targets.add(real_root)
# Add static routes for each checkpoint root
for idx, root in enumerate(config.base_models_roots, start=1):
preview_path = f'/checkpoints_static/root{idx}/preview'
real_root = root
if root in config._path_mappings.values():
for target, link in config._path_mappings.items():
if link == root:
real_root = target
break
# Add static route for original path
app.router.add_static(preview_path, real_root)
logger.info(f"Added static route {preview_path} -> {real_root}")
# Record route mapping
config.add_route_mapping(real_root, preview_path)
added_targets.add(real_root)
# Add static routes for each embedding root
for idx, root in enumerate(config.embeddings_roots, start=1):
preview_path = f'/embeddings_static/root{idx}/preview'
real_root = root
if root in config._path_mappings.values():
for target, link in config._path_mappings.items():
if link == root:
real_root = target
break
# Add static route for original path
app.router.add_static(preview_path, real_root)
logger.info(f"Added static route {preview_path} -> {real_root}")
# Record route mapping
config.add_route_mapping(real_root, preview_path)
added_targets.add(real_root)
# Add static routes for symlink target paths
link_idx = {
'lora': 1,
'checkpoint': 1,
'embedding': 1
}
for target_path, link_path in config._path_mappings.items():
if target_path not in added_targets:
# Determine if this is a checkpoint, lora, or embedding link based on path
is_checkpoint = any(cp_root in link_path for cp_root in config.base_models_roots)
is_checkpoint = is_checkpoint or any(cp_root in target_path for cp_root in config.base_models_roots)
is_embedding = any(emb_root in link_path for emb_root in config.embeddings_roots)
is_embedding = is_embedding or any(emb_root in target_path for emb_root in config.embeddings_roots)
if is_checkpoint:
route_path = f'/checkpoints_static/link_{link_idx["checkpoint"]}/preview'
link_idx["checkpoint"] += 1
elif is_embedding:
route_path = f'/embeddings_static/link_{link_idx["embedding"]}/preview'
link_idx["embedding"] += 1
else:
route_path = f'/loras_static/link_{link_idx["lora"]}/preview'
link_idx["lora"] += 1
try:
app.router.add_static(route_path, Path(target_path).resolve(strict=False))
logger.info(f"Added static route for link target {route_path} -> {target_path}")
config.add_route_mapping(target_path, route_path)
added_targets.add(target_path)
except Exception as e:
logger.warning(f"Failed to add static route on initialization for {target_path}: {e}")
continue
# Add static route for locales JSON files
if os.path.exists(config.i18n_path):
app.router.add_static('/locales', config.i18n_path)
logger.info(f"Added static route for locales: /locales -> {config.i18n_path}")
# Add static route for plugin assets
app.router.add_static('/loras_static', config.static_path)
@@ -161,7 +96,8 @@ class LoraManager:
RecipeRoutes.setup_routes(app)
UpdateRoutes.setup_routes(app)
MiscRoutes.setup_routes(app)
ExampleImagesRoutes.setup_routes(app)
ExampleImagesRoutes.setup_routes(app, ws_manager=ws_manager)
PreviewRoutes.setup_routes(app)
# Setup WebSocket routes that are shared across all model types
app.router.add_get('/ws/fetch-progress', ws_manager.handle_connection)
@@ -185,6 +121,9 @@ class LoraManager:
# Register DownloadManager with ServiceRegistry
await ServiceRegistry.get_download_manager()
from .services.metadata_service import initialize_metadata_providers
await initialize_metadata_providers()
# Initialize WebSocket manager
await ServiceRegistry.get_websocket_manager()
@@ -198,29 +137,188 @@ class LoraManager:
recipe_scanner = await ServiceRegistry.get_recipe_scanner()
# Create low-priority initialization tasks
asyncio.create_task(lora_scanner.initialize_in_background(), name='lora_cache_init')
asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init')
asyncio.create_task(embedding_scanner.initialize_in_background(), name='embedding_cache_init')
asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')
init_tasks = [
asyncio.create_task(lora_scanner.initialize_in_background(), name='lora_cache_init'),
asyncio.create_task(checkpoint_scanner.initialize_in_background(), name='checkpoint_cache_init'),
asyncio.create_task(embedding_scanner.initialize_in_background(), name='embedding_cache_init'),
asyncio.create_task(recipe_scanner.initialize_in_background(), name='recipe_cache_init')
]
await ExampleImagesMigration.check_and_run_migrations()
logger.info("LoRA Manager: All services initialized and background tasks scheduled")
# Schedule post-initialization tasks to run after scanners complete
asyncio.create_task(
cls._run_post_initialization_tasks(init_tasks),
name='post_init_tasks'
)
logger.debug("LoRA Manager: All services initialized and background tasks scheduled")
except Exception as e:
logger.error(f"LoRA Manager: Error initializing services: {e}", exc_info=True)
@classmethod
async def _run_post_initialization_tasks(cls, init_tasks):
"""Run post-initialization tasks after all scanners complete"""
try:
logger.debug("LoRA Manager: Waiting for scanner initialization to complete...")
# Wait for all scanner initialization tasks to complete
await asyncio.gather(*init_tasks, return_exceptions=True)
logger.debug("LoRA Manager: Scanner initialization completed, starting post-initialization tasks...")
# Run post-initialization tasks
post_tasks = [
asyncio.create_task(cls._cleanup_backup_files(), name='cleanup_bak_files'),
# Add more post-initialization tasks here as needed
# asyncio.create_task(cls._another_post_task(), name='another_task'),
]
# Run all post-initialization tasks
results = await asyncio.gather(*post_tasks, return_exceptions=True)
# Log results
for i, result in enumerate(results):
task_name = post_tasks[i].get_name()
if isinstance(result, Exception):
logger.error(f"Post-initialization task '{task_name}' failed: {result}")
else:
logger.debug(f"Post-initialization task '{task_name}' completed successfully")
logger.debug("LoRA Manager: All post-initialization tasks completed")
except Exception as e:
logger.error(f"LoRA Manager: Error in post-initialization tasks: {e}", exc_info=True)
@classmethod
async def _cleanup_backup_files(cls):
"""Clean up .bak files in all model roots"""
try:
logger.debug("Starting cleanup of .bak files in model directories...")
# Collect all model roots
all_roots = set()
all_roots.update(config.loras_roots)
all_roots.update(config.base_models_roots)
all_roots.update(config.embeddings_roots)
total_deleted = 0
total_size_freed = 0
for root_path in all_roots:
if not os.path.exists(root_path):
continue
try:
deleted_count, size_freed = await cls._cleanup_backup_files_in_directory(root_path)
total_deleted += deleted_count
total_size_freed += size_freed
if deleted_count > 0:
logger.debug(f"Cleaned up {deleted_count} .bak files in {root_path} (freed {size_freed / (1024*1024):.2f} MB)")
except Exception as e:
logger.error(f"Error cleaning up .bak files in {root_path}: {e}")
# Yield control periodically
await asyncio.sleep(0.01)
if total_deleted > 0:
logger.debug(f"Backup cleanup completed: removed {total_deleted} .bak files, freed {total_size_freed / (1024*1024):.2f} MB total")
else:
logger.debug("Backup cleanup completed: no .bak files found")
except Exception as e:
logger.error(f"Error during backup file cleanup: {e}", exc_info=True)
@classmethod
async def _cleanup_backup_files_in_directory(cls, directory_path: str):
"""Clean up .bak files in a specific directory recursively
Args:
directory_path: Path to the directory to clean
Returns:
Tuple[int, int]: (number of files deleted, total size freed in bytes)
"""
deleted_count = 0
size_freed = 0
visited_paths = set()
def cleanup_recursive(path):
nonlocal deleted_count, size_freed
try:
real_path = os.path.realpath(path)
if real_path in visited_paths:
return
visited_paths.add(real_path)
with os.scandir(path) as it:
for entry in it:
try:
if entry.is_file(follow_symlinks=True) and entry.name.endswith('.bak'):
file_size = entry.stat().st_size
os.remove(entry.path)
deleted_count += 1
size_freed += file_size
logger.debug(f"Deleted .bak file: {entry.path}")
elif entry.is_dir(follow_symlinks=True):
cleanup_recursive(entry.path)
except Exception as e:
logger.warning(f"Could not delete .bak file {entry.path}: {e}")
except Exception as e:
logger.error(f"Error scanning directory {path} for .bak files: {e}")
# Run the recursive cleanup in a thread pool to avoid blocking
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, cleanup_recursive, directory_path)
return deleted_count, size_freed
@classmethod
async def _cleanup_example_images_folders(cls):
"""Invoke the example images cleanup service for manual execution."""
try:
service = ExampleImagesCleanupService()
result = await service.cleanup_example_image_folders()
if result.get('success'):
logger.debug(
"Manual example images cleanup completed: moved=%s",
result.get('moved_total'),
)
elif result.get('partial_success'):
logger.warning(
"Manual example images cleanup partially succeeded: moved=%s failures=%s",
result.get('moved_total'),
result.get('move_failures'),
)
else:
logger.debug(
"Manual example images cleanup skipped or failed: %s",
result.get('error', 'no changes'),
)
return result
except Exception as e: # pragma: no cover - defensive guard
logger.error(f"Error during example images cleanup: {e}", exc_info=True)
return {
'success': False,
'error': str(e),
'error_code': 'unexpected_error',
}
@classmethod
async def _cleanup(cls, app):
"""Cleanup resources using ServiceRegistry"""
try:
logger.info("LoRA Manager: Cleaning up services")
# Close CivitaiClient gracefully
civitai_client = await ServiceRegistry.get_service("civitai_client")
if civitai_client:
await civitai_client.close()
logger.info("Closed CivitaiClient connection")
except Exception as e:
logger.error(f"Error during cleanup: {e}", exc_info=True)

View File

@@ -1,9 +1,7 @@
import os
import importlib
import sys
# Check if running in standalone mode
standalone_mode = 'nodes' not in sys.modules
standalone_mode = os.environ.get("LORA_MANAGER_STANDALONE", "0") == "1" or os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "0"
if not standalone_mode:
from .metadata_hook import MetadataHook

View File

@@ -1,9 +1,9 @@
import json
import sys
import os
from .constants import IMAGES
# Check if running in standalone mode
standalone_mode = 'nodes' not in sys.modules
standalone_mode = os.environ.get("LORA_MANAGER_STANDALONE", "0") == "1" or os.environ.get("HF_HUB_DISABLE_TELEMETRY", "0") == "0"
from .constants import MODELS, PROMPTS, SAMPLING, LORAS, SIZE, IS_SAMPLER
@@ -295,7 +295,7 @@ class MetadataProcessor:
"seed": None,
"steps": None,
"cfg_scale": None,
"guidance": None, # Add guidance parameter
# "guidance": None, # Add guidance parameter
"sampler": None,
"scheduler": None,
"checkpoint": None,

View File

@@ -671,6 +671,7 @@ NODE_EXTRACTORS = {
"AdvancedCLIPTextEncode": CLIPTextEncodeExtractor, # From https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb
"smZ_CLIPTextEncode": CLIPTextEncodeExtractor, # From https://github.com/shiimizu/ComfyUI_smZNodes
"CR_ApplyControlNetStack": CR_ApplyControlNetStackExtractor, # Add CR_ApplyControlNetStack
"PCTextEncode": CLIPTextEncodeExtractor, # From https://github.com/asagi4/comfyui-prompt-control
# Latent
"EmptyLatentImage": ImageSizeExtractor,
# Flux

View File

@@ -0,0 +1 @@
"""Server middleware modules"""

View File

@@ -0,0 +1,53 @@
"""Cache control middleware for ComfyUI server"""
from aiohttp import web
from typing import Callable, Awaitable
# Time in seconds
ONE_HOUR: int = 3600
ONE_DAY: int = 86400
IMG_EXTENSIONS = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
".mp4"
)
@web.middleware
async def cache_control(
request: web.Request, handler: Callable[[web.Request], Awaitable[web.Response]]
) -> web.Response:
"""Cache control middleware that sets appropriate cache headers based on file type and response status"""
response: web.Response = await handler(request)
if (
request.path.endswith(".js")
or request.path.endswith(".css")
or request.path.endswith("index.json")
):
response.headers.setdefault("Cache-Control", "no-cache")
return response
# Early return for non-image files - no cache headers needed
if not request.path.lower().endswith(IMG_EXTENSIONS):
return response
# Handle image files
if response.status == 404:
response.headers.setdefault("Cache-Control", f"public, max-age={ONE_HOUR}")
elif response.status in (200, 201, 202, 203, 204, 205, 206, 301, 308):
# Success responses and permanent redirects - cache for 1 day
response.headers.setdefault("Cache-Control", f"public, max-age={ONE_DAY}")
elif response.status in (302, 303, 307):
# Temporary redirects - no cache
response.headers.setdefault("Cache-Control", "no-cache")
# Note: 304 Not Modified falls through - no cache headers set
return response

View File

@@ -115,7 +115,7 @@ class LoraManagerLoader:
formatted_loras = []
for item in loaded_loras:
parts = item.split(":")
lora_name = parts[0].strip()
lora_name = parts[0]
strength_parts = parts[1].strip().split(",")
if len(strength_parts) > 1:
@@ -165,7 +165,7 @@ class LoraManagerTextLoader:
loras = []
for match in matches:
lora_name = match[0].strip()
lora_name = match[0]
model_strength = float(match[1])
clip_strength = float(match[2]) if match[2] else model_strength

View File

@@ -0,0 +1,127 @@
from comfy.comfy_types import IO
import folder_paths
from ..utils.utils import get_lora_info
from .utils import any_type
import logging
# Initialize the logger
logger = logging.getLogger(__name__)
# Define the new node class
class WanVideoLoraSelectFromText:
# 节点在UI中显示的名称
NAME = "WanVideo Lora Select From Text (LoraManager)"
# 节点所属的分类
CATEGORY = "Lora Manager/stackers"
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"low_mem_load": ("BOOLEAN", {"default": False, "tooltip": "Load LORA models with less VRAM usage, slower loading. This affects ALL LoRAs, not just the current ones. No effect if merge_loras is False"}),
"merge_lora": ("BOOLEAN", {"default": True, "tooltip": "Merge LoRAs into the model, otherwise they are loaded on the fly. Always disabled for GGUF and scaled fp8 models. This affects ALL LoRAs, not just the current one"}),
"lora_syntax": (IO.STRING, {
"multiline": True,
"defaultInput": True,
"forceInput": True,
"tooltip": "Connect a TEXT output for LoRA syntax: <lora:name:strength>"
}),
},
"optional": {
"prev_lora": ("WANVIDLORA",),
"blocks": ("BLOCKS",)
}
}
RETURN_TYPES = ("WANVIDLORA", IO.STRING, IO.STRING)
RETURN_NAMES = ("lora", "trigger_words", "active_loras")
FUNCTION = "process_loras_from_syntax"
def process_loras_from_syntax(self, lora_syntax, low_mem_load=False, merge_lora=True, **kwargs):
text_to_process = lora_syntax
blocks = kwargs.get('blocks', {})
selected_blocks = blocks.get("selected_blocks", {})
layer_filter = blocks.get("layer_filter", "")
loras_list = []
all_trigger_words = []
active_loras = []
prev_lora = kwargs.get('prev_lora', None)
if prev_lora is not None:
loras_list.extend(prev_lora)
if not merge_lora:
low_mem_load = False
parts = text_to_process.split('<lora:')
for part in parts[1:]:
end_index = part.find('>')
if end_index == -1:
continue
content = part[:end_index]
lora_parts = content.split(':')
lora_name_raw = ""
model_strength = 1.0
clip_strength = 1.0
if len(lora_parts) == 2:
lora_name_raw = lora_parts[0].strip()
try:
model_strength = float(lora_parts[1])
clip_strength = model_strength
except (ValueError, IndexError):
logger.warning(f"Invalid strength for LoRA '{lora_name_raw}'. Skipping.")
continue
elif len(lora_parts) >= 3:
lora_name_raw = lora_parts[0].strip()
try:
model_strength = float(lora_parts[1])
clip_strength = float(lora_parts[2])
except (ValueError, IndexError):
logger.warning(f"Invalid strengths for LoRA '{lora_name_raw}'. Skipping.")
continue
else:
continue
lora_path, trigger_words = get_lora_info(lora_name_raw)
lora_item = {
"path": folder_paths.get_full_path("loras", lora_path),
"strength": model_strength,
"name": lora_path.split(".")[0],
"blocks": selected_blocks,
"layer_filter": layer_filter,
"low_mem_load": low_mem_load,
"merge_loras": merge_lora,
}
loras_list.append(lora_item)
active_loras.append((lora_name_raw, model_strength, clip_strength))
all_trigger_words.extend(trigger_words)
trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""
formatted_loras = []
for name, model_strength, clip_strength in active_loras:
if abs(model_strength - clip_strength) > 0.001:
formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}:{str(clip_strength).strip()}>")
else:
formatted_loras.append(f"<lora:{name}:{str(model_strength).strip()}>")
active_loras_text = " ".join(formatted_loras)
return (loras_list, trigger_words_text, active_loras_text)
NODE_CLASS_MAPPINGS = {
"WanVideoLoraSelectFromText": WanVideoLoraSelectFromText
}
NODE_DISPLAY_NAME_MAPPINGS = {
"WanVideoLoraSelectFromText": "WanVideo Lora Select From Text (LoraManager)"
}
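
The parsing loop in process_loras_from_syntax is easy to check in isolation. A minimal re-implementation of its <lora:name:model[:clip]> grammar, with the node's warning/skip handling and LoRA path lookup omitted:

def parse_lora_syntax(text: str) -> list[tuple[str, float, float]]:
    results = []
    for part in text.split("<lora:")[1:]:
        end = part.find(">")
        if end == -1:
            continue  # unterminated tag, skipped just like in the node
        fields = part[:end].split(":")
        if len(fields) < 2:
            continue
        name = fields[0].strip()
        model = float(fields[1])
        clip = float(fields[2]) if len(fields) >= 3 else model
        results.append((name, model, clip))
    return results

assert parse_lora_syntax("a scene <lora:wan_motion:0.7> <lora:detail:0.8:0.5>") == [
    ("wan_motion", 0.7, 0.7),
    ("detail", 0.8, 0.5),
]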

View File

@@ -55,7 +55,7 @@ class RecipeMetadataParser(ABC):
# Unpack the tuple to get the actual data
civitai_info, error_msg = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)
if not civitai_info or civitai_info.get("error") == "Model not found":
if not civitai_info or error_msg == "Model not found":
# Model not found or deleted
lora_entry['isDeleted'] = True
lora_entry['thumbnailUrl'] = '/loras_static/images/no-preview.png'
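
This fix leans on the (data, error) tuple convention the metadata providers use. A small sketch of the normalization idiom, mirroring the isinstance check above, so the error channel is always inspectable:

def unpack_info(result):
    # Accept either bare data or a (data, error_msg) tuple.
    return result if isinstance(result, tuple) else (result, None)

info, err = unpack_info((None, "Model not found"))
assert info is None and err == "Model not found"
info, err = unpack_info({"id": 12345})
assert err is None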

View File

@@ -6,6 +6,7 @@ import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider
logger = logging.getLogger(__name__)
@@ -30,6 +31,9 @@ class AutomaticMetadataParser(RecipeMetadataParser):
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
"""Parse metadata from Automatic1111 format"""
try:
# Get metadata provider instead of using civitai_client directly
metadata_provider = await get_default_metadata_provider()
# Split on Negative prompt if it exists
if "Negative prompt:" in user_comment:
parts = user_comment.split('Negative prompt:', 1)
@@ -216,9 +220,9 @@ class AutomaticMetadataParser(RecipeMetadataParser):
}
# Get additional info from Civitai
if civitai_client:
if metadata_provider:
try:
civitai_info = await civitai_client.get_model_version_info(resource.get("modelVersionId"))
civitai_info = await metadata_provider.get_model_version_info(resource.get("modelVersionId"))
populated_entry = await self.populate_lora_from_civitai(
lora_entry,
civitai_info,
@@ -271,11 +275,11 @@ class AutomaticMetadataParser(RecipeMetadataParser):
}
# Try to get info from Civitai
if civitai_client:
if metadata_provider:
try:
if lora_hash:
# If we have hash, use it for lookup
civitai_info = await civitai_client.get_model_by_hash(lora_hash)
civitai_info = await metadata_provider.get_model_by_hash(lora_hash)
else:
civitai_info = None
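
After this series of changes the recipe parsers only touch two provider calls. A Protocol capturing that surface (an editorial sketch; the project's actual provider type may differ):

from typing import Any, Protocol

class MetadataProviderLike(Protocol):
    """The two calls the recipe parsers rely on (sketch, not the real class)."""

    async def get_model_version_info(self, version_id) -> Any: ...

    async def get_model_by_hash(self, sha256: str) -> Any: ...

Any backend returned by get_default_metadata_provider(), whether it proxies the Civitai API or a local archive database, only needs to satisfy this surface.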

View File

@@ -5,6 +5,7 @@ import logging
from typing import Dict, Any, Union
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider
logger = logging.getLogger(__name__)
@@ -36,12 +37,15 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
Args:
metadata: The metadata from the image (dict)
recipe_scanner: Optional recipe scanner service
civitai_client: Optional Civitai API client
civitai_client: Optional Civitai API client (deprecated, use metadata_provider instead)
Returns:
Dict containing parsed recipe data
"""
try:
# Get metadata provider instead of using civitai_client directly
metadata_provider = await get_default_metadata_provider()
# Initialize result structure
result = {
'base_model': None,
@@ -53,6 +57,14 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
# Track already added LoRAs to prevent duplicates
added_loras = {} # key: model_version_id or hash, value: index in result["loras"]
# Extract hash information from hashes field for LoRA matching
lora_hashes = {}
if "hashes" in metadata and isinstance(metadata["hashes"], dict):
for key, hash_value in metadata["hashes"].items():
if key.startswith("LORA:"):
lora_name = key.replace("LORA:", "")
lora_hashes[lora_name] = hash_value
# Extract prompt and negative prompt
if "prompt" in metadata:
result["gen_params"]["prompt"] = metadata["prompt"]
@@ -77,9 +89,9 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
# Extract base model information - directly if available
if "baseModel" in metadata:
result["base_model"] = metadata["baseModel"]
elif "Model hash" in metadata and civitai_client:
elif "Model hash" in metadata and metadata_provider:
model_hash = metadata["Model hash"]
model_info = await civitai_client.get_model_by_hash(model_hash)
model_info, error = await metadata_provider.get_model_by_hash(model_hash)
if model_info:
result["base_model"] = model_info.get("baseModel", "")
elif "Model" in metadata and isinstance(metadata.get("resources"), list):
@@ -87,8 +99,8 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
for resource in metadata.get("resources", []):
if resource.get("type") == "model" and resource.get("name") == metadata.get("Model"):
# This is likely the checkpoint model
if civitai_client and resource.get("hash"):
model_info = await civitai_client.get_model_by_hash(resource.get("hash"))
if metadata_provider and resource.get("hash"):
model_info, error = await metadata_provider.get_model_by_hash(resource.get("hash"))
if model_info:
result["base_model"] = model_info.get("baseModel", "")
@@ -101,6 +113,10 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
if resource.get("type", "lora") == "lora":
lora_hash = resource.get("hash", "")
# Try to get hash from the hashes field if not present in resource
if not lora_hash and resource.get("name"):
lora_hash = lora_hashes.get(resource["name"], "")
# Skip LoRAs without proper identification (hash or modelVersionId)
if not lora_hash and not resource.get("modelVersionId"):
logger.debug(f"Skipping LoRA resource '{resource.get('name', 'Unknown')}' - no hash or modelVersionId")
@@ -126,9 +142,9 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
}
# Try to get info from Civitai if hash is available
if lora_entry['hash'] and civitai_client:
if lora_entry['hash'] and metadata_provider:
try:
civitai_info = await civitai_client.get_model_by_hash(lora_hash)
civitai_info = await metadata_provider.get_model_by_hash(lora_hash)
populated_entry = await self.populate_lora_from_civitai(
lora_entry,
@@ -182,14 +198,10 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
}
# Try to get info from Civitai if modelVersionId is available
if version_id and civitai_client:
if version_id and metadata_provider:
try:
# Use get_model_version_info instead of get_model_version
civitai_info, error = await civitai_client.get_model_version_info(version_id)
if error:
logger.warning(f"Error getting model version info: {error}")
continue
civitai_info = await metadata_provider.get_model_version_info(version_id)
populated_entry = await self.populate_lora_from_civitai(
lora_entry,
@@ -247,35 +259,84 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
'isDeleted': False
}
# If we have a version ID and civitai client, try to get more info
if version_id and civitai_client:
# If we have a version ID and metadata provider, try to get more info
if version_id and metadata_provider:
try:
# Use get_model_version_info with the version ID
civitai_info, error = await civitai_client.get_model_version_info(version_id)
civitai_info = await metadata_provider.get_model_version_info(version_id)
if error:
logger.warning(f"Error getting model version info: {error}")
else:
populated_entry = await self.populate_lora_from_civitai(
lora_entry,
civitai_info,
recipe_scanner,
base_model_counts
)
populated_entry = await self.populate_lora_from_civitai(
lora_entry,
civitai_info,
recipe_scanner,
base_model_counts
)
if populated_entry is None:
continue # Skip invalid LoRA types
if populated_entry is None:
continue # Skip invalid LoRA types
lora_entry = populated_entry
# Track this LoRA for deduplication
if version_id:
added_loras[version_id] = len(result["loras"])
lora_entry = populated_entry
# Track this LoRA for deduplication
if version_id:
added_loras[version_id] = len(result["loras"])
except Exception as e:
logger.error(f"Error fetching Civitai info for model ID {version_id}: {e}")
result["loras"].append(lora_entry)
# If we found LoRA hashes in the metadata but haven't already
# populated entries for them, fall back to creating LoRAs from
# the hashes section. Some Civitai image responses only include
# LoRA information here without explicit resources entries.
for lora_name, lora_hash in lora_hashes.items():
if not lora_hash:
continue
# Skip LoRAs we've already added via resources or other fields
if lora_hash in added_loras:
continue
lora_entry = {
'name': lora_name,
'type': "lora",
'weight': 1.0,
'hash': lora_hash,
'existsLocally': False,
'localPath': None,
'file_name': lora_name,
'thumbnailUrl': '/loras_static/images/no-preview.png',
'baseModel': '',
'size': 0,
'downloadUrl': '',
'isDeleted': False
}
if metadata_provider:
try:
civitai_info = await metadata_provider.get_model_by_hash(lora_hash)
populated_entry = await self.populate_lora_from_civitai(
lora_entry,
civitai_info,
recipe_scanner,
base_model_counts,
lora_hash
)
if populated_entry is None:
continue
lora_entry = populated_entry
if 'id' in lora_entry and lora_entry['id']:
added_loras[str(lora_entry['id'])] = len(result["loras"])
except Exception as e:
logger.error(f"Error fetching Civitai info for LoRA hash {lora_hash}: {e}")
added_loras[lora_hash] = len(result["loras"])
result["loras"].append(lora_entry)
# Check for LoRA info in the format "Lora_0 Model hash", "Lora_0 Model name", etc.
lora_index = 0
while f"Lora_{lora_index} Model hash" in metadata and f"Lora_{lora_index} Model name" in metadata:
@@ -304,9 +365,9 @@ class CivitaiApiMetadataParser(RecipeMetadataParser):
}
# Try to get info from Civitai if hash is available
if lora_entry['hash'] and civitai_client:
if lora_entry['hash'] and metadata_provider:
try:
civitai_info = await civitai_client.get_model_by_hash(lora_hash)
civitai_info = await metadata_provider.get_model_by_hash(lora_hash)
populated_entry = await self.populate_lora_from_civitai(
lora_entry,
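
The new fallback path keys off "LORA:"-prefixed entries in the hashes block. The extraction step in isolation (this sketch uses str.removeprefix, Python 3.9+, where the diff uses str.replace):

def extract_lora_hashes(metadata: dict) -> dict[str, str]:
    hashes = metadata.get("hashes")
    if not isinstance(hashes, dict):
        return {}
    # removeprefix only strips a leading "LORA:", unlike replace(),
    # which would also rewrite the marker if it appeared mid-string.
    return {
        key.removeprefix("LORA:"): value
        for key, value in hashes.items()
        if key.startswith("LORA:")
    }

assert extract_lora_hashes(
    {"hashes": {"LORA:detail": "aabbcc", "Model": "ddeeff"}}
) == {"detail": "aabbcc"}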

View File

@@ -6,6 +6,7 @@ import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider
logger = logging.getLogger(__name__)
@@ -26,6 +27,9 @@ class ComfyMetadataParser(RecipeMetadataParser):
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
"""Parse metadata from Civitai ComfyUI metadata format"""
try:
# Get metadata provider instead of using civitai_client directly
metadata_provider = await get_default_metadata_provider()
data = json.loads(user_comment)
loras = []
@@ -73,10 +77,10 @@ class ComfyMetadataParser(RecipeMetadataParser):
'isDeleted': False
}
# Get additional info from Civitai if client is available
if civitai_client:
# Get additional info from Civitai if metadata provider is available
if metadata_provider:
try:
civitai_info_tuple = await civitai_client.get_model_version_info(model_version_id)
civitai_info_tuple = await metadata_provider.get_model_version_info(model_version_id)
# Populate lora entry with Civitai info
populated_entry = await self.populate_lora_from_civitai(
lora_entry,
@@ -116,9 +120,9 @@ class ComfyMetadataParser(RecipeMetadataParser):
}
# Get additional checkpoint info from Civitai
if civitai_client:
if metadata_provider:
try:
civitai_info_tuple = await civitai_client.get_model_version_info(checkpoint_version_id)
civitai_info_tuple = await metadata_provider.get_model_version_info(checkpoint_version_id)
civitai_info, _ = civitai_info_tuple if isinstance(civitai_info_tuple, tuple) else (civitai_info_tuple, None)
# Populate checkpoint with Civitai info
checkpoint = await self.populate_checkpoint_from_civitai(checkpoint, civitai_info)

View File

@@ -5,6 +5,7 @@ import logging
from typing import Dict, Any
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider
logger = logging.getLogger(__name__)
@@ -18,8 +19,11 @@ class MetaFormatParser(RecipeMetadataParser):
return re.search(self.METADATA_MARKER, user_comment, re.IGNORECASE | re.DOTALL) is not None
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
"""Parse metadata from images with meta format metadata"""
"""Parse metadata from images with meta format metadata (Lora_N Model hash format)"""
try:
# Get metadata provider instead of using civitai_client directly
metadata_provider = await get_default_metadata_provider()
# Extract prompt and negative prompt
parts = user_comment.split('Negative prompt:', 1)
prompt = parts[0].strip()
@@ -122,9 +126,9 @@ class MetaFormatParser(RecipeMetadataParser):
}
# Get info from Civitai by hash if available
if civitai_client and hash_value:
if metadata_provider and hash_value:
try:
civitai_info = await civitai_client.get_model_by_hash(hash_value)
civitai_info = await metadata_provider.get_model_by_hash(hash_value)
# Populate lora entry with Civitai info
populated_entry = await self.populate_lora_from_civitai(
lora_entry,

View File

@@ -7,6 +7,7 @@ from typing import Dict, Any
from ...config import config
from ..base import RecipeMetadataParser
from ..constants import GEN_PARAM_KEYS
from ...services.metadata_service import get_default_metadata_provider
logger = logging.getLogger(__name__)
@@ -23,6 +24,9 @@ class RecipeFormatParser(RecipeMetadataParser):
async def parse_metadata(self, user_comment: str, recipe_scanner=None, civitai_client=None) -> Dict[str, Any]:
"""Parse metadata from images with dedicated recipe metadata format"""
try:
# Get metadata provider instead of using civitai_client directly
metadata_provider = await get_default_metadata_provider()
# Extract recipe metadata from user comment
try:
# Look for recipe metadata section
@@ -71,9 +75,9 @@ class RecipeFormatParser(RecipeMetadataParser):
lora_entry['localPath'] = None
# Try to get additional info from Civitai if we have a model version ID
if lora.get('modelVersionId') and civitai_client:
if lora.get('modelVersionId') and metadata_provider:
try:
civitai_info_tuple = await civitai_client.get_model_version_info(lora['modelVersionId'])
civitai_info_tuple = await metadata_provider.get_model_version_info(lora['modelVersionId'])
# Populate lora entry with Civitai info
populated_entry = await self.populate_lora_from_civitai(
lora_entry,

File diff suppressed because it is too large

View File

@@ -0,0 +1,217 @@
"""Base infrastructure shared across recipe routes."""
from __future__ import annotations
import logging
import os
from typing import Callable, Mapping
import jinja2
from aiohttp import web
from ..config import config
from ..recipes import RecipeParserFactory
from ..services.downloader import get_downloader
from ..services.recipes import (
RecipeAnalysisService,
RecipePersistenceService,
RecipeSharingService,
)
from ..services.server_i18n import server_i18n
from ..services.service_registry import ServiceRegistry
from ..services.settings_manager import get_settings_manager
from ..utils.constants import CARD_PREVIEW_WIDTH
from ..utils.exif_utils import ExifUtils
from .handlers.recipe_handlers import (
RecipeAnalysisHandler,
RecipeHandlerSet,
RecipeListingHandler,
RecipeManagementHandler,
RecipePageView,
RecipeQueryHandler,
RecipeSharingHandler,
)
from .recipe_route_registrar import ROUTE_DEFINITIONS
logger = logging.getLogger(__name__)
class BaseRecipeRoutes:
"""Common dependency and startup wiring for recipe routes."""
_HANDLER_NAMES: tuple[str, ...] = tuple(
definition.handler_name for definition in ROUTE_DEFINITIONS
)
template_name: str = "recipes.html"
def __init__(self) -> None:
self.recipe_scanner = None
self.lora_scanner = None
self.civitai_client = None
self.settings = get_settings_manager()
self.server_i18n = server_i18n
self.template_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(config.templates_path),
autoescape=True,
)
self._i18n_registered = False
self._startup_hooks_registered = False
self._handler_set: RecipeHandlerSet | None = None
self._handler_mapping: dict[str, Callable] | None = None
async def attach_dependencies(self, app: web.Application | None = None) -> None:
"""Resolve shared services from the registry."""
await self._ensure_services()
self._ensure_i18n_filter()
async def ensure_dependencies_ready(self) -> None:
"""Ensure dependencies are available for request handlers."""
if self.recipe_scanner is None or self.civitai_client is None:
await self.attach_dependencies()
def register_startup_hooks(self, app: web.Application) -> None:
"""Register startup hooks once for dependency wiring."""
if self._startup_hooks_registered:
return
app.on_startup.append(self.attach_dependencies)
app.on_startup.append(self.prewarm_cache)
self._startup_hooks_registered = True
async def prewarm_cache(self, app: web.Application | None = None) -> None:
"""Pre-load recipe and LoRA caches on startup."""
try:
await self.attach_dependencies(app)
if self.lora_scanner is not None:
await self.lora_scanner.get_cached_data()
hash_index = getattr(self.lora_scanner, "_hash_index", None)
if hash_index is not None and hasattr(hash_index, "_hash_to_path"):
_ = len(hash_index._hash_to_path)
if self.recipe_scanner is not None:
await self.recipe_scanner.get_cached_data(force_refresh=True)
except Exception as exc:
logger.error("Error pre-warming recipe cache: %s", exc, exc_info=True)
def to_route_mapping(self) -> Mapping[str, Callable]:
"""Return a mapping of handler name to coroutine for registrar binding."""
if self._handler_mapping is None:
handler_set = self._create_handler_set()
self._handler_set = handler_set
self._handler_mapping = handler_set.to_route_mapping()
return self._handler_mapping
# Internal helpers -------------------------------------------------
async def _ensure_services(self) -> None:
if self.recipe_scanner is None:
self.recipe_scanner = await ServiceRegistry.get_recipe_scanner()
self.lora_scanner = getattr(self.recipe_scanner, "_lora_scanner", None)
if self.civitai_client is None:
self.civitai_client = await ServiceRegistry.get_civitai_client()
def _ensure_i18n_filter(self) -> None:
if not self._i18n_registered:
self.template_env.filters["t"] = self.server_i18n.create_template_filter()
self._i18n_registered = True
def get_handler_owner(self):
"""Return the object supplying bound handler coroutines."""
if self._handler_set is None:
self._handler_set = self._create_handler_set()
return self._handler_set
def _create_handler_set(self) -> RecipeHandlerSet:
recipe_scanner_getter = lambda: self.recipe_scanner
civitai_client_getter = lambda: self.civitai_client
standalone_mode = os.environ.get("LORA_MANAGER_STANDALONE", "0") == "1"
if not standalone_mode:
from ..metadata_collector import get_metadata # type: ignore[import-not-found]
from ..metadata_collector.metadata_processor import ( # type: ignore[import-not-found]
MetadataProcessor,
)
from ..metadata_collector.metadata_registry import ( # type: ignore[import-not-found]
MetadataRegistry,
)
else: # pragma: no cover - optional dependency path
get_metadata = None # type: ignore[assignment]
MetadataProcessor = None # type: ignore[assignment]
MetadataRegistry = None # type: ignore[assignment]
analysis_service = RecipeAnalysisService(
exif_utils=ExifUtils,
recipe_parser_factory=RecipeParserFactory,
downloader_factory=get_downloader,
metadata_collector=get_metadata,
metadata_processor_cls=MetadataProcessor,
metadata_registry_cls=MetadataRegistry,
standalone_mode=standalone_mode,
logger=logger,
)
persistence_service = RecipePersistenceService(
exif_utils=ExifUtils,
card_preview_width=CARD_PREVIEW_WIDTH,
logger=logger,
)
sharing_service = RecipeSharingService(logger=logger)
page_view = RecipePageView(
ensure_dependencies_ready=self.ensure_dependencies_ready,
settings_service=self.settings,
server_i18n=self.server_i18n,
template_env=self.template_env,
template_name=self.template_name,
recipe_scanner_getter=recipe_scanner_getter,
logger=logger,
)
listing = RecipeListingHandler(
ensure_dependencies_ready=self.ensure_dependencies_ready,
recipe_scanner_getter=recipe_scanner_getter,
logger=logger,
)
query = RecipeQueryHandler(
ensure_dependencies_ready=self.ensure_dependencies_ready,
recipe_scanner_getter=recipe_scanner_getter,
format_recipe_file_url=listing.format_recipe_file_url,
logger=logger,
)
management = RecipeManagementHandler(
ensure_dependencies_ready=self.ensure_dependencies_ready,
recipe_scanner_getter=recipe_scanner_getter,
logger=logger,
persistence_service=persistence_service,
analysis_service=analysis_service,
)
analysis = RecipeAnalysisHandler(
ensure_dependencies_ready=self.ensure_dependencies_ready,
recipe_scanner_getter=recipe_scanner_getter,
civitai_client_getter=civitai_client_getter,
logger=logger,
analysis_service=analysis_service,
)
sharing = RecipeSharingHandler(
ensure_dependencies_ready=self.ensure_dependencies_ready,
recipe_scanner_getter=recipe_scanner_getter,
logger=logger,
sharing_service=sharing_service,
)
return RecipeHandlerSet(
page_view=page_view,
listing=listing,
query=query,
management=management,
analysis=analysis,
sharing=sharing,
)
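
to_route_mapping() builds the handler set once and caches it, so a registrar can bind every declarative definition against a single mapping. A sketch of the consumption side; the method/path attributes on RouteDefinition are assumed by analogy with the example-images registrar later in this diff, since BaseRecipeRoutes itself only reads handler_name:

from aiohttp import web

def bind_recipe_routes(app: web.Application, routes: BaseRecipeRoutes) -> None:
    mapping = routes.to_route_mapping()  # lazy, cached on the instance
    for definition in ROUTE_DEFINITIONS:
        handler = mapping[definition.handler_name]
        app.router.add_route(definition.method, definition.path, handler)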

View File

@@ -2,6 +2,7 @@ import logging
from aiohttp import web
from .base_model_routes import BaseModelRoutes
from .model_route_registrar import ModelRouteRegistrar
from ..services.checkpoint_service import CheckpointService
from ..services.service_registry import ServiceRegistry
from ..config import config
@@ -13,19 +14,16 @@ class CheckpointRoutes(BaseModelRoutes):
def __init__(self):
"""Initialize Checkpoint routes with Checkpoint service"""
# Service will be initialized later via setup_routes
self.service = None
self.civitai_client = None
super().__init__()
self.template_name = "checkpoints.html"
async def initialize_services(self):
"""Initialize services from ServiceRegistry"""
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
self.service = CheckpointService(checkpoint_scanner)
self.civitai_client = await ServiceRegistry.get_civitai_client()
# Initialize parent with the service
super().__init__(self.service)
# Attach service dependencies
self.attach_service(self.service)
def setup_routes(self, app: web.Application):
"""Setup Checkpoint routes"""
@@ -35,17 +33,22 @@ class CheckpointRoutes(BaseModelRoutes):
# Setup common routes with 'checkpoints' prefix (includes page route)
super().setup_routes(app, 'checkpoints')
def setup_specific_routes(self, app: web.Application, prefix: str):
def setup_specific_routes(self, registrar: ModelRouteRegistrar, prefix: str):
"""Setup Checkpoint-specific routes"""
# Checkpoint-specific CivitAI integration
app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_checkpoint)
# Checkpoint info by name
app.router.add_get(f'/api/{prefix}/info/{{name}}', self.get_checkpoint_info)
registrar.add_prefixed_route('GET', '/api/lm/{prefix}/info/{name}', prefix, self.get_checkpoint_info)
# Checkpoint roots and Unet roots
app.router.add_get(f'/api/{prefix}/checkpoints_roots', self.get_checkpoints_roots)
app.router.add_get(f'/api/{prefix}/unet_roots', self.get_unet_roots)
registrar.add_prefixed_route('GET', '/api/lm/{prefix}/checkpoints_roots', prefix, self.get_checkpoints_roots)
registrar.add_prefixed_route('GET', '/api/lm/{prefix}/unet_roots', prefix, self.get_unet_roots)
def _validate_civitai_model_type(self, model_type: str) -> bool:
"""Validate CivitAI model type for Checkpoint"""
return model_type.lower() == 'checkpoint'
def _get_expected_model_types(self) -> str:
"""Get expected model types string for error messages"""
return "Checkpoint"
async def get_checkpoint_info(self, request: web.Request) -> web.Response:
"""Get detailed information for a specific checkpoint by name"""
@@ -62,53 +65,6 @@ class CheckpointRoutes(BaseModelRoutes):
logger.error(f"Error in get_checkpoint_info: {e}", exc_info=True)
return web.json_response({"error": str(e)}, status=500)
async def get_civitai_versions_checkpoint(self, request: web.Request) -> web.Response:
"""Get available versions for a Civitai checkpoint model with local availability info"""
try:
model_id = request.match_info['model_id']
response = await self.civitai_client.get_model_versions(model_id)
if not response or not response.get('modelVersions'):
return web.Response(status=404, text="Model not found")
versions = response.get('modelVersions', [])
model_type = response.get('type', '')
# Check model type - should be Checkpoint
if model_type.lower() != 'checkpoint':
return web.json_response({
'error': f"Model type mismatch. Expected Checkpoint, got {model_type}"
}, status=400)
# Check local availability for each version
for version in versions:
# Find the primary model file (type="Model" and primary=true) in the files list
model_file = next((file for file in version.get('files', [])
if file.get('type') == 'Model' and file.get('primary') == True), None)
# If no primary file found, try to find any model file
if not model_file:
model_file = next((file for file in version.get('files', [])
if file.get('type') == 'Model'), None)
if model_file:
sha256 = model_file.get('hashes', {}).get('SHA256')
if sha256:
# Set existsLocally and localPath at the version level
version['existsLocally'] = self.service.has_hash(sha256)
if version['existsLocally']:
version['localPath'] = self.service.get_path_by_hash(sha256)
# Also set the model file size at the version level for easier access
version['modelSizeKB'] = model_file.get('sizeKB')
else:
# No model file found in this version
version['existsLocally'] = False
return web.json_response(versions)
except Exception as e:
logger.error(f"Error fetching checkpoint model versions: {e}")
return web.Response(status=500, text=str(e))
async def get_checkpoints_roots(self, request: web.Request) -> web.Response:
"""Return the list of checkpoint roots from config"""
try:
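
These hunks also move endpoints from /api/{prefix}/... to /api/lm/{prefix}/..., giving LoRA Manager its own URL namespace. The registrar's add_prefixed_route is not shown in this diff; one plausible sketch, assuming it substitutes the {prefix} placeholder while leaving genuine path variables like {name} for aiohttp to match:

from aiohttp import web

class ModelRouteRegistrarSketch:
    def __init__(self, app: web.Application) -> None:
        self._app = app

    def add_prefixed_route(self, method: str, template: str, prefix: str, handler) -> None:
        # '/api/lm/{prefix}/info/{name}' + 'checkpoints'
        #   -> '/api/lm/checkpoints/info/{name}'
        self._app.router.add_route(method, template.replace("{prefix}", prefix), handler)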

View File

@@ -2,6 +2,7 @@ import logging
from aiohttp import web
from .base_model_routes import BaseModelRoutes
from .model_route_registrar import ModelRouteRegistrar
from ..services.embedding_service import EmbeddingService
from ..services.service_registry import ServiceRegistry
@@ -12,19 +13,16 @@ class EmbeddingRoutes(BaseModelRoutes):
def __init__(self):
"""Initialize Embedding routes with Embedding service"""
# Service will be initialized later via setup_routes
self.service = None
self.civitai_client = None
super().__init__()
self.template_name = "embeddings.html"
async def initialize_services(self):
"""Initialize services from ServiceRegistry"""
embedding_scanner = await ServiceRegistry.get_embedding_scanner()
self.service = EmbeddingService(embedding_scanner)
self.civitai_client = await ServiceRegistry.get_civitai_client()
# Initialize parent with the service
super().__init__(self.service)
# Attach service dependencies
self.attach_service(self.service)
def setup_routes(self, app: web.Application):
"""Setup Embedding routes"""
@@ -34,13 +32,18 @@ class EmbeddingRoutes(BaseModelRoutes):
# Setup common routes with 'embeddings' prefix (includes page route)
super().setup_routes(app, 'embeddings')
def setup_specific_routes(self, app: web.Application, prefix: str):
def setup_specific_routes(self, registrar: ModelRouteRegistrar, prefix: str):
"""Setup Embedding-specific routes"""
# Embedding-specific CivitAI integration
app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_embedding)
# Embedding info by name
app.router.add_get(f'/api/{prefix}/info/{{name}}', self.get_embedding_info)
registrar.add_prefixed_route('GET', '/api/lm/{prefix}/info/{name}', prefix, self.get_embedding_info)
def _validate_civitai_model_type(self, model_type: str) -> bool:
"""Validate CivitAI model type for Embedding"""
return model_type.lower() == 'textualinversion'
def _get_expected_model_types(self) -> str:
"""Get expected model types string for error messages"""
return "TextualInversion"
async def get_embedding_info(self, request: web.Request) -> web.Response:
"""Get detailed information for a specific embedding by name"""
@@ -56,50 +59,3 @@ class EmbeddingRoutes(BaseModelRoutes):
except Exception as e:
logger.error(f"Error in get_embedding_info: {e}", exc_info=True)
return web.json_response({"error": str(e)}, status=500)
async def get_civitai_versions_embedding(self, request: web.Request) -> web.Response:
"""Get available versions for a Civitai embedding model with local availability info"""
try:
model_id = request.match_info['model_id']
response = await self.civitai_client.get_model_versions(model_id)
if not response or not response.get('modelVersions'):
return web.Response(status=404, text="Model not found")
versions = response.get('modelVersions', [])
model_type = response.get('type', '')
# Check model type - should be TextualInversion (Embedding)
if model_type.lower() not in ['textualinversion', 'embedding']:
return web.json_response({
'error': f"Model type mismatch. Expected TextualInversion/Embedding, got {model_type}"
}, status=400)
# Check local availability for each version
for version in versions:
# Find the primary model file (type="Model" and primary=true) in the files list
model_file = next((file for file in version.get('files', [])
if file.get('type') == 'Model' and file.get('primary') == True), None)
# If no primary file found, try to find any model file
if not model_file:
model_file = next((file for file in version.get('files', [])
if file.get('type') == 'Model'), None)
if model_file:
sha256 = model_file.get('hashes', {}).get('SHA256')
if sha256:
# Set existsLocally and localPath at the version level
version['existsLocally'] = self.service.has_hash(sha256)
if version['existsLocally']:
version['localPath'] = self.service.get_path_by_hash(sha256)
# Also set the model file size at the version level for easier access
version['modelSizeKB'] = model_file.get('sizeKB')
else:
# No model file found in this version
version['existsLocally'] = False
return web.json_response(versions)
except Exception as e:
logger.error(f"Error fetching embedding model versions: {e}")
return web.Response(status=500, text=str(e))

View File

@@ -0,0 +1,62 @@
"""Route registrar for example image endpoints."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable, Iterable, Mapping
from aiohttp import web
@dataclass(frozen=True)
class RouteDefinition:
"""Declarative configuration for a HTTP route."""
method: str
path: str
handler_name: str
ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
RouteDefinition("POST", "/api/lm/download-example-images", "download_example_images"),
RouteDefinition("POST", "/api/lm/import-example-images", "import_example_images"),
RouteDefinition("GET", "/api/lm/example-images-status", "get_example_images_status"),
RouteDefinition("POST", "/api/lm/pause-example-images", "pause_example_images"),
RouteDefinition("POST", "/api/lm/resume-example-images", "resume_example_images"),
RouteDefinition("POST", "/api/lm/open-example-images-folder", "open_example_images_folder"),
RouteDefinition("GET", "/api/lm/example-image-files", "get_example_image_files"),
RouteDefinition("GET", "/api/lm/has-example-images", "has_example_images"),
RouteDefinition("POST", "/api/lm/delete-example-image", "delete_example_image"),
RouteDefinition("POST", "/api/lm/force-download-example-images", "force_download_example_images"),
RouteDefinition("POST", "/api/lm/cleanup-example-image-folders", "cleanup_example_image_folders"),
)
class ExampleImagesRouteRegistrar:
"""Bind declarative example image routes to an aiohttp router."""
_METHOD_MAP = {
"GET": "add_get",
"POST": "add_post",
"PUT": "add_put",
"DELETE": "add_delete",
}
def __init__(self, app: web.Application) -> None:
self._app = app
def register_routes(
self,
handler_lookup: Mapping[str, Callable[[web.Request], object]],
*,
definitions: Iterable[RouteDefinition] = ROUTE_DEFINITIONS,
) -> None:
"""Register each route definition using the supplied handlers."""
for definition in definitions:
handler = handler_lookup[definition.handler_name]
self._bind_route(definition.method, definition.path, handler)
def _bind_route(self, method: str, path: str, handler: Callable[[web.Request], object]) -> None:
add_method_name = self._METHOD_MAP[method.upper()]
add_method = getattr(self._app.router, add_method_name)
add_method(path, handler)
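
A usage sketch: any mapping that covers every handler_name in ROUTE_DEFINITIONS can be bound, which is what makes the controller/registrar split easy to exercise with stubs:

from aiohttp import web

async def _stub(request: web.Request) -> web.Response:
    return web.json_response({"ok": True})

app = web.Application()
registrar = ExampleImagesRouteRegistrar(app)
# A missing name raises KeyError inside register_routes, surfacing
# incomplete wiring at startup rather than at request time.
registrar.register_routes({d.handler_name: _stub for d in ROUTE_DEFINITIONS})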

View File

@@ -1,74 +1,88 @@
from __future__ import annotations
import logging
from ..utils.example_images_download_manager import DownloadManager
from ..utils.example_images_processor import ExampleImagesProcessor
from typing import Callable, Mapping
from aiohttp import web
from .example_images_route_registrar import ExampleImagesRouteRegistrar
from .handlers.example_images_handlers import (
ExampleImagesDownloadHandler,
ExampleImagesFileHandler,
ExampleImagesHandlerSet,
ExampleImagesManagementHandler,
)
from ..services.use_cases.example_images import (
DownloadExampleImagesUseCase,
ImportExampleImagesUseCase,
)
from ..utils.example_images_download_manager import (
DownloadManager,
get_default_download_manager,
)
from ..utils.example_images_file_manager import ExampleImagesFileManager
from ..services.websocket_manager import ws_manager
from ..utils.example_images_processor import ExampleImagesProcessor
from ..services.example_images_cleanup_service import ExampleImagesCleanupService
logger = logging.getLogger(__name__)
class ExampleImagesRoutes:
"""Routes for example images related functionality"""
@staticmethod
def setup_routes(app):
"""Register example images routes"""
app.router.add_post('/api/download-example-images', ExampleImagesRoutes.download_example_images)
app.router.add_post('/api/import-example-images', ExampleImagesRoutes.import_example_images)
app.router.add_get('/api/example-images-status', ExampleImagesRoutes.get_example_images_status)
app.router.add_post('/api/pause-example-images', ExampleImagesRoutes.pause_example_images)
app.router.add_post('/api/resume-example-images', ExampleImagesRoutes.resume_example_images)
app.router.add_post('/api/open-example-images-folder', ExampleImagesRoutes.open_example_images_folder)
app.router.add_get('/api/example-image-files', ExampleImagesRoutes.get_example_image_files)
app.router.add_get('/api/has-example-images', ExampleImagesRoutes.has_example_images)
app.router.add_post('/api/delete-example-image', ExampleImagesRoutes.delete_example_image)
app.router.add_post('/api/force-download-example-images', ExampleImagesRoutes.force_download_example_images)
"""Route controller for example image endpoints."""
@staticmethod
async def download_example_images(request):
"""Download example images for models from Civitai"""
return await DownloadManager.start_download(request)
def __init__(
self,
*,
ws_manager,
download_manager: DownloadManager | None = None,
processor=ExampleImagesProcessor,
file_manager=ExampleImagesFileManager,
cleanup_service: ExampleImagesCleanupService | None = None,
) -> None:
if ws_manager is None:
raise ValueError("ws_manager is required")
self._download_manager = download_manager or get_default_download_manager(ws_manager)
self._processor = processor
self._file_manager = file_manager
self._cleanup_service = cleanup_service or ExampleImagesCleanupService()
self._handler_set: ExampleImagesHandlerSet | None = None
self._handler_mapping: Mapping[str, Callable[[web.Request], web.StreamResponse]] | None = None
@staticmethod
async def get_example_images_status(request):
"""Get the current status of example images download"""
return await DownloadManager.get_status(request)
@classmethod
def setup_routes(cls, app: web.Application, *, ws_manager) -> None:
"""Register routes on the given aiohttp application using default wiring."""
@staticmethod
async def pause_example_images(request):
"""Pause the example images download"""
return await DownloadManager.pause_download(request)
controller = cls(ws_manager=ws_manager)
controller.register(app)
@staticmethod
async def resume_example_images(request):
"""Resume the example images download"""
return await DownloadManager.resume_download(request)
@staticmethod
async def open_example_images_folder(request):
"""Open the example images folder for a specific model"""
return await ExampleImagesFileManager.open_folder(request)
def register(self, app: web.Application) -> None:
"""Bind the controller's handlers to the aiohttp router."""
@staticmethod
async def get_example_image_files(request):
"""Get list of example image files for a specific model"""
return await ExampleImagesFileManager.get_files(request)
registrar = ExampleImagesRouteRegistrar(app)
registrar.register_routes(self.to_route_mapping())
@staticmethod
async def import_example_images(request):
"""Import local example images for a model"""
return await ExampleImagesProcessor.import_images(request)
@staticmethod
async def has_example_images(request):
"""Check if example images folder exists and is not empty for a model"""
return await ExampleImagesFileManager.has_images(request)
def to_route_mapping(self) -> Mapping[str, Callable[[web.Request], web.StreamResponse]]:
"""Return the registrar-compatible mapping of handler names to callables."""
@staticmethod
async def delete_example_image(request):
"""Delete a custom example image for a model"""
return await ExampleImagesProcessor.delete_custom_image(request)
if self._handler_mapping is None:
handler_set = self._build_handler_set()
self._handler_set = handler_set
self._handler_mapping = handler_set.to_route_mapping()
return self._handler_mapping
@staticmethod
async def force_download_example_images(request):
"""Force download example images for specific models"""
return await DownloadManager.start_force_download(request)
def _build_handler_set(self) -> ExampleImagesHandlerSet:
logger.debug("Building ExampleImagesHandlerSet with %s, %s, %s", self._download_manager, self._processor, self._file_manager)
download_use_case = DownloadExampleImagesUseCase(download_manager=self._download_manager)
download_handler = ExampleImagesDownloadHandler(download_use_case, self._download_manager)
import_use_case = ImportExampleImagesUseCase(processor=self._processor)
management_handler = ExampleImagesManagementHandler(
import_use_case,
self._processor,
self._cleanup_service,
)
file_handler = ExampleImagesFileHandler(self._file_manager)
return ExampleImagesHandlerSet(
download=download_handler,
management=management_handler,
files=file_handler,
)
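
Two ways to wire the rewritten controller; the fake_* names below are placeholders for test doubles, not project objects:

# Default wiring, as the classmethod does it:
ExampleImagesRoutes.setup_routes(app, ws_manager=ws_manager)

# Or with injected dependencies for tests (constructor injection):
controller = ExampleImagesRoutes(
    ws_manager=fake_ws_manager,              # required; None raises ValueError
    download_manager=fake_download_manager,  # else get_default_download_manager(ws_manager)
)
controller.register(app)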

View File

@@ -0,0 +1,159 @@
"""Handler set for example image routes."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable, Mapping
from aiohttp import web
from ...services.use_cases.example_images import (
DownloadExampleImagesConfigurationError,
DownloadExampleImagesInProgressError,
DownloadExampleImagesUseCase,
ImportExampleImagesUseCase,
ImportExampleImagesValidationError,
)
from ...utils.example_images_download_manager import (
DownloadConfigurationError,
DownloadInProgressError,
DownloadNotRunningError,
ExampleImagesDownloadError,
)
from ...utils.example_images_processor import ExampleImagesImportError
class ExampleImagesDownloadHandler:
"""HTTP adapters for download-related example image endpoints."""
def __init__(
self,
download_use_case: DownloadExampleImagesUseCase,
download_manager,
) -> None:
self._download_use_case = download_use_case
self._download_manager = download_manager
async def download_example_images(self, request: web.Request) -> web.StreamResponse:
try:
payload = await request.json()
result = await self._download_use_case.execute(payload)
return web.json_response(result)
except DownloadExampleImagesInProgressError as exc:
response = {
'success': False,
'error': str(exc),
'status': exc.progress,
}
return web.json_response(response, status=400)
except DownloadExampleImagesConfigurationError as exc:
return web.json_response({'success': False, 'error': str(exc)}, status=400)
except ExampleImagesDownloadError as exc:
return web.json_response({'success': False, 'error': str(exc)}, status=500)
async def get_example_images_status(self, request: web.Request) -> web.StreamResponse:
result = await self._download_manager.get_status(request)
return web.json_response(result)
async def pause_example_images(self, request: web.Request) -> web.StreamResponse:
try:
result = await self._download_manager.pause_download(request)
return web.json_response(result)
except DownloadNotRunningError as exc:
return web.json_response({'success': False, 'error': str(exc)}, status=400)
async def resume_example_images(self, request: web.Request) -> web.StreamResponse:
try:
result = await self._download_manager.resume_download(request)
return web.json_response(result)
except DownloadNotRunningError as exc:
return web.json_response({'success': False, 'error': str(exc)}, status=400)
async def force_download_example_images(self, request: web.Request) -> web.StreamResponse:
try:
payload = await request.json()
result = await self._download_manager.start_force_download(payload)
return web.json_response(result)
except DownloadInProgressError as exc:
response = {
'success': False,
'error': str(exc),
'status': exc.progress_snapshot,
}
return web.json_response(response, status=400)
except DownloadConfigurationError as exc:
return web.json_response({'success': False, 'error': str(exc)}, status=400)
except ExampleImagesDownloadError as exc:
return web.json_response({'success': False, 'error': str(exc)}, status=500)
class ExampleImagesManagementHandler:
"""HTTP adapters for import/delete endpoints."""
def __init__(self, import_use_case: ImportExampleImagesUseCase, processor, cleanup_service) -> None:
self._import_use_case = import_use_case
self._processor = processor
self._cleanup_service = cleanup_service
async def import_example_images(self, request: web.Request) -> web.StreamResponse:
try:
result = await self._import_use_case.execute(request)
return web.json_response(result)
except ImportExampleImagesValidationError as exc:
return web.json_response({'success': False, 'error': str(exc)}, status=400)
except ExampleImagesImportError as exc:
return web.json_response({'success': False, 'error': str(exc)}, status=500)
async def delete_example_image(self, request: web.Request) -> web.StreamResponse:
return await self._processor.delete_custom_image(request)
async def cleanup_example_image_folders(self, request: web.Request) -> web.StreamResponse:
result = await self._cleanup_service.cleanup_example_image_folders()
if result.get('success') or result.get('partial_success'):
return web.json_response(result, status=200)
error_code = result.get('error_code')
status = 400 if error_code in {'path_not_configured', 'path_not_found'} else 500
return web.json_response(result, status=status)
class ExampleImagesFileHandler:
"""HTTP adapters for filesystem-centric endpoints."""
def __init__(self, file_manager) -> None:
self._file_manager = file_manager
async def open_example_images_folder(self, request: web.Request) -> web.StreamResponse:
return await self._file_manager.open_folder(request)
async def get_example_image_files(self, request: web.Request) -> web.StreamResponse:
return await self._file_manager.get_files(request)
async def has_example_images(self, request: web.Request) -> web.StreamResponse:
return await self._file_manager.has_images(request)
@dataclass(frozen=True)
class ExampleImagesHandlerSet:
"""Aggregate of handlers exposed to the registrar."""
download: ExampleImagesDownloadHandler
management: ExampleImagesManagementHandler
files: ExampleImagesFileHandler
def to_route_mapping(self) -> Mapping[str, Callable[[web.Request], web.StreamResponse]]:
"""Flatten handler methods into the registrar mapping."""
return {
"download_example_images": self.download.download_example_images,
"get_example_images_status": self.download.get_example_images_status,
"pause_example_images": self.download.pause_example_images,
"resume_example_images": self.download.resume_example_images,
"force_download_example_images": self.download.force_download_example_images,
"import_example_images": self.management.import_example_images,
"delete_example_image": self.management.delete_example_image,
"cleanup_example_image_folders": self.management.cleanup_example_image_folders,
"open_example_images_folder": self.files.open_example_images_folder,
"get_example_image_files": self.files.get_example_image_files,
"has_example_images": self.files.has_example_images,
}
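
Because domain errors are mapped to HTTP statuses inside these adapters, they can be smoke-tested without a running server. A hypothetical check that a validation failure surfaces as a 400, with AsyncMock standing in for both the use case and the request; the handler and exception come from the modules shown in this diff:

import asyncio
from unittest.mock import AsyncMock

async def demo() -> None:
    use_case = AsyncMock()
    use_case.execute.side_effect = ImportExampleImagesValidationError("model hash required")
    handler = ExampleImagesManagementHandler(use_case, processor=None, cleanup_service=None)
    response = await handler.import_example_images(AsyncMock())
    assert response.status == 400  # mapped from the validation error

asyncio.run(demo())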

View File

@@ -0,0 +1,969 @@
"""Handlers for miscellaneous routes.
The legacy :mod:`py.routes.misc_routes` module bundled HTTP wiring and
business logic in a single class. This module mirrors the model route
architecture by splitting the responsibilities into dedicated handler
objects that can be composed by the route controller.
"""
from __future__ import annotations
import asyncio
import logging
import os
import subprocess
import sys
from dataclasses import dataclass
from typing import Awaitable, Callable, Dict, Mapping, Protocol
from aiohttp import web
from ...config import config
from ...services.metadata_service import (
get_metadata_archive_manager,
update_metadata_providers,
)
from ...services.service_registry import ServiceRegistry
from ...services.settings_manager import get_settings_manager
from ...services.websocket_manager import ws_manager
from ...services.downloader import get_downloader
from ...utils.constants import (
CIVITAI_USER_MODEL_TYPES,
DEFAULT_NODE_COLOR,
NODE_TYPES,
SUPPORTED_MEDIA_EXTENSIONS,
VALID_LORA_TYPES,
)
from ...utils.civitai_utils import rewrite_preview_url
from ...utils.example_images_paths import is_valid_example_images_root
from ...utils.lora_metadata import extract_trained_words
from ...utils.usage_stats import UsageStats
logger = logging.getLogger(__name__)
class PromptServerProtocol(Protocol):
"""Subset of PromptServer used by the handlers."""
instance: "PromptServerProtocol"
def send_sync(self, event: str, payload: dict) -> None: # pragma: no cover - protocol
...
class DownloaderProtocol(Protocol):
async def refresh_session(self) -> None: # pragma: no cover - protocol
...
class UsageStatsFactory(Protocol):
def __call__(self) -> UsageStats: # pragma: no cover - protocol
...
class MetadataProviderProtocol(Protocol):
async def get_model_versions(self, model_id: int) -> dict | None: # pragma: no cover - protocol
...
class MetadataArchiveManagerProtocol(Protocol):
async def download_and_extract_database(
self, progress_callback: Callable[[str, str], None]
) -> bool: # pragma: no cover - protocol
...
async def remove_database(self) -> bool: # pragma: no cover - protocol
...
def is_database_available(self) -> bool: # pragma: no cover - protocol
...
def get_database_path(self) -> str | None: # pragma: no cover - protocol
...
class NodeRegistry:
"""Thread-safe registry for tracking LoRA nodes in active workflows."""
def __init__(self) -> None:
self._lock = asyncio.Lock()
self._nodes: Dict[str, dict] = {}
self._registry_updated = asyncio.Event()
async def register_nodes(self, nodes: list[dict]) -> None:
async with self._lock:
self._nodes.clear()
for node in nodes:
node_id = node["node_id"]
graph_id = str(node["graph_id"])
unique_id = f"{graph_id}:{node_id}"
node_type = node.get("type", "")
type_id = NODE_TYPES.get(node_type, 0)
bgcolor = node.get("bgcolor") or DEFAULT_NODE_COLOR
self._nodes[unique_id] = {
"id": node_id,
"graph_id": graph_id,
"graph_name": node.get("graph_name"),
"unique_id": unique_id,
"bgcolor": bgcolor,
"title": node.get("title"),
"type": type_id,
"type_name": node_type,
}
logger.debug("Registered %s nodes in registry", len(nodes))
self._registry_updated.set()
async def get_registry(self) -> dict:
async with self._lock:
return {
"nodes": dict(self._nodes),
"node_count": len(self._nodes),
}
async def wait_for_update(self, timeout: float = 1.0) -> bool:
self._registry_updated.clear()
try:
await asyncio.wait_for(self._registry_updated.wait(), timeout=timeout)
return True
except asyncio.TimeoutError:
return False
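# Editor's sketch (not part of the diff): how the registry is exercised.
# register_nodes replaces the whole node set and wakes waiters; keys are
# "graph_id:node_id" so node ids can repeat across nested subgraphs, and
# unknown type names fall back to type id 0 via NODE_TYPES.get(..., 0).
#
#   registry = NodeRegistry()
#   await registry.register_nodes(
#       [{"node_id": 5, "graph_id": "root", "type": "UnknownNode"}]
#   )
#   snapshot = await registry.get_registry()
#   assert snapshot["node_count"] == 1 and "root:5" in snapshot["nodes"]
#   changed = await registry.wait_for_update(timeout=1.0)  # False on timeout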
class HealthCheckHandler:
async def health_check(self, request: web.Request) -> web.Response:
return web.json_response({"status": "ok"})
class SettingsHandler:
"""Sync settings between backend and frontend."""
_SYNC_KEYS = (
"civitai_api_key",
"default_lora_root",
"default_checkpoint_root",
"default_embedding_root",
"base_model_path_mappings",
"download_path_templates",
"enable_metadata_archive_db",
"language",
"proxy_enabled",
"proxy_type",
"proxy_host",
"proxy_port",
"proxy_username",
"proxy_password",
"example_images_path",
"optimize_example_images",
"auto_download_example_images",
"blur_mature_content",
"autoplay_on_hover",
"display_density",
"card_info_display",
"include_trigger_words",
"show_only_sfw",
"compact_mode",
)
_PROXY_KEYS = {"proxy_enabled", "proxy_host", "proxy_port", "proxy_username", "proxy_password", "proxy_type"}
def __init__(
self,
*,
settings_service=None,
metadata_provider_updater: Callable[[], Awaitable[None]] = update_metadata_providers,
downloader_factory: Callable[[], Awaitable[DownloaderProtocol]] = get_downloader,
) -> None:
self._settings = settings_service or get_settings_manager()
self._metadata_provider_updater = metadata_provider_updater
self._downloader_factory = downloader_factory
async def get_libraries(self, request: web.Request) -> web.Response:
"""Return the registered libraries and the active selection."""
try:
snapshot = config.get_library_registry_snapshot()
libraries = snapshot.get("libraries", {})
active_library = snapshot.get("active_library", "")
return web.json_response(
{
"success": True,
"libraries": libraries,
"active_library": active_library,
}
)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error getting library registry: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def get_settings(self, request: web.Request) -> web.Response:
try:
response_data = {}
for key in self._SYNC_KEYS:
value = self._settings.get(key)
if value is not None:
response_data[key] = value
return web.json_response({"success": True, "settings": response_data})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error getting settings: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def activate_library(self, request: web.Request) -> web.Response:
"""Activate the selected library."""
try:
data = await request.json()
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error parsing activate library request: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": "Invalid JSON payload"}, status=400)
library_name = data.get("library") or data.get("library_name")
if not isinstance(library_name, str) or not library_name.strip():
return web.json_response(
{"success": False, "error": "Library name is required"}, status=400
)
try:
normalized_name = library_name.strip()
self._settings.activate_library(normalized_name)
snapshot = config.get_library_registry_snapshot()
libraries = snapshot.get("libraries", {})
active_library = snapshot.get("active_library", "")
return web.json_response(
{
"success": True,
"active_library": active_library,
"libraries": libraries,
}
)
except KeyError as exc:
logger.debug("Attempted to activate unknown library '%s'", library_name)
return web.json_response({"success": False, "error": str(exc)}, status=404)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error activating library '%s': %s", library_name, exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def update_settings(self, request: web.Request) -> web.Response:
try:
data = await request.json()
proxy_changed = False
for key, value in data.items():
if value == self._settings.get(key):
continue
if key == "example_images_path" and value:
validation_error = self._validate_example_images_path(value)
if validation_error:
return web.json_response({"success": False, "error": validation_error})
if value == "__DELETE__" and key in ("proxy_username", "proxy_password"):
self._settings.delete(key)
else:
self._settings.set(key, value)
if key == "enable_metadata_archive_db":
await self._metadata_provider_updater()
if key in self._PROXY_KEYS:
proxy_changed = True
if proxy_changed:
downloader = await self._downloader_factory()
await downloader.refresh_session()
return web.json_response({"success": True})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error updating settings: %s", exc, exc_info=True)
return web.Response(status=500, text=str(exc))
def _validate_example_images_path(self, folder_path: str) -> str | None:
if not os.path.exists(folder_path):
return f"Path does not exist: {folder_path}"
if not os.path.isdir(folder_path):
return "Please set a dedicated folder for example images."
if not self._is_dedicated_example_images_folder(folder_path):
return "Please set a dedicated folder for example images."
return None
def _is_dedicated_example_images_folder(self, folder_path: str) -> bool:
return is_valid_example_images_root(folder_path)
class UsageStatsHandler:
def __init__(self, usage_stats_factory: UsageStatsFactory = UsageStats) -> None:
self._usage_stats_factory = usage_stats_factory
async def update_usage_stats(self, request: web.Request) -> web.Response:
try:
data = await request.json()
prompt_id = data.get("prompt_id")
if not prompt_id:
return web.json_response({"success": False, "error": "Missing prompt_id"}, status=400)
usage_stats = self._usage_stats_factory()
await usage_stats.process_execution(prompt_id)
return web.json_response({"success": True})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to update usage stats: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def get_usage_stats(self, request: web.Request) -> web.Response:
try:
usage_stats = self._usage_stats_factory()
stats = await usage_stats.get_stats()
stats_response = {"success": True, "data": stats, "format_version": 2}
return web.json_response(stats_response)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to get usage stats: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
class LoraCodeHandler:
def __init__(self, prompt_server: type[PromptServerProtocol]) -> None:
self._prompt_server = prompt_server
async def update_lora_code(self, request: web.Request) -> web.Response:
try:
data = await request.json()
node_ids = data.get("node_ids")
lora_code = data.get("lora_code", "")
mode = data.get("mode", "append")
if not lora_code:
return web.json_response({"success": False, "error": "Missing lora_code parameter"}, status=400)
results = []
if node_ids is None:
try:
self._prompt_server.instance.send_sync(
"lora_code_update", {"id": -1, "lora_code": lora_code, "mode": mode}
)
results.append({"node_id": "broadcast", "success": True})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error broadcasting lora code: %s", exc)
results.append({"node_id": "broadcast", "success": False, "error": str(exc)})
else:
for entry in node_ids:
node_identifier = entry
graph_identifier = None
if isinstance(entry, dict):
node_identifier = entry.get("node_id")
graph_identifier = entry.get("graph_id")
if node_identifier is None:
results.append(
{
"node_id": node_identifier,
"graph_id": graph_identifier,
"success": False,
"error": "Missing node_id parameter",
}
)
continue
try:
parsed_node_id = int(node_identifier)
except (TypeError, ValueError):
parsed_node_id = node_identifier
payload = {
"id": parsed_node_id,
"lora_code": lora_code,
"mode": mode,
}
if graph_identifier is not None:
payload["graph_id"] = str(graph_identifier)
try:
self._prompt_server.instance.send_sync(
"lora_code_update",
payload,
)
results.append(
{
"node_id": parsed_node_id,
"graph_id": payload.get("graph_id"),
"success": True,
}
)
except Exception as exc: # pragma: no cover - defensive logging
logger.error(
"Error sending lora code to node %s (graph %s): %s",
parsed_node_id,
graph_identifier,
exc,
)
results.append(
{
"node_id": parsed_node_id,
"graph_id": payload.get("graph_id"),
"success": False,
"error": str(exc),
}
)
return web.json_response({"success": True, "results": results})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to update lora code: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
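Two hypothetical request bodies for POST /api/lm/update-lora-code (path per the registrar table later in this diff), matching the broadcast and targeted branches above; the lora_code values are illustrative.

# Broadcast: node_ids omitted -> one "lora_code_update" message with id=-1.
broadcast = {"lora_code": "<lora:detail_tweaker:0.8>", "mode": "append"}

# Targeted: plain ids and {"node_id", "graph_id"} objects may be mixed;
# graph_id is forwarded (as a string) only when present.
targeted = {
    "lora_code": "<lora:detail_tweaker:0.8>",
    "mode": "replace",
    "node_ids": [12, {"node_id": 7, "graph_id": "subgraph-1"}],
}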
class TrainedWordsHandler:
async def get_trained_words(self, request: web.Request) -> web.Response:
try:
file_path = request.query.get("file_path")
if not file_path:
return web.json_response({"success": False, "error": "Missing file_path parameter"}, status=400)
if not os.path.exists(file_path):
return web.json_response({"success": False, "error": "File not found"}, status=404)
if not file_path.endswith(".safetensors"):
return web.json_response({"success": False, "error": "File must be a safetensors file"}, status=400)
trained_words, class_tokens = await extract_trained_words(file_path)
return web.json_response(
{
"success": True,
"trained_words": trained_words,
"class_tokens": class_tokens,
}
)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to get trained words: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
class ModelExampleFilesHandler:
async def get_model_example_files(self, request: web.Request) -> web.Response:
try:
model_path = request.query.get("model_path")
if not model_path:
return web.json_response({"success": False, "error": "Missing model_path parameter"}, status=400)
model_dir = os.path.dirname(model_path)
if not os.path.exists(model_dir):
return web.json_response({"success": False, "error": "Model directory not found"}, status=404)
base_name = os.path.splitext(os.path.basename(model_path))[0]
files = []
pattern = f"{base_name}.example."
for file in os.listdir(model_dir):
if not file.startswith(pattern):
continue
file_full_path = os.path.join(model_dir, file)
if not os.path.isfile(file_full_path):
continue
file_ext = os.path.splitext(file)[1].lower()
if file_ext not in SUPPORTED_MEDIA_EXTENSIONS["images"] and file_ext not in SUPPORTED_MEDIA_EXTENSIONS["videos"]:
continue
try:
index = int(file[len(pattern) :].split(".")[0])
except (ValueError, IndexError):
index = float("inf")
static_url = config.get_preview_static_url(file_full_path)
files.append(
{
"name": file,
"path": static_url,
"extension": file_ext,
"is_video": file_ext in SUPPORTED_MEDIA_EXTENSIONS["videos"],
"index": index,
}
)
files.sort(key=lambda item: item["index"])
for file in files:
file.pop("index", None)
return web.json_response({"success": True, "files": files})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to get model example files: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
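An illustration of the naming scheme the scan above matches; all file names are hypothetical.

# my_lora.safetensors
# my_lora.example.0.png   -> index 0, image
# my_lora.example.1.mp4   -> index 1, video
#
# GET /api/lm/model-example-files?model_path=/models/loras/my_lora.safetensors
# returns the matches sorted by their numeric index; files whose index cannot
# be parsed sort last (float("inf")), and the index key is stripped before the
# response is sent.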
@dataclass
class ServiceRegistryAdapter:
get_lora_scanner: Callable[[], Awaitable]
get_checkpoint_scanner: Callable[[], Awaitable]
get_embedding_scanner: Callable[[], Awaitable]
class ModelLibraryHandler:
def __init__(self, service_registry: ServiceRegistryAdapter, metadata_provider_factory: Callable[[], Awaitable[MetadataProviderProtocol | None]]) -> None:
self._service_registry = service_registry
self._metadata_provider_factory = metadata_provider_factory
async def check_model_exists(self, request: web.Request) -> web.Response:
try:
model_id_str = request.query.get("modelId")
model_version_id_str = request.query.get("modelVersionId")
if not model_id_str:
return web.json_response({"success": False, "error": "Missing required parameter: modelId"}, status=400)
try:
model_id = int(model_id_str)
except ValueError:
return web.json_response({"success": False, "error": "Parameter modelId must be an integer"}, status=400)
lora_scanner = await self._service_registry.get_lora_scanner()
checkpoint_scanner = await self._service_registry.get_checkpoint_scanner()
embedding_scanner = await self._service_registry.get_embedding_scanner()
if model_version_id_str:
try:
model_version_id = int(model_version_id_str)
except ValueError:
return web.json_response({"success": False, "error": "Parameter modelVersionId must be an integer"}, status=400)
exists = False
model_type = None
if await lora_scanner.check_model_version_exists(model_version_id):
exists = True
model_type = "lora"
elif checkpoint_scanner and await checkpoint_scanner.check_model_version_exists(model_version_id):
exists = True
model_type = "checkpoint"
elif embedding_scanner and await embedding_scanner.check_model_version_exists(model_version_id):
exists = True
model_type = "embedding"
return web.json_response({"success": True, "exists": exists, "modelType": model_type if exists else None})
lora_versions = await lora_scanner.get_model_versions_by_id(model_id)
checkpoint_versions = []
embedding_versions = []
if not lora_versions and checkpoint_scanner:
checkpoint_versions = await checkpoint_scanner.get_model_versions_by_id(model_id)
if not lora_versions and not checkpoint_versions and embedding_scanner:
embedding_versions = await embedding_scanner.get_model_versions_by_id(model_id)
model_type = None
versions = []
if lora_versions:
model_type = "lora"
versions = lora_versions
elif checkpoint_versions:
model_type = "checkpoint"
versions = checkpoint_versions
elif embedding_versions:
model_type = "embedding"
versions = embedding_versions
return web.json_response({"success": True, "modelType": model_type, "versions": versions})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to check model existence: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def get_model_versions_status(self, request: web.Request) -> web.Response:
try:
model_id_str = request.query.get("modelId")
if not model_id_str:
return web.json_response({"success": False, "error": "Missing required parameter: modelId"}, status=400)
try:
model_id = int(model_id_str)
except ValueError:
return web.json_response({"success": False, "error": "Parameter modelId must be an integer"}, status=400)
metadata_provider = await self._metadata_provider_factory()
if not metadata_provider:
return web.json_response({"success": False, "error": "Metadata provider not available"}, status=503)
response = await metadata_provider.get_model_versions(model_id)
if not response or not response.get("modelVersions"):
return web.json_response({"success": False, "error": "Model not found"}, status=404)
versions = response.get("modelVersions", [])
model_name = response.get("name", "")
model_type = response.get("type", "").lower()
scanner = None
normalized_type = None
if model_type in {"lora", "locon", "dora"}:
scanner = await self._service_registry.get_lora_scanner()
normalized_type = "lora"
elif model_type == "checkpoint":
scanner = await self._service_registry.get_checkpoint_scanner()
normalized_type = "checkpoint"
elif model_type == "textualinversion":
scanner = await self._service_registry.get_embedding_scanner()
normalized_type = "embedding"
else:
return web.json_response({"success": False, "error": f'Model type "{model_type}" is not supported'}, status=400)
if not scanner:
return web.json_response({"success": False, "error": f'Scanner for type "{normalized_type}" is not available'}, status=503)
local_versions = await scanner.get_model_versions_by_id(model_id)
local_version_ids = {version["versionId"] for version in local_versions}
enriched_versions = []
for version in versions:
version_id = version.get("id")
enriched_versions.append(
{
"id": version_id,
"name": version.get("name", ""),
"thumbnailUrl": version.get("images")[0]["url"] if version.get("images") else None,
"inLibrary": version_id in local_version_ids,
}
)
return web.json_response(
{
"success": True,
"modelId": model_id,
"modelName": model_name,
"modelType": model_type,
"versions": enriched_versions,
}
)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to get model versions status: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def get_civitai_user_models(self, request: web.Request) -> web.Response:
try:
username = request.query.get("username")
if not username:
return web.json_response({"success": False, "error": "Missing required parameter: username"}, status=400)
metadata_provider = await self._metadata_provider_factory()
if not metadata_provider:
return web.json_response({"success": False, "error": "Metadata provider not available"}, status=503)
try:
models = await metadata_provider.get_user_models(username)
except NotImplementedError:
return web.json_response({"success": False, "error": "Metadata provider does not support user model queries"}, status=501)
if models is None:
return web.json_response({"success": False, "error": "Failed to fetch user models"}, status=502)
if not isinstance(models, list):
models = []
lora_scanner = await self._service_registry.get_lora_scanner()
checkpoint_scanner = await self._service_registry.get_checkpoint_scanner()
embedding_scanner = await self._service_registry.get_embedding_scanner()
normalized_allowed_types = {model_type.lower() for model_type in CIVITAI_USER_MODEL_TYPES}
lora_type_aliases = {model_type.lower() for model_type in VALID_LORA_TYPES}
type_scanner_map: Dict[str, object | None] = {
**{alias: lora_scanner for alias in lora_type_aliases},
"checkpoint": checkpoint_scanner,
"textualinversion": embedding_scanner,
}
versions: list[dict] = []
for model in models:
if not isinstance(model, dict):
continue
model_type = str(model.get("type", "")).lower()
if model_type not in normalized_allowed_types:
continue
scanner = type_scanner_map.get(model_type)
if scanner is None:
return web.json_response({"success": False, "error": f'Scanner for type "{model_type}" is not available'}, status=503)
tags_value = model.get("tags")
tags = tags_value if isinstance(tags_value, list) else []
model_id = model.get("id")
try:
model_id_int = int(model_id)
except (TypeError, ValueError):
continue
model_name = model.get("name", "")
versions_data = model.get("modelVersions")
if not isinstance(versions_data, list):
continue
for version in versions_data:
if not isinstance(version, dict):
continue
version_id = version.get("id")
try:
version_id_int = int(version_id)
except (TypeError, ValueError):
continue
images = version.get("images") or []
thumbnail_url = None
if images and isinstance(images, list):
first_image = images[0]
if isinstance(first_image, dict):
raw_url = first_image.get("url")
media_type = first_image.get("type")
rewritten_url, _ = rewrite_preview_url(raw_url, media_type)
thumbnail_url = rewritten_url
in_library = await scanner.check_model_version_exists(version_id_int)
versions.append(
{
"modelId": model_id_int,
"versionId": version_id_int,
"modelName": model_name,
"versionName": version.get("name", ""),
"type": model.get("type"),
"tags": tags,
"baseModel": version.get("baseModel"),
"thumbnailUrl": thumbnail_url,
"inLibrary": in_library,
}
)
return web.json_response({"success": True, "username": username, "versions": versions})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to get Civitai user models: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
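Sketches of the lookups this handler serves; the paths come from the registrar table later in this diff, and the response shapes mirror the json_response calls above (field values are illustrative).

# GET /api/lm/check-model-exists?modelId=12345&modelVersionId=67890
#   -> {"success": true, "exists": true, "modelType": "lora"}
# GET /api/lm/check-model-exists?modelId=12345
#   -> {"success": true, "modelType": "lora", "versions": [...]}
# GET /api/lm/civitai/user-models?username=someone
#   -> {"success": true, "username": "someone", "versions": [
#        {"modelId": 12345, "versionId": 67890, "modelName": "...",
#         "versionName": "...", "type": "LORA", "tags": [],
#         "baseModel": "SDXL 1.0", "thumbnailUrl": "...", "inLibrary": false}]}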
class MetadataArchiveHandler:
def __init__(
self,
*,
metadata_archive_manager_factory: Callable[[], Awaitable[MetadataArchiveManagerProtocol]] = get_metadata_archive_manager,
settings_service=None,
metadata_provider_updater: Callable[[], Awaitable[None]] = update_metadata_providers,
) -> None:
self._metadata_archive_manager_factory = metadata_archive_manager_factory
self._settings = settings_service or get_settings_manager()
self._metadata_provider_updater = metadata_provider_updater
async def download_metadata_archive(self, request: web.Request) -> web.Response:
try:
archive_manager = await self._metadata_archive_manager_factory()
download_id = request.query.get("download_id")
def progress_callback(stage: str, message: str) -> None:
data = {"stage": stage, "message": message, "type": "metadata_archive_download"}
if download_id:
asyncio.create_task(ws_manager.broadcast_download_progress(download_id, data))
else:
asyncio.create_task(ws_manager.broadcast(data))
success = await archive_manager.download_and_extract_database(progress_callback)
if success:
self._settings.set("enable_metadata_archive_db", True)
await self._metadata_provider_updater()
return web.json_response({"success": True, "message": "Metadata archive database downloaded and extracted successfully"})
return web.json_response({"success": False, "error": "Failed to download and extract metadata archive database"}, status=500)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error downloading metadata archive: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def remove_metadata_archive(self, request: web.Request) -> web.Response:
try:
archive_manager = await self._metadata_archive_manager_factory()
success = await archive_manager.remove_database()
if success:
self._settings.set("enable_metadata_archive_db", False)
await self._metadata_provider_updater()
return web.json_response({"success": True, "message": "Metadata archive database removed successfully"})
return web.json_response({"success": False, "error": "Failed to remove metadata archive database"}, status=500)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error removing metadata archive: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def get_metadata_archive_status(self, request: web.Request) -> web.Response:
try:
archive_manager = await self._metadata_archive_manager_factory()
is_available = archive_manager.is_database_available()
is_enabled = self._settings.get("enable_metadata_archive_db", False)
db_size = 0
if is_available:
db_path = archive_manager.get_database_path()
if db_path and os.path.exists(db_path):
db_size = os.path.getsize(db_path)
return web.json_response(
{
"success": True,
"isAvailable": is_available,
"isEnabled": is_enabled,
"databaseSize": db_size,
"databasePath": archive_manager.get_database_path() if is_available else None,
}
)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error getting metadata archive status: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
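Endpoint sketch for the archive handler; the paths come from the registrar table later in this diff, and the download_id value is illustrative.

# POST /api/lm/download-metadata-archive?download_id=abc123
#   Progress is pushed through ws_manager as
#   {"stage": ..., "message": ..., "type": "metadata_archive_download"},
#   scoped to the download_id when one is supplied, broadcast otherwise.
# GET /api/lm/metadata-archive-status
#   -> {"success": true, "isAvailable": ..., "isEnabled": ...,
#       "databaseSize": <bytes>, "databasePath": ...}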
class FileSystemHandler:
async def open_file_location(self, request: web.Request) -> web.Response:
try:
data = await request.json()
file_path = data.get("file_path")
if not file_path:
return web.json_response({"success": False, "error": "Missing file_path parameter"}, status=400)
file_path = os.path.abspath(file_path)
if not os.path.isfile(file_path):
return web.json_response({"success": False, "error": "File does not exist"}, status=404)
if os.name == "nt":
subprocess.Popen(["explorer", "/select,", file_path])
elif os.name == "posix":
if sys.platform == "darwin":
subprocess.Popen(["open", "-R", file_path])
else:
folder = os.path.dirname(file_path)
subprocess.Popen(["xdg-open", folder])
return web.json_response({"success": True, "message": f"Opened folder and selected file: {file_path}"})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to open file location: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
class NodeRegistryHandler:
def __init__(
self,
node_registry: NodeRegistry,
prompt_server: type[PromptServerProtocol],
*,
standalone_mode: bool,
) -> None:
self._node_registry = node_registry
self._prompt_server = prompt_server
self._standalone_mode = standalone_mode
async def register_nodes(self, request: web.Request) -> web.Response:
try:
data = await request.json()
nodes = data.get("nodes", [])
if not isinstance(nodes, list):
return web.json_response({"success": False, "error": "nodes must be a list"}, status=400)
for index, node in enumerate(nodes):
if not isinstance(node, dict):
return web.json_response({"success": False, "error": f"Node {index} must be an object"}, status=400)
node_id = node.get("node_id")
if node_id is None:
return web.json_response({"success": False, "error": f"Node {index} missing node_id parameter"}, status=400)
graph_id = node.get("graph_id")
if graph_id is None:
return web.json_response({"success": False, "error": f"Node {index} missing graph_id parameter"}, status=400)
graph_name = node.get("graph_name")
try:
node["node_id"] = int(node_id)
except (TypeError, ValueError):
return web.json_response({"success": False, "error": f"Node {index} node_id must be an integer"}, status=400)
node["graph_id"] = str(graph_id)
if graph_name is None:
node["graph_name"] = None
elif isinstance(graph_name, str):
node["graph_name"] = graph_name
else:
node["graph_name"] = str(graph_name)
await self._node_registry.register_nodes(nodes)
return web.json_response({"success": True, "message": f"{len(nodes)} nodes registered successfully"})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to register nodes: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def get_registry(self, request: web.Request) -> web.Response:
try:
if self._standalone_mode:
logger.warning("Registry refresh not available in standalone mode")
return web.json_response(
{
"success": False,
"error": "Standalone Mode Active",
"message": "Cannot interact with ComfyUI in standalone mode.",
},
status=503,
)
try:
self._prompt_server.instance.send_sync("lora_registry_refresh", {})
logger.debug("Sent registry refresh request to frontend")
except Exception as exc:
logger.error("Failed to send registry refresh message: %s", exc)
return web.json_response(
{
"success": False,
"error": "Communication Error",
"message": f"Failed to communicate with ComfyUI frontend: {exc}",
},
status=500,
)
registry_updated = await self._node_registry.wait_for_update(timeout=1.0)
if not registry_updated:
logger.warning("Registry refresh timeout after 1 second")
return web.json_response(
{
"success": False,
"error": "Timeout Error",
"message": "Registry refresh timeout - ComfyUI frontend may not be responsive",
},
status=408,
)
registry_info = await self._node_registry.get_registry()
return web.json_response({"success": True, "data": registry_info})
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Failed to get registry: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": "Internal Error", "message": str(exc)}, status=500)
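A hypothetical body for POST /api/lm/register-nodes, shaped by the validation above.

nodes_payload = {
    "nodes": [
        {"node_id": 7, "graph_id": "root", "graph_name": "Main workflow"},
        {"node_id": "12", "graph_id": 3, "graph_name": None},  # coerced to int / str
    ]
}
# A missing node_id or graph_id, or a node_id that cannot be coerced to an
# integer, yields a 400 response naming the offending index.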
class MiscHandlerSet:
"""Aggregate handlers into a lookup compatible with the registrar."""
def __init__(
self,
*,
health: HealthCheckHandler,
settings: SettingsHandler,
usage_stats: UsageStatsHandler,
lora_code: LoraCodeHandler,
trained_words: TrainedWordsHandler,
model_examples: ModelExampleFilesHandler,
node_registry: NodeRegistryHandler,
model_library: ModelLibraryHandler,
metadata_archive: MetadataArchiveHandler,
filesystem: FileSystemHandler,
) -> None:
self.health = health
self.settings = settings
self.usage_stats = usage_stats
self.lora_code = lora_code
self.trained_words = trained_words
self.model_examples = model_examples
self.node_registry = node_registry
self.model_library = model_library
self.metadata_archive = metadata_archive
self.filesystem = filesystem
def to_route_mapping(self) -> Mapping[str, Callable[[web.Request], Awaitable[web.StreamResponse]]]:
return {
"health_check": self.health.health_check,
"get_settings": self.settings.get_settings,
"update_settings": self.settings.update_settings,
"get_settings_libraries": self.settings.get_libraries,
"activate_library": self.settings.activate_library,
"update_usage_stats": self.usage_stats.update_usage_stats,
"get_usage_stats": self.usage_stats.get_usage_stats,
"update_lora_code": self.lora_code.update_lora_code,
"get_trained_words": self.trained_words.get_trained_words,
"get_model_example_files": self.model_examples.get_model_example_files,
"register_nodes": self.node_registry.register_nodes,
"get_registry": self.node_registry.get_registry,
"check_model_exists": self.model_library.check_model_exists,
"get_civitai_user_models": self.model_library.get_civitai_user_models,
"download_metadata_archive": self.metadata_archive.download_metadata_archive,
"remove_metadata_archive": self.metadata_archive.remove_metadata_archive,
"get_metadata_archive_status": self.metadata_archive.get_metadata_archive_status,
"get_model_versions_status": self.model_library.get_model_versions_status,
"open_file_location": self.filesystem.open_file_location,
}
def build_service_registry_adapter() -> ServiceRegistryAdapter:
return ServiceRegistryAdapter(
get_lora_scanner=ServiceRegistry.get_lora_scanner,
get_checkpoint_scanner=ServiceRegistry.get_checkpoint_scanner,
get_embedding_scanner=ServiceRegistry.get_embedding_scanner,
)
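A wiring sketch, not part of this diff: composing the handler set and handing its mapping to the misc registrar defined later in this diff. Constructors not shown here are passed as Ellipsis placeholders.

handlers = MiscHandlerSet(
    health=...,  # HealthCheckHandler (constructor not shown in this diff)
    settings=...,  # SettingsHandler with its settings service and downloader factory
    usage_stats=UsageStatsHandler(),
    lora_code=...,  # LoraCodeHandler(PromptServer)
    trained_words=TrainedWordsHandler(),
    model_examples=ModelExampleFilesHandler(),
    node_registry=...,  # NodeRegistryHandler(registry, PromptServer, standalone_mode=False)
    model_library=ModelLibraryHandler(build_service_registry_adapter(), ...),
    metadata_archive=MetadataArchiveHandler(),
    filesystem=FileSystemHandler(),
)
route_mapping = handlers.to_route_mapping()  # handler_name -> coroutine, consumed by MiscRouteRegistrar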

File diff suppressed because it is too large


@@ -0,0 +1,56 @@
"""Handlers responsible for serving preview assets dynamically."""
from __future__ import annotations
import logging
import urllib.parse
from pathlib import Path
from aiohttp import web
from ...config import config as global_config
logger = logging.getLogger(__name__)
class PreviewHandler:
"""Serve preview assets for the active library at request time."""
def __init__(self, *, config=global_config) -> None:
self._config = config
async def serve_preview(self, request: web.Request) -> web.StreamResponse:
"""Return the preview file referenced by the encoded ``path`` query."""
raw_path = request.query.get("path", "")
if not raw_path:
raise web.HTTPBadRequest(text="Missing 'path' query parameter")
try:
decoded_path = urllib.parse.unquote(raw_path)
except Exception as exc: # pragma: no cover - defensive guard
logger.debug("Failed to decode preview path %s: %s", raw_path, exc)
raise web.HTTPBadRequest(text="Invalid preview path encoding") from exc
normalized = decoded_path.replace("\\", "/")
candidate = Path(normalized)
try:
resolved = candidate.expanduser().resolve(strict=False)
except Exception as exc:
logger.debug("Failed to resolve preview path %s: %s", normalized, exc)
raise web.HTTPBadRequest(text="Unable to resolve preview path") from exc
resolved_str = str(resolved)
if not self._config.is_preview_path_allowed(resolved_str):
logger.debug("Rejected preview outside allowed roots: %s", resolved_str)
raise web.HTTPForbidden(text="Preview path is not within an allowed directory")
if not resolved.is_file():
logger.debug("Preview file not found at %s", resolved_str)
raise web.HTTPNotFound(text="Preview file not found")
# aiohttp's FileResponse handles range requests and content headers for us.
return web.FileResponse(path=resolved, chunk_size=256 * 1024)
__all__ = ["PreviewHandler"]
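Client-side sketch of the preview route; the exact route path is registered elsewhere, so the one used here is an assumption.

import urllib.parse

local_file = r"C:\models\loras\my_lora.example.0.png"  # hypothetical preview on disk
url = "/api/lm/previews?path=" + urllib.parse.quote(local_file)
# The handler unquotes the value, normalizes backslashes to "/", resolves the
# path, then answers 403 outside the allowed roots, 404 when missing, and a
# range-capable FileResponse otherwise.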


@@ -0,0 +1,723 @@
"""Dedicated handler objects for recipe-related routes."""
from __future__ import annotations
import json
import logging
import os
from dataclasses import dataclass
from typing import Any, Awaitable, Callable, Dict, Mapping, Optional
from aiohttp import web
from ...config import config
from ...services.server_i18n import server_i18n as default_server_i18n
from ...services.settings_manager import SettingsManager
from ...services.recipes import (
RecipeAnalysisService,
RecipeDownloadError,
RecipeNotFoundError,
RecipePersistenceService,
RecipeSharingService,
RecipeValidationError,
)
Logger = logging.Logger
EnsureDependenciesCallable = Callable[[], Awaitable[None]]
RecipeScannerGetter = Callable[[], Any]
CivitaiClientGetter = Callable[[], Any]
@dataclass(frozen=True)
class RecipeHandlerSet:
"""Group of handlers providing recipe route implementations."""
page_view: "RecipePageView"
listing: "RecipeListingHandler"
query: "RecipeQueryHandler"
management: "RecipeManagementHandler"
analysis: "RecipeAnalysisHandler"
sharing: "RecipeSharingHandler"
def to_route_mapping(self) -> Mapping[str, Callable[[web.Request], Awaitable[web.StreamResponse]]]:
"""Expose handler coroutines keyed by registrar handler names."""
return {
"render_page": self.page_view.render_page,
"list_recipes": self.listing.list_recipes,
"get_recipe": self.listing.get_recipe,
"analyze_uploaded_image": self.analysis.analyze_uploaded_image,
"analyze_local_image": self.analysis.analyze_local_image,
"save_recipe": self.management.save_recipe,
"delete_recipe": self.management.delete_recipe,
"get_top_tags": self.query.get_top_tags,
"get_base_models": self.query.get_base_models,
"share_recipe": self.sharing.share_recipe,
"download_shared_recipe": self.sharing.download_shared_recipe,
"get_recipe_syntax": self.query.get_recipe_syntax,
"update_recipe": self.management.update_recipe,
"reconnect_lora": self.management.reconnect_lora,
"find_duplicates": self.query.find_duplicates,
"bulk_delete": self.management.bulk_delete,
"save_recipe_from_widget": self.management.save_recipe_from_widget,
"get_recipes_for_lora": self.query.get_recipes_for_lora,
"scan_recipes": self.query.scan_recipes,
}
class RecipePageView:
"""Render the recipe shell page."""
def __init__(
self,
*,
ensure_dependencies_ready: EnsureDependenciesCallable,
settings_service: SettingsManager,
server_i18n=default_server_i18n,
template_env,
template_name: str,
recipe_scanner_getter: RecipeScannerGetter,
logger: Logger,
) -> None:
self._ensure_dependencies_ready = ensure_dependencies_ready
self._settings = settings_service
self._server_i18n = server_i18n
self._template_env = template_env
self._template_name = template_name
self._recipe_scanner_getter = recipe_scanner_getter
self._logger = logger
async def render_page(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None: # pragma: no cover - defensive guard
raise RuntimeError("Recipe scanner not available")
user_language = self._settings.get("language", "en")
self._server_i18n.set_locale(user_language)
try:
await recipe_scanner.get_cached_data(force_refresh=False)
rendered = self._template_env.get_template(self._template_name).render(
recipes=[],
is_initializing=False,
settings=self._settings,
request=request,
t=self._server_i18n.get_translation,
)
except Exception as cache_error: # pragma: no cover - logging path
self._logger.error("Error loading recipe cache data: %s", cache_error)
rendered = self._template_env.get_template(self._template_name).render(
is_initializing=True,
settings=self._settings,
request=request,
t=self._server_i18n.get_translation,
)
return web.Response(text=rendered, content_type="text/html")
except Exception as exc: # pragma: no cover - logging path
self._logger.error("Error handling recipes request: %s", exc, exc_info=True)
return web.Response(text="Error loading recipes page", status=500)
class RecipeListingHandler:
"""Provide listing and detail APIs for recipes."""
def __init__(
self,
*,
ensure_dependencies_ready: EnsureDependenciesCallable,
recipe_scanner_getter: RecipeScannerGetter,
logger: Logger,
) -> None:
self._ensure_dependencies_ready = ensure_dependencies_ready
self._recipe_scanner_getter = recipe_scanner_getter
self._logger = logger
async def list_recipes(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
page = int(request.query.get("page", "1"))
page_size = int(request.query.get("page_size", "20"))
sort_by = request.query.get("sort_by", "date")
search = request.query.get("search")
search_options = {
"title": request.query.get("search_title", "true").lower() == "true",
"tags": request.query.get("search_tags", "true").lower() == "true",
"lora_name": request.query.get("search_lora_name", "true").lower() == "true",
"lora_model": request.query.get("search_lora_model", "true").lower() == "true",
}
filters: Dict[str, list[str]] = {}
base_models = request.query.get("base_models")
if base_models:
filters["base_model"] = base_models.split(",")
tags = request.query.get("tags")
if tags:
filters["tags"] = tags.split(",")
lora_hash = request.query.get("lora_hash")
result = await recipe_scanner.get_paginated_data(
page=page,
page_size=page_size,
sort_by=sort_by,
search=search,
filters=filters,
search_options=search_options,
lora_hash=lora_hash,
)
for item in result.get("items", []):
file_path = item.get("file_path")
if file_path:
item["file_url"] = self.format_recipe_file_url(file_path)
else:
item.setdefault("file_url", "/loras_static/images/no-preview.png")
item.setdefault("loras", [])
item.setdefault("base_model", "")
return web.json_response(result)
except Exception as exc:
self._logger.error("Error retrieving recipes: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)
async def get_recipe(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
recipe_id = request.match_info["recipe_id"]
recipe = await recipe_scanner.get_recipe_by_id(recipe_id)
if not recipe:
return web.json_response({"error": "Recipe not found"}, status=404)
return web.json_response(recipe)
except Exception as exc:
self._logger.error("Error retrieving recipe details: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)
def format_recipe_file_url(self, file_path: str) -> str:
try:
normalized_path = os.path.normpath(file_path)
static_url = config.get_preview_static_url(normalized_path)
if static_url:
return static_url
except Exception as exc: # pragma: no cover - logging path
self._logger.error("Error formatting recipe file URL: %s", exc, exc_info=True)
return "/loras_static/images/no-preview.png"
return "/loras_static/images/no-preview.png"
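A hypothetical listing request, matching the query parameters parsed above (the route path itself is registered elsewhere, so it is an assumption).

# GET /api/lm/recipes?page=1&page_size=20&sort_by=date&search=portrait
#     &search_title=true&search_lora_name=false
#     &base_models=SDXL%201.0,Pony&tags=style,character
#
# Comma-separated base_models/tags values populate the filters dict, while the
# boolean search_* flags select which fields the search matches.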
class RecipeQueryHandler:
"""Provide read-only insights on recipe data."""
def __init__(
self,
*,
ensure_dependencies_ready: EnsureDependenciesCallable,
recipe_scanner_getter: RecipeScannerGetter,
format_recipe_file_url: Callable[[str], str],
logger: Logger,
) -> None:
self._ensure_dependencies_ready = ensure_dependencies_ready
self._recipe_scanner_getter = recipe_scanner_getter
self._format_recipe_file_url = format_recipe_file_url
self._logger = logger
async def get_top_tags(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
limit = int(request.query.get("limit", "20"))
cache = await recipe_scanner.get_cached_data()
tag_counts: Dict[str, int] = {}
for recipe in getattr(cache, "raw_data", []):
for tag in recipe.get("tags", []) or []:
tag_counts[tag] = tag_counts.get(tag, 0) + 1
sorted_tags = [{"tag": tag, "count": count} for tag, count in tag_counts.items()]
sorted_tags.sort(key=lambda entry: entry["count"], reverse=True)
return web.json_response({"success": True, "tags": sorted_tags[:limit]})
except Exception as exc:
self._logger.error("Error retrieving top tags: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def get_base_models(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
cache = await recipe_scanner.get_cached_data()
base_model_counts: Dict[str, int] = {}
for recipe in getattr(cache, "raw_data", []):
base_model = recipe.get("base_model")
if base_model:
base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1
sorted_models = [{"name": model, "count": count} for model, count in base_model_counts.items()]
sorted_models.sort(key=lambda entry: entry["count"], reverse=True)
return web.json_response({"success": True, "base_models": sorted_models})
except Exception as exc:
self._logger.error("Error retrieving base models: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def get_recipes_for_lora(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
lora_hash = request.query.get("hash")
if not lora_hash:
return web.json_response({"success": False, "error": "Lora hash is required"}, status=400)
matching_recipes = await recipe_scanner.get_recipes_for_lora(lora_hash)
return web.json_response({"success": True, "recipes": matching_recipes})
except Exception as exc:
self._logger.error("Error getting recipes for Lora: %s", exc)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def scan_recipes(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
self._logger.info("Manually triggering recipe cache rebuild")
await recipe_scanner.get_cached_data(force_refresh=True)
return web.json_response({"success": True, "message": "Recipe cache refreshed successfully"})
except Exception as exc:
self._logger.error("Error refreshing recipe cache: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def find_duplicates(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
duplicate_groups = await recipe_scanner.find_all_duplicate_recipes()
response_data = []
for fingerprint, recipe_ids in duplicate_groups.items():
if len(recipe_ids) <= 1:
continue
recipes = []
for recipe_id in recipe_ids:
recipe = await recipe_scanner.get_recipe_by_id(recipe_id)
if recipe:
recipes.append(
{
"id": recipe.get("id"),
"title": recipe.get("title"),
"file_url": recipe.get("file_url")
or self._format_recipe_file_url(recipe.get("file_path", "")),
"modified": recipe.get("modified"),
"created_date": recipe.get("created_date"),
"lora_count": len(recipe.get("loras", [])),
}
)
if len(recipes) >= 2:
recipes.sort(key=lambda entry: entry.get("modified", 0), reverse=True)
response_data.append(
{
"fingerprint": fingerprint,
"count": len(recipes),
"recipes": recipes,
}
)
response_data.sort(key=lambda entry: entry["count"], reverse=True)
return web.json_response({"success": True, "duplicate_groups": response_data})
except Exception as exc:
self._logger.error("Error finding duplicate recipes: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def get_recipe_syntax(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
recipe_id = request.match_info["recipe_id"]
try:
syntax_parts = await recipe_scanner.get_recipe_syntax_tokens(recipe_id)
except RecipeNotFoundError:
return web.json_response({"error": "Recipe not found"}, status=404)
if not syntax_parts:
return web.json_response({"error": "No LoRAs found in this recipe"}, status=400)
return web.json_response({"success": True, "syntax": " ".join(syntax_parts)})
except Exception as exc:
self._logger.error("Error generating recipe syntax: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)
class RecipeManagementHandler:
"""Handle create/update/delete style recipe operations."""
def __init__(
self,
*,
ensure_dependencies_ready: EnsureDependenciesCallable,
recipe_scanner_getter: RecipeScannerGetter,
logger: Logger,
persistence_service: RecipePersistenceService,
analysis_service: RecipeAnalysisService,
) -> None:
self._ensure_dependencies_ready = ensure_dependencies_ready
self._recipe_scanner_getter = recipe_scanner_getter
self._logger = logger
self._persistence_service = persistence_service
self._analysis_service = analysis_service
async def save_recipe(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
reader = await request.multipart()
payload = await self._parse_save_payload(reader)
result = await self._persistence_service.save_recipe(
recipe_scanner=recipe_scanner,
image_bytes=payload["image_bytes"],
image_base64=payload["image_base64"],
name=payload["name"],
tags=payload["tags"],
metadata=payload["metadata"],
)
return web.json_response(result.payload, status=result.status)
except RecipeValidationError as exc:
return web.json_response({"error": str(exc)}, status=400)
except Exception as exc:
self._logger.error("Error saving recipe: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)
async def delete_recipe(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
recipe_id = request.match_info["recipe_id"]
result = await self._persistence_service.delete_recipe(
recipe_scanner=recipe_scanner, recipe_id=recipe_id
)
return web.json_response(result.payload, status=result.status)
except RecipeNotFoundError as exc:
return web.json_response({"error": str(exc)}, status=404)
except Exception as exc:
self._logger.error("Error deleting recipe: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)
async def update_recipe(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
recipe_id = request.match_info["recipe_id"]
data = await request.json()
result = await self._persistence_service.update_recipe(
recipe_scanner=recipe_scanner, recipe_id=recipe_id, updates=data
)
return web.json_response(result.payload, status=result.status)
except RecipeValidationError as exc:
return web.json_response({"error": str(exc)}, status=400)
except RecipeNotFoundError as exc:
return web.json_response({"error": str(exc)}, status=404)
except Exception as exc:
self._logger.error("Error updating recipe: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)
async def reconnect_lora(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
data = await request.json()
for field in ("recipe_id", "lora_index", "target_name"):
if field not in data:
raise RecipeValidationError(f"Missing required field: {field}")
result = await self._persistence_service.reconnect_lora(
recipe_scanner=recipe_scanner,
recipe_id=data["recipe_id"],
lora_index=int(data["lora_index"]),
target_name=data["target_name"],
)
return web.json_response(result.payload, status=result.status)
except RecipeValidationError as exc:
return web.json_response({"error": str(exc)}, status=400)
except RecipeNotFoundError as exc:
return web.json_response({"error": str(exc)}, status=404)
except Exception as exc:
self._logger.error("Error reconnecting LoRA: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)
async def bulk_delete(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
data = await request.json()
recipe_ids = data.get("recipe_ids", [])
result = await self._persistence_service.bulk_delete(
recipe_scanner=recipe_scanner, recipe_ids=recipe_ids
)
return web.json_response(result.payload, status=result.status)
except RecipeValidationError as exc:
return web.json_response({"success": False, "error": str(exc)}, status=400)
except RecipeNotFoundError as exc:
return web.json_response({"success": False, "error": str(exc)}, status=404)
except Exception as exc:
self._logger.error("Error performing bulk delete: %s", exc, exc_info=True)
return web.json_response({"success": False, "error": str(exc)}, status=500)
async def save_recipe_from_widget(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
analysis = await self._analysis_service.analyze_widget_metadata(
recipe_scanner=recipe_scanner
)
metadata = analysis.payload.get("metadata")
image_bytes = analysis.payload.get("image_bytes")
if not metadata or image_bytes is None:
raise RecipeValidationError("Unable to extract metadata from widget")
result = await self._persistence_service.save_recipe_from_widget(
recipe_scanner=recipe_scanner,
metadata=metadata,
image_bytes=image_bytes,
)
return web.json_response(result.payload, status=result.status)
except RecipeValidationError as exc:
return web.json_response({"error": str(exc)}, status=400)
except Exception as exc:
self._logger.error("Error saving recipe from widget: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)
async def _parse_save_payload(self, reader) -> dict[str, Any]:
image_bytes: Optional[bytes] = None
image_base64: Optional[str] = None
name: Optional[str] = None
tags: list[str] = []
metadata: Optional[Dict[str, Any]] = None
while True:
field = await reader.next()
if field is None:
break
if field.name == "image":
image_chunks = bytearray()
while True:
chunk = await field.read_chunk()
if not chunk:
break
image_chunks.extend(chunk)
image_bytes = bytes(image_chunks)
elif field.name == "image_base64":
image_base64 = await field.text()
elif field.name == "name":
name = await field.text()
elif field.name == "tags":
tags_text = await field.text()
try:
parsed_tags = json.loads(tags_text)
tags = parsed_tags if isinstance(parsed_tags, list) else []
except Exception:
tags = []
elif field.name == "metadata":
metadata_text = await field.text()
try:
metadata = json.loads(metadata_text)
except Exception:
metadata = {}
return {
"image_bytes": image_bytes,
"image_base64": image_base64,
"name": name,
"tags": tags,
"metadata": metadata,
}
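A minimal client-side counterpart: aiohttp's FormData can produce a multipart body with the same field names _parse_save_payload consumes (all values here are illustrative).

import json

import aiohttp


def build_save_form(image_bytes: bytes) -> aiohttp.FormData:
    """Build a multipart form for save_recipe, field names per the parser above."""
    form = aiohttp.FormData()
    form.add_field("image", image_bytes, filename="recipe.png", content_type="image/png")
    form.add_field("name", "My recipe")
    form.add_field("tags", json.dumps(["portrait", "sdxl"]))  # parsed as a JSON list
    form.add_field("metadata", json.dumps({"loras": []}))     # parsed as a JSON object
    return form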
class RecipeAnalysisHandler:
"""Analyze images to extract recipe metadata."""
def __init__(
self,
*,
ensure_dependencies_ready: EnsureDependenciesCallable,
recipe_scanner_getter: RecipeScannerGetter,
civitai_client_getter: CivitaiClientGetter,
logger: Logger,
analysis_service: RecipeAnalysisService,
) -> None:
self._ensure_dependencies_ready = ensure_dependencies_ready
self._recipe_scanner_getter = recipe_scanner_getter
self._civitai_client_getter = civitai_client_getter
self._logger = logger
self._analysis_service = analysis_service
async def analyze_uploaded_image(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
civitai_client = self._civitai_client_getter()
if recipe_scanner is None or civitai_client is None:
raise RuntimeError("Required services unavailable")
content_type = request.headers.get("Content-Type", "")
if "multipart/form-data" in content_type:
reader = await request.multipart()
field = await reader.next()
if field is None or field.name != "image":
raise RecipeValidationError("No image field found")
image_chunks = bytearray()
while True:
chunk = await field.read_chunk()
if not chunk:
break
image_chunks.extend(chunk)
result = await self._analysis_service.analyze_uploaded_image(
image_bytes=bytes(image_chunks),
recipe_scanner=recipe_scanner,
)
return web.json_response(result.payload, status=result.status)
if "application/json" in content_type:
data = await request.json()
result = await self._analysis_service.analyze_remote_image(
url=data.get("url"),
recipe_scanner=recipe_scanner,
civitai_client=civitai_client,
)
return web.json_response(result.payload, status=result.status)
raise RecipeValidationError("Unsupported content type")
except RecipeValidationError as exc:
return web.json_response({"error": str(exc), "loras": []}, status=400)
except RecipeDownloadError as exc:
return web.json_response({"error": str(exc), "loras": []}, status=400)
except RecipeNotFoundError as exc:
return web.json_response({"error": str(exc), "loras": []}, status=404)
except Exception as exc:
self._logger.error("Error analyzing recipe image: %s", exc, exc_info=True)
return web.json_response({"error": str(exc), "loras": []}, status=500)
async def analyze_local_image(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
data = await request.json()
result = await self._analysis_service.analyze_local_image(
file_path=data.get("path"),
recipe_scanner=recipe_scanner,
)
return web.json_response(result.payload, status=result.status)
except RecipeValidationError as exc:
return web.json_response({"error": str(exc), "loras": []}, status=400)
except RecipeNotFoundError as exc:
return web.json_response({"error": str(exc), "loras": []}, status=404)
except Exception as exc:
self._logger.error("Error analyzing local image: %s", exc, exc_info=True)
return web.json_response({"error": str(exc), "loras": []}, status=500)
class RecipeSharingHandler:
"""Serve endpoints related to recipe sharing."""
def __init__(
self,
*,
ensure_dependencies_ready: EnsureDependenciesCallable,
recipe_scanner_getter: RecipeScannerGetter,
logger: Logger,
sharing_service: RecipeSharingService,
) -> None:
self._ensure_dependencies_ready = ensure_dependencies_ready
self._recipe_scanner_getter = recipe_scanner_getter
self._logger = logger
self._sharing_service = sharing_service
async def share_recipe(self, request: web.Request) -> web.Response:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
recipe_id = request.match_info["recipe_id"]
result = await self._sharing_service.share_recipe(
recipe_scanner=recipe_scanner, recipe_id=recipe_id
)
return web.json_response(result.payload, status=result.status)
except RecipeNotFoundError as exc:
return web.json_response({"error": str(exc)}, status=404)
except Exception as exc:
self._logger.error("Error sharing recipe: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)
async def download_shared_recipe(self, request: web.Request) -> web.StreamResponse:
try:
await self._ensure_dependencies_ready()
recipe_scanner = self._recipe_scanner_getter()
if recipe_scanner is None:
raise RuntimeError("Recipe scanner unavailable")
recipe_id = request.match_info["recipe_id"]
download_info = await self._sharing_service.prepare_download(
recipe_scanner=recipe_scanner, recipe_id=recipe_id
)
return web.FileResponse(
download_info.file_path,
headers={
"Content-Disposition": f'attachment; filename="{download_info.download_filename}"'
},
)
except RecipeNotFoundError as exc:
return web.json_response({"error": str(exc)}, status=404)
except Exception as exc:
self._logger.error("Error downloading shared recipe: %s", exc, exc_info=True)
return web.json_response({"error": str(exc)}, status=500)


@@ -5,9 +5,9 @@ from typing import Dict
from server import PromptServer # type: ignore
from .base_model_routes import BaseModelRoutes
from .model_route_registrar import ModelRouteRegistrar
from ..services.lora_service import LoraService
from ..services.service_registry import ServiceRegistry
from ..utils.routes_common import ModelRouteUtils
from ..utils.utils import get_lora_info
logger = logging.getLogger(__name__)
@@ -17,43 +17,34 @@ class LoraRoutes(BaseModelRoutes):
def __init__(self):
"""Initialize LoRA routes with LoRA service"""
# Service will be initialized later via setup_routes
self.service = None
self.civitai_client = None
super().__init__()
self.template_name = "loras.html"
async def initialize_services(self):
"""Initialize services from ServiceRegistry"""
lora_scanner = await ServiceRegistry.get_lora_scanner()
self.service = LoraService(lora_scanner)
self.civitai_client = await ServiceRegistry.get_civitai_client()
# Initialize parent with the service
super().__init__(self.service)
# Attach service dependencies
self.attach_service(self.service)
def setup_routes(self, app: web.Application):
"""Setup LoRA routes"""
# Schedule service initialization on app startup
app.on_startup.append(lambda _: self.initialize_services())
# Setup common routes with 'loras' prefix (includes page route)
super().setup_routes(app, 'loras')
-def setup_specific_routes(self, app: web.Application, prefix: str):
+def setup_specific_routes(self, registrar: ModelRouteRegistrar, prefix: str):
"""Setup LoRA-specific routes"""
# LoRA-specific query routes
-app.router.add_get(f'/api/{prefix}/letter-counts', self.get_letter_counts)
-app.router.add_get(f'/api/{prefix}/get-trigger-words', self.get_lora_trigger_words)
-app.router.add_get(f'/api/{prefix}/model-description', self.get_lora_model_description)
-app.router.add_get(f'/api/{prefix}/usage-tips-by-path', self.get_lora_usage_tips_by_path)
-# CivitAI integration with LoRA-specific validation
-app.router.add_get(f'/api/{prefix}/civitai/versions/{{model_id}}', self.get_civitai_versions_lora)
-app.router.add_get(f'/api/{prefix}/civitai/model/version/{{modelVersionId}}', self.get_civitai_model_by_version)
-app.router.add_get(f'/api/{prefix}/civitai/model/hash/{{hash}}', self.get_civitai_model_by_hash)
+registrar.add_prefixed_route('GET', '/api/lm/{prefix}/letter-counts', prefix, self.get_letter_counts)
+registrar.add_prefixed_route('GET', '/api/lm/{prefix}/get-trigger-words', prefix, self.get_lora_trigger_words)
+registrar.add_prefixed_route('GET', '/api/lm/{prefix}/usage-tips-by-path', prefix, self.get_lora_usage_tips_by_path)
# ComfyUI integration
-app.router.add_post(f'/api/{prefix}/get_trigger_words', self.get_trigger_words)
+registrar.add_prefixed_route('POST', '/api/lm/{prefix}/get_trigger_words', prefix, self.get_trigger_words)
def _parse_specific_params(self, request: web.Request) -> Dict:
"""Parse LoRA-specific parameters"""
@@ -79,6 +70,15 @@ class LoraRoutes(BaseModelRoutes):
return params
def _validate_civitai_model_type(self, model_type: str) -> bool:
"""Validate CivitAI model type for LoRA"""
from ..utils.constants import VALID_LORA_TYPES
return model_type.lower() in VALID_LORA_TYPES
def _get_expected_model_types(self) -> str:
"""Get expected model types string for error messages"""
return "LORA, LoCon, or DORA"
# LoRA-specific route handlers
async def get_letter_counts(self, request: web.Request) -> web.Response:
"""Get count of LoRAs for each letter of the alphabet"""
@@ -213,159 +213,6 @@ class LoraRoutes(BaseModelRoutes):
'error': str(e)
}, status=500)
# CivitAI integration methods
async def get_civitai_versions_lora(self, request: web.Request) -> web.Response:
"""Get available versions for a Civitai LoRA model with local availability info"""
try:
model_id = request.match_info['model_id']
response = await self.civitai_client.get_model_versions(model_id)
if not response or not response.get('modelVersions'):
return web.Response(status=404, text="Model not found")
versions = response.get('modelVersions', [])
model_type = response.get('type', '')
# Check model type - should be LORA, LoCon, or DORA
from ..utils.constants import VALID_LORA_TYPES
if model_type.lower() not in VALID_LORA_TYPES:
return web.json_response({
'error': f"Model type mismatch. Expected LORA or LoCon, got {model_type}"
}, status=400)
# Check local availability for each version
for version in versions:
# Find the model file (type="Model") in the files list
model_file = next((file for file in version.get('files', [])
if file.get('type') == 'Model'), None)
if model_file:
sha256 = model_file.get('hashes', {}).get('SHA256')
if sha256:
# Set existsLocally and localPath at the version level
version['existsLocally'] = self.service.has_hash(sha256)
if version['existsLocally']:
version['localPath'] = self.service.get_path_by_hash(sha256)
# Also set the model file size at the version level for easier access
version['modelSizeKB'] = model_file.get('sizeKB')
else:
# No model file found in this version
version['existsLocally'] = False
return web.json_response(versions)
except Exception as e:
logger.error(f"Error fetching LoRA model versions: {e}")
return web.Response(status=500, text=str(e))
async def get_civitai_model_by_version(self, request: web.Request) -> web.Response:
"""Get CivitAI model details by model version ID"""
try:
model_version_id = request.match_info.get('modelVersionId')
# Get model details from Civitai API
model, error_msg = await self.civitai_client.get_model_version_info(model_version_id)
if not model:
# Log warning for failed model retrieval
logger.warning(f"Failed to fetch model version {model_version_id}: {error_msg}")
# Determine status code based on error message
status_code = 404 if error_msg and "not found" in error_msg.lower() else 500
return web.json_response({
"success": False,
"error": error_msg or "Failed to fetch model information"
}, status=status_code)
return web.json_response(model)
except Exception as e:
logger.error(f"Error fetching model details: {e}")
return web.json_response({
"success": False,
"error": str(e)
}, status=500)
async def get_civitai_model_by_hash(self, request: web.Request) -> web.Response:
"""Get CivitAI model details by hash"""
try:
hash = request.match_info.get('hash')
model = await self.civitai_client.get_model_by_hash(hash)
return web.json_response(model)
except Exception as e:
logger.error(f"Error fetching model details by hash: {e}")
return web.json_response({
"success": False,
"error": str(e)
}, status=500)
async def get_lora_model_description(self, request: web.Request) -> web.Response:
"""Get model description for a Lora model"""
try:
# Get parameters
model_id = request.query.get('model_id')
file_path = request.query.get('file_path')
if not model_id:
return web.json_response({
'success': False,
'error': 'Model ID is required'
}, status=400)
# Check if we already have the description stored in metadata
description = None
tags = []
creator = {}
if file_path:
import os
from ..utils.metadata_manager import MetadataManager
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
description = metadata.get('modelDescription')
tags = metadata.get('tags', [])
creator = metadata.get('creator', {})
# If description is not in metadata, fetch from CivitAI
if not description:
logger.info(f"Fetching model metadata for model ID: {model_id}")
model_metadata, _ = await self.civitai_client.get_model_metadata(model_id)
if model_metadata:
description = model_metadata.get('description')
tags = model_metadata.get('tags', [])
creator = model_metadata.get('creator', {})
# Save the metadata to file if we have a file path and got metadata
if file_path:
try:
metadata_path = os.path.splitext(file_path)[0] + '.metadata.json'
metadata = await ModelRouteUtils.load_local_metadata(metadata_path)
metadata['modelDescription'] = description
metadata['tags'] = tags
# Ensure the civitai dict exists
if 'civitai' not in metadata:
metadata['civitai'] = {}
# Store creator in the civitai nested structure
metadata['civitai']['creator'] = creator
await MetadataManager.save_metadata(file_path, metadata, True)
except Exception as e:
logger.error(f"Error saving model metadata: {e}")
return web.json_response({
'success': True,
'description': description or "<p>No model description available.</p>",
'tags': tags,
'creator': creator
})
except Exception as e:
logger.error(f"Error getting model metadata: {e}")
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
async def get_trigger_words(self, request: web.Request) -> web.Response:
"""Get trigger words for specified LoRA models"""
try:
@@ -382,11 +229,27 @@ class LoraRoutes(BaseModelRoutes):
trigger_words_text = ",, ".join(all_trigger_words) if all_trigger_words else ""
# Send update to all connected trigger word toggle nodes
for entry in node_ids:
node_identifier = entry
graph_identifier = None
if isinstance(entry, dict):
node_identifier = entry.get("node_id")
graph_identifier = entry.get("graph_id")
try:
parsed_node_id = int(node_identifier)
except (TypeError, ValueError):
parsed_node_id = node_identifier
payload = {
"id": parsed_node_id,
"message": trigger_words_text
}
if graph_identifier is not None:
payload["graph_id"] = str(graph_identifier)
PromptServer.instance.send_sync("trigger_word_update", payload)
return web.json_response({"success": True})

View File

@@ -0,0 +1,70 @@
"""Route registrar for miscellaneous endpoints.
This module mirrors the model route registrar architecture so that
miscellaneous endpoints share a consistent registration flow.
"""
from dataclasses import dataclass
from typing import Callable, Iterable, Mapping
from aiohttp import web
@dataclass(frozen=True)
class RouteDefinition:
"""Declarative definition for a HTTP route."""
method: str
path: str
handler_name: str
MISC_ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
RouteDefinition("GET", "/api/lm/settings", "get_settings"),
RouteDefinition("POST", "/api/lm/settings", "update_settings"),
RouteDefinition("GET", "/api/lm/settings/libraries", "get_settings_libraries"),
RouteDefinition("POST", "/api/lm/settings/libraries/activate", "activate_library"),
RouteDefinition("GET", "/api/lm/health-check", "health_check"),
RouteDefinition("POST", "/api/lm/open-file-location", "open_file_location"),
RouteDefinition("POST", "/api/lm/update-usage-stats", "update_usage_stats"),
RouteDefinition("GET", "/api/lm/get-usage-stats", "get_usage_stats"),
RouteDefinition("POST", "/api/lm/update-lora-code", "update_lora_code"),
RouteDefinition("GET", "/api/lm/trained-words", "get_trained_words"),
RouteDefinition("GET", "/api/lm/model-example-files", "get_model_example_files"),
RouteDefinition("POST", "/api/lm/register-nodes", "register_nodes"),
RouteDefinition("GET", "/api/lm/get-registry", "get_registry"),
RouteDefinition("GET", "/api/lm/check-model-exists", "check_model_exists"),
RouteDefinition("GET", "/api/lm/civitai/user-models", "get_civitai_user_models"),
RouteDefinition("POST", "/api/lm/download-metadata-archive", "download_metadata_archive"),
RouteDefinition("POST", "/api/lm/remove-metadata-archive", "remove_metadata_archive"),
RouteDefinition("GET", "/api/lm/metadata-archive-status", "get_metadata_archive_status"),
RouteDefinition("GET", "/api/lm/model-versions-status", "get_model_versions_status"),
)
class MiscRouteRegistrar:
"""Bind miscellaneous route definitions to an aiohttp router."""
_METHOD_MAP = {
"GET": "add_get",
"POST": "add_post",
"PUT": "add_put",
"DELETE": "add_delete",
}
def __init__(self, app: web.Application) -> None:
self._app = app
def register_routes(
self,
handler_lookup: Mapping[str, Callable[[web.Request], object]],
*,
definitions: Iterable[RouteDefinition] = MISC_ROUTE_DEFINITIONS,
) -> None:
for definition in definitions:
self._bind(definition.method, definition.path, handler_lookup[definition.handler_name])
def _bind(self, method: str, path: str, handler: Callable) -> None:
add_method_name = self._METHOD_MAP[method.upper()]
add_method = getattr(self._app.router, add_method_name)
add_method(path, handler)
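A minimal usage sketch, assuming an aiohttp app and a single handler; passing a custom definitions tuple keeps the handler lookup small:

from aiohttp import web

async def health_check(request: web.Request) -> web.Response:
    # Trivial handler used only for this sketch
    return web.json_response({"status": "ok"})

app = web.Application()
registrar = MiscRouteRegistrar(app)
registrar.register_routes(
    {"health_check": health_check},
    definitions=(RouteDefinition("GET", "/api/lm/health-check", "health_check"),),
)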

View File

@@ -1,699 +1,135 @@
"""Route controller for miscellaneous endpoints."""
from __future__ import annotations
import logging
import os
import sys
import threading
import asyncio
from server import PromptServer # type: ignore
from typing import Awaitable, Callable, Mapping
from aiohttp import web
from ..services.settings_manager import settings
from ..services.metadata_service import (
get_metadata_archive_manager,
get_metadata_provider,
update_metadata_providers,
)
from ..services.settings_manager import get_settings_manager
from ..services.downloader import get_downloader
from ..utils.usage_stats import UsageStats
from ..utils.lora_metadata import extract_trained_words
from ..config import config
from ..utils.constants import SUPPORTED_MEDIA_EXTENSIONS, NODE_TYPES, DEFAULT_NODE_COLOR
from ..services.service_registry import ServiceRegistry
import re
from .handlers.misc_handlers import (
FileSystemHandler,
HealthCheckHandler,
LoraCodeHandler,
MetadataArchiveHandler,
MiscHandlerSet,
ModelExampleFilesHandler,
ModelLibraryHandler,
NodeRegistry,
NodeRegistryHandler,
SettingsHandler,
TrainedWordsHandler,
UsageStatsHandler,
build_service_registry_adapter,
)
from .misc_route_registrar import MiscRouteRegistrar
logger = logging.getLogger(__name__)
standalone_mode = 'nodes' not in sys.modules
standalone_mode = os.environ.get("LORA_MANAGER_STANDALONE", "0") == "1" or os.environ.get(
"HF_HUB_DISABLE_TELEMETRY", "0"
) == "0"
# Node registry for tracking active workflow nodes
class NodeRegistry:
"""Thread-safe registry for tracking Lora nodes in active workflows"""
def __init__(self):
self._lock = threading.RLock()
self._nodes = {} # node_id -> node_info
self._registry_updated = threading.Event()
def register_nodes(self, nodes):
"""Register multiple nodes at once, replacing existing registry"""
with self._lock:
# Clear existing registry
self._nodes.clear()
# Register all new nodes
for node in nodes:
node_id = node['node_id']
node_type = node.get('type', '')
# Convert node type name to integer
type_id = NODE_TYPES.get(node_type, 0) # 0 for unknown types
# Handle null bgcolor with default color
bgcolor = node.get('bgcolor')
if bgcolor is None:
bgcolor = DEFAULT_NODE_COLOR
self._nodes[node_id] = {
'id': node_id,
'bgcolor': bgcolor,
'title': node.get('title'),
'type': type_id,
'type_name': node_type
}
logger.debug(f"Registered {len(nodes)} nodes in registry")
# Signal that registry has been updated
self._registry_updated.set()
def get_registry(self):
"""Get current registry information"""
with self._lock:
return {
'nodes': dict(self._nodes), # Return a copy
'node_count': len(self._nodes)
}
def clear_registry(self):
"""Clear the entire registry"""
with self._lock:
self._nodes.clear()
logger.info("Node registry cleared")
def wait_for_update(self, timeout=1.0):
"""Wait for registry update with timeout"""
self._registry_updated.clear()
return self._registry_updated.wait(timeout)
# Global registry instance
node_registry = NodeRegistry()
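The refresh handshake used by get_registry below hinges on the event set by register_nodes; a condensed sketch of that flow, with hypothetical node data:

import threading

def frontend_reply() -> None:
    # Hypothetical node data; a None bgcolor falls back to DEFAULT_NODE_COLOR.
    node_registry.register_nodes([
        {"node_id": 3, "type": "Lora Loader (LoraManager)", "bgcolor": None, "title": "Loader"},
    ])

# wait_for_update() clears the event before waiting, so the registration must
# arrive after the wait has started; here that is simulated with a timer.
threading.Timer(0.1, frontend_reply).start()
assert node_registry.wait_for_update(timeout=1.0) is True
print(node_registry.get_registry())  # {'nodes': {3: {...}}, 'node_count': 1}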
class MiscRoutes:
"""Miscellaneous routes for various utility functions"""
@staticmethod
def setup_routes(app):
"""Register miscellaneous routes"""
app.router.add_post('/api/settings', MiscRoutes.update_settings)
# Add new route for clearing cache
app.router.add_post('/api/clear-cache', MiscRoutes.clear_cache)
app.router.add_get('/api/health-check', lambda request: web.json_response({'status': 'ok'}))
"""Route controller that mirrors the model route architecture."""
def __init__(
self,
*,
settings_service=None,
usage_stats_factory: Callable[[], UsageStats] = UsageStats,
prompt_server: type[PromptServer] = PromptServer,
service_registry_adapter=build_service_registry_adapter(),
metadata_provider_factory=get_metadata_provider,
metadata_archive_manager_factory=get_metadata_archive_manager,
metadata_provider_updater=update_metadata_providers,
downloader_factory=get_downloader,
registrar_factory=MiscRouteRegistrar,
handler_set_factory=MiscHandlerSet,
node_registry: NodeRegistry | None = None,
standalone_mode_flag: bool = standalone_mode,
) -> None:
self._settings = settings_service or get_settings_manager()
self._usage_stats_factory = usage_stats_factory
self._prompt_server = prompt_server
self._service_registry_adapter = service_registry_adapter
self._metadata_provider_factory = metadata_provider_factory
self._metadata_archive_manager_factory = metadata_archive_manager_factory
self._metadata_provider_updater = metadata_provider_updater
self._downloader_factory = downloader_factory
self._registrar_factory = registrar_factory
self._handler_set_factory = handler_set_factory
self._node_registry = node_registry or NodeRegistry()
self._standalone_mode = standalone_mode_flag
# Usage stats routes
app.router.add_post('/api/update-usage-stats', MiscRoutes.update_usage_stats)
app.router.add_get('/api/get-usage-stats', MiscRoutes.get_usage_stats)
# Lora code update endpoint
app.router.add_post('/api/update-lora-code', MiscRoutes.update_lora_code)
# Add new route for getting trained words
app.router.add_get('/api/trained-words', MiscRoutes.get_trained_words)
# Add new route for getting model example files
app.router.add_get('/api/model-example-files', MiscRoutes.get_model_example_files)
# Node registry endpoints
app.router.add_post('/api/register-nodes', MiscRoutes.register_nodes)
app.router.add_get('/api/get-registry', MiscRoutes.get_registry)
# Add new route for checking if a model exists in the library
app.router.add_get('/api/check-model-exists', MiscRoutes.check_model_exists)
self._handler_mapping: Mapping[str, Callable[[web.Request], Awaitable[web.StreamResponse]]] | None = None
@staticmethod
async def clear_cache(request):
"""Clear all cache files from the cache folder"""
try:
# Get the cache folder path (relative to project directory)
project_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
cache_folder = os.path.join(project_dir, 'cache')
# Check if cache folder exists
if not os.path.exists(cache_folder):
logger.info("Cache folder does not exist, nothing to clear")
return web.json_response({'success': True, 'message': 'No cache folder found'})
# Get list of cache files before deleting for reporting
cache_files = [f for f in os.listdir(cache_folder) if os.path.isfile(os.path.join(cache_folder, f))]
deleted_files = []
# Delete each .msgpack file in the cache folder
for filename in cache_files:
if filename.endswith('.msgpack'):
file_path = os.path.join(cache_folder, filename)
try:
os.remove(file_path)
deleted_files.append(filename)
logger.info(f"Deleted cache file: {filename}")
except Exception as e:
logger.error(f"Failed to delete {filename}: {e}")
return web.json_response({
'success': False,
'error': f"Failed to delete {filename}: {str(e)}"
}, status=500)
return web.json_response({
'success': True,
'message': f"Successfully cleared {len(deleted_files)} cache files",
'deleted_files': deleted_files
})
except Exception as e:
logger.error(f"Error clearing cache files: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
def setup_routes(app: web.Application) -> None:
"""Entry point used by the application bootstrap."""
controller = MiscRoutes()
controller.bind(app)
@staticmethod
async def update_settings(request):
"""Update application settings"""
try:
data = await request.json()
# Validate and update settings
for key, value in data.items():
if value == settings.get(key):
# No change, skip
continue
# Special handling for example_images_path - verify path exists
if key == 'example_images_path' and value:
if not os.path.exists(value):
return web.json_response({
'success': False,
'error': f"Path does not exist: {value}"
})
# Path changed - server restart required for new path to take effect
old_path = settings.get('example_images_path')
if old_path != value:
logger.info(f"Example images path changed to {value} - server restart required")
# Save to settings
settings.set(key, value)
return web.json_response({'success': True})
except Exception as e:
logger.error(f"Error updating settings: {e}", exc_info=True)
return web.Response(status=500, text=str(e))
@staticmethod
async def update_usage_stats(request):
"""
Update usage statistics based on a prompt_id
Expects a JSON body with:
{
"prompt_id": "string"
}
"""
try:
# Parse the request body
data = await request.json()
prompt_id = data.get('prompt_id')
if not prompt_id:
return web.json_response({
'success': False,
'error': 'Missing prompt_id'
}, status=400)
# Call the UsageStats to process this prompt_id synchronously
usage_stats = UsageStats()
await usage_stats.process_execution(prompt_id)
return web.json_response({
'success': True
})
except Exception as e:
logger.error(f"Failed to update usage stats: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
@staticmethod
async def get_usage_stats(request):
"""Get current usage statistics"""
try:
usage_stats = UsageStats()
stats = await usage_stats.get_stats()
# Add version information to help clients handle format changes
stats_response = {
'success': True,
'data': stats,
'format_version': 2 # Indicate this is the new format with history
}
return web.json_response(stats_response)
except Exception as e:
logger.error(f"Failed to get usage stats: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
@staticmethod
async def update_lora_code(request):
"""
Update Lora code in ComfyUI nodes
Expects a JSON body with:
{
"node_ids": [123, 456], # Optional - List of node IDs to update (for browser mode)
"lora_code": "<lora:modelname:1.0>", # The Lora code to send
"mode": "append" # or "replace" - whether to append or replace existing code
}
"""
try:
# Parse the request body
data = await request.json()
node_ids = data.get('node_ids')
lora_code = data.get('lora_code', '')
mode = data.get('mode', 'append')
if not lora_code:
return web.json_response({
'success': False,
'error': 'Missing lora_code parameter'
}, status=400)
results = []
# Desktop mode: no specific node_ids provided
if node_ids is None:
try:
# Send broadcast message with id=-1 to all Lora Loader nodes
PromptServer.instance.send_sync("lora_code_update", {
"id": -1,
"lora_code": lora_code,
"mode": mode
})
results.append({
'node_id': 'broadcast',
'success': True
})
except Exception as e:
logger.error(f"Error broadcasting lora code: {e}")
results.append({
'node_id': 'broadcast',
'success': False,
'error': str(e)
})
else:
# Browser mode: send to specific nodes
for node_id in node_ids:
try:
# Send the message to the frontend
PromptServer.instance.send_sync("lora_code_update", {
"id": node_id,
"lora_code": lora_code,
"mode": mode
})
results.append({
'node_id': node_id,
'success': True
})
except Exception as e:
logger.error(f"Error sending lora code to node {node_id}: {e}")
results.append({
'node_id': node_id,
'success': False,
'error': str(e)
})
return web.json_response({
'success': True,
'results': results
})
except Exception as e:
logger.error(f"Failed to update lora code: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
def bind(self, app: web.Application) -> None:
registrar = self._registrar_factory(app)
registrar.register_routes(self._ensure_handler_mapping())
@staticmethod
async def get_trained_words(request):
"""
Get trained words from a safetensors file, sorted by frequency
Expects a query parameter:
file_path: Path to the safetensors file
"""
try:
# Get file path from query parameters
file_path = request.query.get('file_path')
if not file_path:
return web.json_response({
'success': False,
'error': 'Missing file_path parameter'
}, status=400)
# Check if file exists and is a safetensors file
if not os.path.exists(file_path):
return web.json_response({
'success': False,
'error': f"File not found: {file_path}"
}, status=404)
if not file_path.lower().endswith('.safetensors'):
return web.json_response({
'success': False,
'error': 'File is not a safetensors file'
}, status=400)
# Extract trained words and class_tokens
trained_words, class_tokens = await extract_trained_words(file_path)
# Return result with both trained words and class tokens
return web.json_response({
'success': True,
'trained_words': trained_words,
'class_tokens': class_tokens
})
except Exception as e:
logger.error(f"Failed to get trained words: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
def _ensure_handler_mapping(self) -> Mapping[str, Callable[[web.Request], Awaitable[web.StreamResponse]]]:
if self._handler_mapping is None:
handler_set = self._create_handler_set()
self._handler_mapping = handler_set.to_route_mapping()
return self._handler_mapping
@staticmethod
async def get_model_example_files(request):
"""
Get list of example image files for a specific model based on file path
Expects:
- file_path in query parameters
Returns:
- List of image files with their paths as static URLs
"""
try:
# Get the model file path from query parameters
file_path = request.query.get('file_path')
if not file_path:
return web.json_response({
'success': False,
'error': 'Missing file_path parameter'
}, status=400)
# Extract directory and base filename
model_dir = os.path.dirname(file_path)
model_filename = os.path.basename(file_path)
model_name = os.path.splitext(model_filename)[0]
# Check if the directory exists
if not os.path.exists(model_dir):
return web.json_response({
'success': False,
'error': 'Model directory not found',
'files': []
}, status=404)
# Look for files matching the pattern modelname.example.<index>.<ext>
files = []
pattern = f"{model_name}.example."
for file in os.listdir(model_dir):
file_lower = file.lower()
if file_lower.startswith(pattern.lower()):
file_full_path = os.path.join(model_dir, file)
if os.path.isfile(file_full_path):
# Check if the file is a supported media file
file_ext = os.path.splitext(file)[1].lower()
if (file_ext in SUPPORTED_MEDIA_EXTENSIONS['images'] or
file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos']):
# Extract the index from the filename
try:
# Extract the part after '.example.' and before file extension
index_part = file[len(pattern):].split('.')[0]
# Try to parse it as an integer
index = int(index_part)
except (ValueError, IndexError):
# If we can't parse the index, use infinity to sort at the end
index = float('inf')
# Convert file path to static URL
static_url = config.get_preview_static_url(file_full_path)
files.append({
'name': file,
'path': static_url,
'extension': file_ext,
'is_video': file_ext in SUPPORTED_MEDIA_EXTENSIONS['videos'],
'index': index
})
# Sort files by their index for consistent ordering
files.sort(key=lambda x: x['index'])
# Remove the index field as it's only used for sorting
for file in files:
file.pop('index', None)
return web.json_response({
'success': True,
'files': files
})
except Exception as e:
logger.error(f"Failed to get model example files: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
def _create_handler_set(self) -> MiscHandlerSet:
health = HealthCheckHandler()
settings_handler = SettingsHandler(
settings_service=self._settings,
metadata_provider_updater=self._metadata_provider_updater,
downloader_factory=self._downloader_factory,
)
usage_stats = UsageStatsHandler(usage_stats_factory=self._usage_stats_factory)
lora_code = LoraCodeHandler(prompt_server=self._prompt_server)
trained_words = TrainedWordsHandler()
model_examples = ModelExampleFilesHandler()
metadata_archive = MetadataArchiveHandler(
metadata_archive_manager_factory=self._metadata_archive_manager_factory,
settings_service=self._settings,
metadata_provider_updater=self._metadata_provider_updater,
)
filesystem = FileSystemHandler()
node_registry_handler = NodeRegistryHandler(
node_registry=self._node_registry,
prompt_server=self._prompt_server,
standalone_mode=self._standalone_mode,
)
model_library = ModelLibraryHandler(
service_registry=self._service_registry_adapter,
metadata_provider_factory=self._metadata_provider_factory,
)
@staticmethod
async def register_nodes(request):
"""
Register multiple Lora nodes at once
Expects a JSON body with:
{
"nodes": [
{
"node_id": 123,
"bgcolor": "#535",
"title": "Lora Loader (LoraManager)"
},
...
]
}
"""
try:
data = await request.json()
# Validate required fields
nodes = data.get('nodes', [])
if not isinstance(nodes, list):
return web.json_response({
'success': False,
'error': 'nodes must be a list'
}, status=400)
# Validate each node
for i, node in enumerate(nodes):
if not isinstance(node, dict):
return web.json_response({
'success': False,
'error': f'Node {i} must be an object'
}, status=400)
node_id = node.get('node_id')
if node_id is None:
return web.json_response({
'success': False,
'error': f'Node {i} missing node_id parameter'
}, status=400)
# Validate node_id is an integer
try:
node['node_id'] = int(node_id)
except (ValueError, TypeError):
return web.json_response({
'success': False,
'error': f'Node {i} node_id must be an integer'
}, status=400)
# Register all nodes
node_registry.register_nodes(nodes)
return web.json_response({
'success': True,
'message': f'{len(nodes)} nodes registered successfully'
})
except Exception as e:
logger.error(f"Failed to register nodes: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
@staticmethod
async def get_registry(request):
"""Get current node registry information by refreshing from frontend"""
try:
# Check if running in standalone mode
if standalone_mode:
logger.warning("Registry refresh not available in standalone mode")
return web.json_response({
'success': False,
'error': 'Standalone Mode Active',
'message': 'Cannot interact with ComfyUI in standalone mode.'
}, status=503)
# Send message to frontend to refresh registry
try:
PromptServer.instance.send_sync("lora_registry_refresh", {})
logger.debug("Sent registry refresh request to frontend")
except Exception as e:
logger.error(f"Failed to send registry refresh message: {e}")
return web.json_response({
'success': False,
'error': 'Communication Error',
'message': f'Failed to communicate with ComfyUI frontend: {str(e)}'
}, status=500)
# Wait for registry update with timeout
def wait_for_registry():
return node_registry.wait_for_update(timeout=1.0)
# Run the wait in a thread to avoid blocking the event loop
loop = asyncio.get_event_loop()
registry_updated = await loop.run_in_executor(None, wait_for_registry)
if not registry_updated:
logger.warning("Registry refresh timeout after 1 second")
return web.json_response({
'success': False,
'error': 'Timeout Error',
'message': 'Registry refresh timeout - ComfyUI frontend may not be responsive'
}, status=408)
# Get updated registry
registry_info = node_registry.get_registry()
return web.json_response({
'success': True,
'data': registry_info
})
except Exception as e:
logger.error(f"Failed to get registry: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': 'Internal Error',
'message': str(e)
}, status=500)
return self._handler_set_factory(
health=health,
settings=settings_handler,
usage_stats=usage_stats,
lora_code=lora_code,
trained_words=trained_words,
model_examples=model_examples,
node_registry=node_registry_handler,
model_library=model_library,
metadata_archive=metadata_archive,
filesystem=filesystem,
)
@staticmethod
async def check_model_exists(request):
"""
Check if a model with the specified modelId and optionally modelVersionId exists in the library
Expects query parameters:
- modelId: int - Civitai model ID (required)
- modelVersionId: int - Civitai model version ID (optional)
Returns:
- If modelVersionId is provided: JSON with a boolean 'exists' field
- If modelVersionId is not provided: JSON with a list of modelVersionIds that exist in the library
"""
try:
# Get the modelId and modelVersionId from query parameters
model_id_str = request.query.get('modelId')
model_version_id_str = request.query.get('modelVersionId')
# Validate modelId parameter (required)
if not model_id_str:
return web.json_response({
'success': False,
'error': 'Missing required parameter: modelId'
}, status=400)
try:
# Convert modelId to integer
model_id = int(model_id_str)
except ValueError:
return web.json_response({
'success': False,
'error': 'Parameter modelId must be an integer'
}, status=400)
# Get all scanners
lora_scanner = await ServiceRegistry.get_lora_scanner()
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
embedding_scanner = await ServiceRegistry.get_embedding_scanner()
# If modelVersionId is provided, check for specific version
if model_version_id_str:
try:
model_version_id = int(model_version_id_str)
except ValueError:
return web.json_response({
'success': False,
'error': 'Parameter modelVersionId must be an integer'
}, status=400)
# Check lora scanner first
exists = False
model_type = None
if await lora_scanner.check_model_version_exists(model_version_id):
exists = True
model_type = 'lora'
elif checkpoint_scanner and await checkpoint_scanner.check_model_version_exists(model_version_id):
exists = True
model_type = 'checkpoint'
elif embedding_scanner and await embedding_scanner.check_model_version_exists(model_version_id):
exists = True
model_type = 'embedding'
return web.json_response({
'success': True,
'exists': exists,
'modelType': model_type if exists else None
})
# If modelVersionId is not provided, return all version IDs for the model
else:
lora_versions = await lora_scanner.get_model_versions_by_id(model_id)
checkpoint_versions = []
embedding_versions = []
# Prefer lora, then checkpoint, then embedding
if not lora_versions:
checkpoint_versions = await checkpoint_scanner.get_model_versions_by_id(model_id)
if not lora_versions and not checkpoint_versions:
embedding_versions = await embedding_scanner.get_model_versions_by_id(model_id)
model_type = None
versions = []
if lora_versions:
model_type = 'lora'
versions = lora_versions
elif checkpoint_versions:
model_type = 'checkpoint'
versions = checkpoint_versions
elif embedding_versions:
model_type = 'embedding'
versions = embedding_versions
return web.json_response({
'success': True,
'modelId': model_id,
'modelType': model_type,
'versions': versions
})
except Exception as e:
logger.error(f"Failed to check model existence: {e}", exc_info=True)
return web.json_response({
'success': False,
'error': str(e)
}, status=500)
__all__ = ["MiscRoutes"]

View File

@@ -0,0 +1,99 @@
"""Route registrar for model endpoints."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable, Iterable, Mapping
from aiohttp import web
@dataclass(frozen=True)
class RouteDefinition:
"""Declarative definition for a HTTP route."""
method: str
path_template: str
handler_name: str
def build_path(self, prefix: str) -> str:
return self.path_template.replace("{prefix}", prefix)
COMMON_ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
RouteDefinition("GET", "/api/lm/{prefix}/list", "get_models"),
RouteDefinition("POST", "/api/lm/{prefix}/delete", "delete_model"),
RouteDefinition("POST", "/api/lm/{prefix}/exclude", "exclude_model"),
RouteDefinition("POST", "/api/lm/{prefix}/fetch-civitai", "fetch_civitai"),
RouteDefinition("POST", "/api/lm/{prefix}/fetch-all-civitai", "fetch_all_civitai"),
RouteDefinition("POST", "/api/lm/{prefix}/relink-civitai", "relink_civitai"),
RouteDefinition("POST", "/api/lm/{prefix}/replace-preview", "replace_preview"),
RouteDefinition("POST", "/api/lm/{prefix}/save-metadata", "save_metadata"),
RouteDefinition("POST", "/api/lm/{prefix}/add-tags", "add_tags"),
RouteDefinition("POST", "/api/lm/{prefix}/rename", "rename_model"),
RouteDefinition("POST", "/api/lm/{prefix}/bulk-delete", "bulk_delete_models"),
RouteDefinition("POST", "/api/lm/{prefix}/verify-duplicates", "verify_duplicates"),
RouteDefinition("POST", "/api/lm/{prefix}/move_model", "move_model"),
RouteDefinition("POST", "/api/lm/{prefix}/move_models_bulk", "move_models_bulk"),
RouteDefinition("GET", "/api/lm/{prefix}/auto-organize", "auto_organize_models"),
RouteDefinition("POST", "/api/lm/{prefix}/auto-organize", "auto_organize_models"),
RouteDefinition("GET", "/api/lm/{prefix}/auto-organize-progress", "get_auto_organize_progress"),
RouteDefinition("GET", "/api/lm/{prefix}/top-tags", "get_top_tags"),
RouteDefinition("GET", "/api/lm/{prefix}/base-models", "get_base_models"),
RouteDefinition("GET", "/api/lm/{prefix}/scan", "scan_models"),
RouteDefinition("GET", "/api/lm/{prefix}/roots", "get_model_roots"),
RouteDefinition("GET", "/api/lm/{prefix}/folders", "get_folders"),
RouteDefinition("GET", "/api/lm/{prefix}/folder-tree", "get_folder_tree"),
RouteDefinition("GET", "/api/lm/{prefix}/unified-folder-tree", "get_unified_folder_tree"),
RouteDefinition("GET", "/api/lm/{prefix}/find-duplicates", "find_duplicate_models"),
RouteDefinition("GET", "/api/lm/{prefix}/find-filename-conflicts", "find_filename_conflicts"),
RouteDefinition("GET", "/api/lm/{prefix}/get-notes", "get_model_notes"),
RouteDefinition("GET", "/api/lm/{prefix}/preview-url", "get_model_preview_url"),
RouteDefinition("GET", "/api/lm/{prefix}/civitai-url", "get_model_civitai_url"),
RouteDefinition("GET", "/api/lm/{prefix}/metadata", "get_model_metadata"),
RouteDefinition("GET", "/api/lm/{prefix}/model-description", "get_model_description"),
RouteDefinition("GET", "/api/lm/{prefix}/relative-paths", "get_relative_paths"),
RouteDefinition("GET", "/api/lm/{prefix}/civitai/versions/{model_id}", "get_civitai_versions"),
RouteDefinition("GET", "/api/lm/{prefix}/civitai/model/version/{modelVersionId}", "get_civitai_model_by_version"),
RouteDefinition("GET", "/api/lm/{prefix}/civitai/model/hash/{hash}", "get_civitai_model_by_hash"),
RouteDefinition("POST", "/api/lm/download-model", "download_model"),
RouteDefinition("GET", "/api/lm/download-model-get", "download_model_get"),
RouteDefinition("GET", "/api/lm/cancel-download-get", "cancel_download_get"),
RouteDefinition("GET", "/api/lm/download-progress/{download_id}", "get_download_progress"),
RouteDefinition("GET", "/{prefix}", "handle_models_page"),
)
class ModelRouteRegistrar:
"""Bind declarative definitions to an aiohttp router."""
_METHOD_MAP = {
"GET": "add_get",
"POST": "add_post",
"PUT": "add_put",
"DELETE": "add_delete",
}
def __init__(self, app: web.Application) -> None:
self._app = app
def register_common_routes(
self,
prefix: str,
handler_lookup: Mapping[str, Callable[[web.Request], object]],
*,
definitions: Iterable[RouteDefinition] = COMMON_ROUTE_DEFINITIONS,
) -> None:
for definition in definitions:
self._bind_route(definition.method, definition.build_path(prefix), handler_lookup[definition.handler_name])
def add_route(self, method: str, path: str, handler: Callable) -> None:
self._bind_route(method, path, handler)
def add_prefixed_route(self, method: str, path_template: str, prefix: str, handler: Callable) -> None:
self._bind_route(method, path_template.replace("{prefix}", prefix), handler)
def _bind_route(self, method: str, path: str, handler: Callable) -> None:
add_method_name = self._METHOD_MAP[method.upper()]
add_method = getattr(self._app.router, add_method_name)
add_method(path, handler)
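The prefix templating is a plain string substitution, so the same definitions table can serve each model kind; for instance (prefix values are illustrative):

defn = RouteDefinition("GET", "/api/lm/{prefix}/list", "get_models")
assert defn.build_path("loras") == "/api/lm/loras/list"
assert defn.build_path("checkpoints") == "/api/lm/checkpoints/list"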

View File

@@ -0,0 +1,25 @@
"""Route controller for preview asset delivery."""
from __future__ import annotations
from aiohttp import web
from .handlers.preview_handlers import PreviewHandler
class PreviewRoutes:
"""Register routes that expose preview assets."""
def __init__(self, *, handler: PreviewHandler | None = None) -> None:
self._handler = handler or PreviewHandler()
@classmethod
def setup_routes(cls, app: web.Application) -> None:
controller = cls()
controller.register(app)
def register(self, app: web.Application) -> None:
app.router.add_get('/api/lm/previews', self._handler.serve_preview)
__all__ = ["PreviewRoutes"]

View File

@@ -0,0 +1,64 @@
"""Route registrar for recipe endpoints."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable, Mapping
from aiohttp import web
@dataclass(frozen=True)
class RouteDefinition:
"""Declarative definition for a recipe HTTP route."""
method: str
path: str
handler_name: str
ROUTE_DEFINITIONS: tuple[RouteDefinition, ...] = (
RouteDefinition("GET", "/loras/recipes", "render_page"),
RouteDefinition("GET", "/api/lm/recipes", "list_recipes"),
RouteDefinition("GET", "/api/lm/recipe/{recipe_id}", "get_recipe"),
RouteDefinition("POST", "/api/lm/recipes/analyze-image", "analyze_uploaded_image"),
RouteDefinition("POST", "/api/lm/recipes/analyze-local-image", "analyze_local_image"),
RouteDefinition("POST", "/api/lm/recipes/save", "save_recipe"),
RouteDefinition("DELETE", "/api/lm/recipe/{recipe_id}", "delete_recipe"),
RouteDefinition("GET", "/api/lm/recipes/top-tags", "get_top_tags"),
RouteDefinition("GET", "/api/lm/recipes/base-models", "get_base_models"),
RouteDefinition("GET", "/api/lm/recipe/{recipe_id}/share", "share_recipe"),
RouteDefinition("GET", "/api/lm/recipe/{recipe_id}/share/download", "download_shared_recipe"),
RouteDefinition("GET", "/api/lm/recipe/{recipe_id}/syntax", "get_recipe_syntax"),
RouteDefinition("PUT", "/api/lm/recipe/{recipe_id}/update", "update_recipe"),
RouteDefinition("POST", "/api/lm/recipe/lora/reconnect", "reconnect_lora"),
RouteDefinition("GET", "/api/lm/recipes/find-duplicates", "find_duplicates"),
RouteDefinition("POST", "/api/lm/recipes/bulk-delete", "bulk_delete"),
RouteDefinition("POST", "/api/lm/recipes/save-from-widget", "save_recipe_from_widget"),
RouteDefinition("GET", "/api/lm/recipes/for-lora", "get_recipes_for_lora"),
RouteDefinition("GET", "/api/lm/recipes/scan", "scan_recipes"),
)
class RecipeRouteRegistrar:
"""Bind declarative recipe definitions to an aiohttp router."""
_METHOD_MAP = {
"GET": "add_get",
"POST": "add_post",
"PUT": "add_put",
"DELETE": "add_delete",
}
def __init__(self, app: web.Application) -> None:
self._app = app
def register_routes(self, handler_lookup: Mapping[str, Callable[[web.Request], object]]) -> None:
for definition in ROUTE_DEFINITIONS:
handler = handler_lookup[definition.handler_name]
self._bind_route(definition.method, definition.path, handler)
def _bind_route(self, method: str, path: str, handler: Callable) -> None:
add_method_name = self._METHOD_MAP[method.upper()]
add_method = getattr(self._app.router, add_method_name)
add_method(path, handler)

File diff suppressed because it is too large

View File

@@ -8,12 +8,32 @@ from collections import defaultdict, Counter
from typing import Dict, List, Any
from ..config import config
from ..services.settings_manager import settings
from ..services.settings_manager import get_settings_manager
from ..services.server_i18n import server_i18n
from ..services.service_registry import ServiceRegistry
from ..utils.usage_stats import UsageStats
logger = logging.getLogger(__name__)
class _SettingsProxy:
def __init__(self):
self._manager = None
def _resolve(self):
if self._manager is None:
self._manager = get_settings_manager()
return self._manager
def get(self, *args, **kwargs):
return self._resolve().get(*args, **kwargs)
def __getattr__(self, item):
return getattr(self._resolve(), item)
settings = _SettingsProxy()
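The proxy defers manager construction until first use, so importing this module no longer touches settings eagerly; illustratively:

# Nothing is constructed at import time; the first attribute access
# triggers get_settings_manager() and caches the result.
language = settings.get('language', 'en')  # resolves lazily via _resolve()
theme = settings.get('theme', 'auto')      # reuses the cached manager ('theme' is an illustrative key)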
class StatsRoutes:
"""Route handlers for Statistics page and API endpoints"""
@@ -32,7 +52,13 @@ class StatsRoutes:
self.lora_scanner = await ServiceRegistry.get_lora_scanner()
self.checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
self.embedding_scanner = await ServiceRegistry.get_embedding_scanner()
self.usage_stats = UsageStats()
# Only initialize usage stats if we have valid paths configured
try:
self.usage_stats = UsageStats()
except RuntimeError as e:
logger.warning(f"Could not initialize usage statistics: {e}")
self.usage_stats = None
async def handle_stats_page(self, request: web.Request) -> web.Response:
"""Handle GET /statistics request"""
@@ -58,11 +84,25 @@ class StatsRoutes:
is_initializing = lora_initializing or checkpoint_initializing or embedding_initializing
# Get the user's language setting
settings_object = settings
user_language = settings_object.get('language', 'en')
settings_manager = settings_object if not isinstance(settings_object, _SettingsProxy) else settings_object._resolve()
# Set the server-side i18n locale
server_i18n.set_locale(user_language)
# Add the i18n filter to the template environment
if not hasattr(self.template_env, '_i18n_filter_added'):
self.template_env.filters['t'] = server_i18n.create_template_filter()
self.template_env._i18n_filter_added = True
template = self.template_env.get_template('statistics.html')
rendered = template.render(
is_initializing=is_initializing,
settings=settings_manager,
request=request,
t=server_i18n.get_translation,
)
return web.Response(
@@ -488,12 +528,12 @@ class StatsRoutes:
app.router.add_get('/statistics', self.handle_stats_page)
# Register API routes
app.router.add_get('/api/stats/collection-overview', self.get_collection_overview)
app.router.add_get('/api/stats/usage-analytics', self.get_usage_analytics)
app.router.add_get('/api/stats/base-model-distribution', self.get_base_model_distribution)
app.router.add_get('/api/stats/tag-analytics', self.get_tag_analytics)
app.router.add_get('/api/stats/storage-analytics', self.get_storage_analytics)
app.router.add_get('/api/stats/insights', self.get_insights)
app.router.add_get('/api/lm/stats/collection-overview', self.get_collection_overview)
app.router.add_get('/api/lm/stats/usage-analytics', self.get_usage_analytics)
app.router.add_get('/api/lm/stats/base-model-distribution', self.get_base_model_distribution)
app.router.add_get('/api/lm/stats/tag-analytics', self.get_tag_analytics)
app.router.add_get('/api/lm/stats/storage-analytics', self.get_storage_analytics)
app.router.add_get('/api/lm/stats/insights', self.get_insights)
async def _on_startup(self, app):
"""Initialize services when the app starts"""

View File

@@ -1,26 +1,31 @@
import os
import aiohttp
import logging
import toml
import git
import zipfile
import shutil
import tempfile
from aiohttp import web
import asyncio
from aiohttp import web, ClientError
from typing import Dict, List
from ..utils.settings_paths import ensure_settings_file
from ..services.downloader import get_downloader
logger = logging.getLogger(__name__)
NETWORK_EXCEPTIONS = (ClientError, OSError, asyncio.TimeoutError)
class UpdateRoutes:
"""Routes for handling plugin update checks"""
@staticmethod
def setup_routes(app):
"""Register update check routes"""
app.router.add_get('/api/check-updates', UpdateRoutes.check_updates)
app.router.add_get('/api/version-info', UpdateRoutes.get_version_info)
app.router.add_post('/api/perform-update', UpdateRoutes.perform_update)
app.router.add_get('/api/lm/check-updates', UpdateRoutes.check_updates)
app.router.add_get('/api/lm/version-info', UpdateRoutes.get_version_info)
app.router.add_post('/api/lm/perform-update', UpdateRoutes.perform_update)
@staticmethod
async def check_updates(request):
@@ -64,6 +69,12 @@ class UpdateRoutes:
'nightly': nightly
})
except NETWORK_EXCEPTIONS as e:
logger.warning("Network unavailable during update check: %s", e)
return web.json_response({
'success': False,
'error': 'Network unavailable for update check'
})
except Exception as e:
logger.error(f"Failed to check for updates: {e}", exc_info=True)
return web.json_response({
@@ -112,7 +123,7 @@ class UpdateRoutes:
current_dir = os.path.dirname(os.path.abspath(__file__))
plugin_root = os.path.dirname(os.path.dirname(current_dir))
settings_path = os.path.join(plugin_root, 'settings.json')
settings_path = ensure_settings_file(logger)
settings_backup = None
if os.path.exists(settings_path):
with open(settings_path, 'r', encoding='utf-8') as f:
@@ -155,51 +166,66 @@ class UpdateRoutes:
async def _download_and_replace_zip(plugin_root: str) -> tuple[bool, str]:
"""
Download latest release ZIP from GitHub and replace plugin files.
Skips settings.json. Writes extracted file list to .tracking.
Skips settings.json and civitai folder. Writes extracted file list to .tracking.
"""
repo_owner = "willmiao"
repo_name = "ComfyUI-Lora-Manager"
github_api = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
try:
async with aiohttp.ClientSession() as session:
async with session.get(github_api) as resp:
if resp.status != 200:
logger.error(f"Failed to fetch release info: {resp.status}")
return False, ""
data = await resp.json()
zip_url = data.get("zipball_url")
version = data.get("tag_name", "unknown")
downloader = await get_downloader()
# Get release info
success, data = await downloader.make_request(
'GET',
github_api,
use_auth=False
)
if not success:
logger.error(f"Failed to fetch release info: {data}")
return False, ""
zip_url = data.get("zipball_url")
version = data.get("tag_name", "unknown")
# Download ZIP
async with session.get(zip_url) as zip_resp:
if zip_resp.status != 200:
logger.error(f"Failed to download ZIP: {zip_resp.status}")
return False, ""
with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_zip:
tmp_zip.write(await zip_resp.read())
zip_path = tmp_zip.name
# Download ZIP to temporary file
with tempfile.NamedTemporaryFile(delete=False, suffix=".zip") as tmp_zip:
tmp_zip_path = tmp_zip.name
success, result = await downloader.download_file(
url=zip_url,
save_path=tmp_zip_path,
use_auth=False,
allow_resume=False
)
if not success:
logger.error(f"Failed to download ZIP: {result}")
return False, ""
zip_path = tmp_zip_path
# Skip both settings.json and civitai folder
UpdateRoutes._clean_plugin_folder(plugin_root, skip_files=['settings.json', 'civitai'])
# Extract ZIP to temp dir
with tempfile.TemporaryDirectory() as tmp_dir:
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(tmp_dir)
# Find extracted folder (GitHub ZIP contains a root folder)
extracted_root = next(os.scandir(tmp_dir)).path
# Copy files, skipping settings.json
# Copy files, skipping settings.json and civitai folder
for item in os.listdir(extracted_root):
if item == 'settings.json' or item == 'civitai':
continue
src = os.path.join(extracted_root, item)
dst = os.path.join(plugin_root, item)
if os.path.isdir(src):
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst, ignore=shutil.ignore_patterns('settings.json'))
shutil.copytree(src, dst, ignore=shutil.ignore_patterns('settings.json', 'civitai'))
else:
if item == 'settings.json':
continue
shutil.copy2(src, dst)
# Write .tracking file: list all files under extracted_root, relative to extracted_root
@@ -207,15 +233,22 @@ class UpdateRoutes:
tracking_info_file = os.path.join(plugin_root, '.tracking')
tracking_files = []
for root, dirs, files in os.walk(extracted_root):
# Skip civitai folder and its contents
rel_root = os.path.relpath(root, extracted_root)
if rel_root == 'civitai' or rel_root.startswith('civitai' + os.sep):
continue
for file in files:
rel_path = os.path.relpath(os.path.join(root, file), extracted_root)
# Skip settings.json and any file under civitai
if rel_path == 'settings.json' or rel_path.startswith('civitai' + os.sep):
continue
tracking_files.append(rel_path.replace("\\", "/"))
with open(tracking_info_file, "w", encoding='utf-8') as file:
file.write('\n'.join(tracking_files))
os.remove(zip_path)
logger.info(f"Updated plugin via ZIP to {version}")
return True, version
except Exception as e:
logger.error(f"ZIP update failed: {e}", exc_info=True)
@@ -244,24 +277,27 @@ class UpdateRoutes:
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/commits/main"
try:
async with aiohttp.ClientSession() as session:
async with session.get(github_url, headers={'Accept': 'application/vnd.github+json'}) as response:
if response.status != 200:
logger.warning(f"Failed to fetch GitHub commit: {response.status}")
return "main", []
data = await response.json()
commit_sha = data.get('sha', '')[:7] # Short hash
commit_message = data.get('commit', {}).get('message', '')
# Format as "main-{short_hash}"
version = f"main-{commit_sha}"
# Use commit message as changelog
changelog = [commit_message] if commit_message else []
return version, changelog
downloader = await get_downloader()
success, data = await downloader.make_request('GET', github_url, custom_headers={'Accept': 'application/vnd.github+json'})
if not success:
logger.warning(f"Failed to fetch GitHub commit: {data}")
return "main", []
commit_sha = data.get('sha', '')[:7] # Short hash
commit_message = data.get('commit', {}).get('message', '')
# Format as "main-{short_hash}"
version = f"main-{commit_sha}"
# Use commit message as changelog
changelog = [commit_message] if commit_message else []
return version, changelog
except NETWORK_EXCEPTIONS as e:
logger.warning("Unable to reach GitHub for nightly version: %s", e)
return "main", []
except Exception as e:
logger.error(f"Error fetching nightly version: {e}", exc_info=True)
return "main", []
@@ -410,23 +446,26 @@ class UpdateRoutes:
github_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
try:
async with aiohttp.ClientSession() as session:
async with session.get(github_url, headers={'Accept': 'application/vnd.github+json'}) as response:
if response.status != 200:
logger.warning(f"Failed to fetch GitHub release: {response.status}")
return "v0.0.0", []
data = await response.json()
version = data.get('tag_name', '')
if not version.startswith('v'):
version = f"v{version}"
# Extract changelog from release notes
body = data.get('body', '')
changelog = UpdateRoutes._parse_changelog(body)
return version, changelog
downloader = await get_downloader()
success, data = await downloader.make_request('GET', github_url, custom_headers={'Accept': 'application/vnd.github+json'})
if not success:
logger.warning(f"Failed to fetch GitHub release: {data}")
return "v0.0.0", []
version = data.get('tag_name', '')
if not version.startswith('v'):
version = f"v{version}"
# Extract changelog from release notes
body = data.get('body', '')
changelog = UpdateRoutes._parse_changelog(body)
return version, changelog
except NETWORK_EXCEPTIONS as e:
logger.warning("Unable to reach GitHub for release info: %s", e)
return "v0.0.0", []
except Exception as e:
logger.error(f"Error fetching remote version: {e}", exc_info=True)
return "v0.0.0", []

View File

@@ -4,98 +4,89 @@ import logging
import os
from ..utils.models import BaseModelMetadata
from ..utils.constants import NSFW_LEVELS
from .settings_manager import settings
from ..utils.utils import fuzzy_match
from ..utils.metadata_manager import MetadataManager
from .model_query import FilterCriteria, ModelCacheRepository, ModelFilterSet, SearchStrategy, SettingsProvider
from .settings_manager import get_settings_manager
logger = logging.getLogger(__name__)
class BaseModelService(ABC):
"""Base service class for all model types"""
def __init__(self, model_type: str, scanner, metadata_class: Type[BaseModelMetadata]):
"""Initialize the service
def __init__(
self,
model_type: str,
scanner,
metadata_class: Type[BaseModelMetadata],
*,
cache_repository: Optional[ModelCacheRepository] = None,
filter_set: Optional[ModelFilterSet] = None,
search_strategy: Optional[SearchStrategy] = None,
settings_provider: Optional[SettingsProvider] = None,
):
"""Initialize the service.
Args:
model_type: Type of model (lora, checkpoint, etc.).
scanner: Model scanner instance.
metadata_class: Metadata class for this model type.
cache_repository: Custom repository for cache access (primarily for tests).
filter_set: Filter component controlling folder/tag/favorites logic.
search_strategy: Search component for fuzzy/text matching.
settings_provider: Settings object; defaults to the global settings manager.
"""
self.model_type = model_type
self.scanner = scanner
self.metadata_class = metadata_class
self.settings = settings_provider or get_settings_manager()
self.cache_repository = cache_repository or ModelCacheRepository(scanner)
self.filter_set = filter_set or ModelFilterSet(self.settings)
self.search_strategy = search_strategy or SearchStrategy()
async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'name',
folder: str = None, search: str = None, fuzzy_search: bool = False,
base_models: list = None, tags: list = None,
search_options: dict = None, hash_filters: dict = None,
favorites_only: bool = False, **kwargs) -> Dict:
"""Get paginated and filtered model data
Args:
page: Page number (1-based)
page_size: Number of items per page
sort_by: Sort criteria, e.g. 'name', 'name:asc', 'name:desc', 'date', 'date:asc', 'date:desc'
folder: Folder filter
search: Search term
fuzzy_search: Whether to use fuzzy search
base_models: List of base models to filter by
tags: List of tags to filter by
search_options: Search options dict
hash_filters: Hash filtering options
favorites_only: Filter for favorites only
**kwargs: Additional model-specific filters
Returns:
Dict containing paginated results
"""
cache = await self.scanner.get_cached_data()
async def get_paginated_data(
self,
page: int,
page_size: int,
sort_by: str = 'name',
folder: str = None,
search: str = None,
fuzzy_search: bool = False,
base_models: list = None,
tags: list = None,
search_options: dict = None,
hash_filters: dict = None,
favorites_only: bool = False,
**kwargs,
) -> Dict:
"""Get paginated and filtered model data"""
sort_params = self.cache_repository.parse_sort(sort_by)
sorted_data = await self.cache_repository.fetch_sorted(sort_params)
# Parse sort_by into sort_key and order
if ':' in sort_by:
sort_key, order = sort_by.split(':', 1)
sort_key = sort_key.strip()
order = order.strip().lower()
if order not in ('asc', 'desc'):
order = 'asc'
else:
sort_key = sort_by.strip()
order = 'asc'
# Get default search options if not provided
if search_options is None:
search_options = {
'filename': True,
'modelname': True,
'tags': False,
'recursive': False,
}
# Get the base data set using new sort logic
filtered_data = await cache.get_sorted_data(sort_key, order)
# Apply hash filtering if provided (highest priority)
if hash_filters:
# Jump to pagination for hash filters
filtered_data = await self._apply_hash_filters(sorted_data, hash_filters)
return self._paginate(filtered_data, page, page_size)
# Apply common filters
filtered_data = await self._apply_common_filters(
sorted_data,
folder=folder,
base_models=base_models,
tags=tags,
favorites_only=favorites_only,
search_options=search_options,
)
# Apply search filtering
if search:
filtered_data = await self._apply_search_filters(
filtered_data,
search,
fuzzy_search,
search_options,
)
# Apply model-specific filters
filtered_data = await self._apply_specific_filters(filtered_data, **kwargs)
return self._paginate(filtered_data, page, page_size)
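A hedged example of driving the refactored pipeline from an async caller; the folder and tag values are illustrative:

# Given a concrete service instance (e.g. a lora service), inside async code:
page = await service.get_paginated_data(
    page=1,
    page_size=25,
    sort_by='date:desc',
    folder='characters',   # illustrative folder filter
    tags=['style'],        # illustrative tag filter
    favorites_only=True,
)
# When hash_filters is given, it short-circuits every other filter and
# paginates the hash-matched results directly.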
async def _apply_hash_filters(self, data: List[Dict], hash_filters: Dict) -> List[Dict]:
"""Apply hash-based filtering"""
@@ -119,105 +110,36 @@ class BaseModelService(ABC):
return data
async def _apply_common_filters(self, data: List[Dict], folder: str = None,
base_models: list = None, tags: list = None,
favorites_only: bool = False, search_options: dict = None) -> List[Dict]:
async def _apply_common_filters(
self,
data: List[Dict],
folder: str = None,
base_models: list = None,
tags: list = None,
favorites_only: bool = False,
search_options: dict = None,
) -> List[Dict]:
"""Apply common filters that work across all model types"""
# Apply SFW filtering if enabled in settings
if settings.get('show_only_sfw', False):
data = [
item for item in data
if not item.get('preview_nsfw_level') or item.get('preview_nsfw_level') < NSFW_LEVELS['R']
]
# Apply favorites filtering if enabled
if favorites_only:
data = [
item for item in data
if item.get('favorite', False) is True
]
# Apply folder filtering
if folder is not None:
if search_options and search_options.get('recursive', False):
# Recursive folder filtering - include all subfolders
data = [
item for item in data
if item['folder'].startswith(folder)
]
else:
# Exact folder filtering
data = [
item for item in data
if item['folder'] == folder
]
# Apply base model filtering
if base_models and len(base_models) > 0:
data = [
item for item in data
if item.get('base_model') in base_models
]
# Apply tag filtering
if tags and len(tags) > 0:
data = [
item for item in data
if any(tag in item.get('tags', []) for tag in tags)
]
return data
normalized_options = self.search_strategy.normalize_options(search_options)
criteria = FilterCriteria(
folder=folder,
base_models=base_models,
tags=tags,
favorites_only=favorites_only,
search_options=normalized_options,
)
return self.filter_set.apply(data, criteria)
async def _apply_search_filters(self, data: List[Dict], search: str,
fuzzy_search: bool, search_options: dict) -> List[Dict]:
async def _apply_search_filters(
self,
data: List[Dict],
search: str,
fuzzy_search: bool,
search_options: dict,
) -> List[Dict]:
"""Apply search filtering"""
search_results = []
for item in data:
# Search by file name
if search_options.get('filename', True):
if fuzzy_search:
if fuzzy_match(item.get('file_name', ''), search):
search_results.append(item)
continue
elif search.lower() in item.get('file_name', '').lower():
search_results.append(item)
continue
# Search by model name
if search_options.get('modelname', True):
if fuzzy_search:
if fuzzy_match(item.get('model_name', ''), search):
search_results.append(item)
continue
elif search.lower() in item.get('model_name', '').lower():
search_results.append(item)
continue
# Search by tags
if search_options.get('tags', False) and 'tags' in item:
if any((fuzzy_match(tag, search) if fuzzy_search else search.lower() in tag.lower())
for tag in item['tags']):
search_results.append(item)
continue
# Search by creator
civitai = item.get('civitai')
creator_username = ''
if civitai and isinstance(civitai, dict):
creator = civitai.get('creator')
if creator and isinstance(creator, dict):
creator_username = creator.get('username', '')
if search_options.get('creator', False) and creator_username:
if fuzzy_search:
if fuzzy_match(creator_username, search):
search_results.append(item)
continue
elif search.lower() in creator_username.lower():
search_results.append(item)
continue
return search_results
normalized_options = self.search_strategy.normalize_options(search_options)
return self.search_strategy.apply(data, search, normalized_options, fuzzy_search)
async def _apply_specific_filters(self, data: List[Dict], **kwargs) -> List[Dict]:
"""Apply model-specific filters - to be overridden by subclasses if needed"""
@@ -275,6 +197,18 @@ class BaseModelService(ABC):
"""Get model root directories"""
return self.scanner.get_model_roots()
def filter_civitai_data(self, data: Dict, minimal: bool = False) -> Dict:
"""Filter relevant fields from CivitAI data"""
if not data:
return {}
fields = ["id", "modelId", "name", "trainedWords"] if minimal else [
"id", "modelId", "name", "createdAt", "updatedAt",
"publishedAt", "trainedWords", "baseModel", "description",
"model", "images", "customImages", "creator"
]
return {k: data[k] for k in fields if k in data}
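The minimal projection keeps only identity fields, which is what the checkpoint mapping further below relies on; for example (made-up input, svc being any concrete service instance):

raw = {"id": 42, "modelId": 7, "name": "Example", "files": ["..."], "stats": {}}
svc.filter_civitai_data(raw, minimal=True)
# -> {"id": 42, "modelId": 7, "name": "Example"}  (trainedWords absent, so omitted)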
async def get_folder_tree(self, model_root: str) -> Dict:
"""Get hierarchical folder tree for a specific model root"""
cache = await self.scanner.get_cached_data()
@@ -354,7 +288,7 @@ class BaseModelService(ABC):
from ..config import config
return config.get_preview_static_url(preview_url)
return None
return '/loras_static/images/no-preview.png'
async def get_model_civitai_url(self, model_name: str) -> Dict[str, Optional[str]]:
"""Get the Civitai URL for a model file"""
@@ -379,6 +313,26 @@ class BaseModelService(ABC):
return {'civitai_url': None, 'model_id': None, 'version_id': None}
async def get_model_metadata(self, file_path: str) -> Optional[Dict]:
"""Load full metadata for a single model.
Listing/search endpoints return lightweight cache entries; this method performs
a lazy read of the on-disk metadata snapshot when callers need full detail.
"""
metadata, should_skip = await MetadataManager.load_metadata(file_path, self.metadata_class)
if should_skip or metadata is None:
return None
return self.filter_civitai_data(metadata.to_dict().get("civitai", {}))
async def get_model_description(self, file_path: str) -> Optional[str]:
"""Return the stored modelDescription field for a model."""
metadata, should_skip = await MetadataManager.load_metadata(file_path, self.metadata_class)
if should_skip or metadata is None:
return None
return metadata.modelDescription or ''
async def search_relative_paths(self, search_term: str, limit: int = 15) -> List[str]:
"""Search model relative file paths for autocomplete functionality"""
cache = await self.scanner.get_cached_data()

View File

@@ -1,11 +1,10 @@
import os
import logging
from typing import Dict, List, Optional
from typing import Dict
from .base_model_service import BaseModelService
from ..utils.models import CheckpointMetadata
from ..config import config
from ..utils.routes_common import ModelRouteUtils
logger = logging.getLogger(__name__)
@@ -34,12 +33,11 @@ class CheckpointService(BaseModelService):
"file_size": checkpoint_data.get("size", 0),
"modified": checkpoint_data.get("modified", ""),
"tags": checkpoint_data.get("tags", []),
"modelDescription": checkpoint_data.get("modelDescription", ""),
"from_civitai": checkpoint_data.get("from_civitai", True),
"notes": checkpoint_data.get("notes", ""),
"model_type": checkpoint_data.get("model_type", "checkpoint"),
"favorite": checkpoint_data.get("favorite", False),
"civitai": ModelRouteUtils.filter_civitai_data(checkpoint_data.get("civitai", {}))
"civitai": self.filter_civitai_data(checkpoint_data.get("civitai", {}), minimal=True)
}
def find_duplicate_hashes(self) -> Dict:

View File

@@ -1,11 +1,10 @@
from datetime import datetime
import aiohttp
import os
import logging
import asyncio
from email.parser import Parser
import copy
import logging
import os
from typing import Optional, Dict, Tuple, List
from urllib.parse import unquote
from .model_metadata_provider import CivitaiModelMetadataProvider, ModelMetadataProviderManager
from .downloader import get_downloader
logger = logging.getLogger(__name__)
@@ -19,6 +18,11 @@ class CivitaiClient:
async with cls._lock:
if cls._instance is None:
cls._instance = cls()
# Register this client as a metadata provider
provider_manager = await ModelMetadataProviderManager.get_instance()
provider_manager.register_provider('civitai', CivitaiModelMetadataProvider(cls._instance), True)
return cls._instance
def __init__(self):
@@ -28,80 +32,26 @@ class CivitaiClient:
self._initialized = True
self.base_url = "https://civitai.com/api/v1"
self.headers = {
'User-Agent': 'ComfyUI-LoRA-Manager/1.0'
}
self._session = None
self._session_created_at = None
# Adjust chunk size based on storage type - consider making this configurable
self.chunk_size = 4 * 1024 * 1024 # 4MB chunks for better HDD throughput
@staticmethod
def _remove_comfy_metadata(model_version: Optional[Dict]) -> None:
"""Remove Comfy-specific metadata from model version images."""
if not isinstance(model_version, dict):
return
images = model_version.get("images")
if not isinstance(images, list):
return
for image in images:
if not isinstance(image, dict):
continue
meta = image.get("meta")
if isinstance(meta, dict) and "comfy" in meta:
meta.pop("comfy", None)
@property
async def session(self) -> aiohttp.ClientSession:
"""Lazy initialize the session"""
if self._session is None:
# Optimize TCP connection parameters
connector = aiohttp.TCPConnector(
ssl=True,
limit=8, # Increase from 3 to 8 for better parallelism
ttl_dns_cache=300, # Enable DNS caching with reasonable timeout
force_close=False, # Keep connections for reuse
enable_cleanup_closed=True
)
trust_env = True # Allow using system environment proxy settings
# Configure timeout parameters - increase read timeout for large files and remove sock_read timeout
timeout = aiohttp.ClientTimeout(total=None, connect=60, sock_read=None)
self._session = aiohttp.ClientSession(
connector=connector,
trust_env=trust_env,
timeout=timeout
)
self._session_created_at = datetime.now()
return self._session
async def _ensure_fresh_session(self):
"""Refresh session if it's been open too long"""
if self._session is not None:
if not hasattr(self, '_session_created_at') or \
(datetime.now() - self._session_created_at).total_seconds() > 300: # 5 minutes
await self.close()
self._session = None
return await self.session
def _parse_content_disposition(self, header: str) -> str:
"""Parse filename from content-disposition header"""
if not header:
return None
# Handle quoted filenames
if 'filename="' in header:
start = header.index('filename="') + 10
end = header.index('"', start)
return unquote(header[start:end])
# Fallback to original parsing
disposition = Parser().parsestr(f'Content-Disposition: {header}')
filename = disposition.get_param('filename')
if filename:
return unquote(filename)
return None
def _get_request_headers(self) -> dict:
"""Get request headers with optional API key"""
headers = {
'User-Agent': 'ComfyUI-LoRA-Manager/1.0',
'Content-Type': 'application/json'
}
from .settings_manager import settings
api_key = settings.get('civitai_api_key')
if api_key:
headers['Authorization'] = f'Bearer {api_key}'
return headers
async def _download_file(self, url: str, save_dir: str, default_filename: str, progress_callback=None) -> Tuple[bool, str]:
async def download_file(self, url: str, save_dir: str, default_filename: str, progress_callback=None) -> Tuple[bool, str]:
"""Download file with resumable downloads and retry mechanism
Args:
@@ -113,316 +63,254 @@ class CivitaiClient:
Returns:
Tuple[bool, str]: (success, save_path or error message)
"""
max_retries = 5
retry_count = 0
base_delay = 2.0 # Base delay for exponential backoff
# Initial setup
session = await self._ensure_fresh_session()
downloader = await get_downloader()
save_path = os.path.join(save_dir, default_filename)
part_path = save_path + '.part'
# Get existing file size for resume
resume_offset = 0
if os.path.exists(part_path):
resume_offset = os.path.getsize(part_path)
logger.info(f"Resuming download from offset {resume_offset} bytes")
# Use unified downloader with CivitAI authentication
success, result = await downloader.download_file(
url=url,
save_path=save_path,
progress_callback=progress_callback,
use_auth=True, # Enable CivitAI authentication
allow_resume=True
)
total_size = 0
filename = default_filename
while retry_count <= max_retries:
try:
headers = self._get_request_headers()
# Add Range header for resume if we have partial data
if resume_offset > 0:
headers['Range'] = f'bytes={resume_offset}-'
# Add Range header to allow resumable downloads
headers['Accept-Encoding'] = 'identity' # Disable compression for better chunked downloads
logger.debug(f"Download attempt {retry_count + 1}/{max_retries + 1} from: {url}")
if resume_offset > 0:
logger.debug(f"Requesting range from byte {resume_offset}")
async with session.get(url, headers=headers, allow_redirects=True) as response:
# Handle different response codes
if response.status == 200:
# Full content response
if resume_offset > 0:
# Server doesn't support ranges, restart from beginning
logger.warning("Server doesn't support range requests, restarting download")
resume_offset = 0
if os.path.exists(part_path):
os.remove(part_path)
elif response.status == 206:
# Partial content response (resume successful)
content_range = response.headers.get('Content-Range')
if content_range:
# Parse total size from Content-Range header (e.g., "bytes 1024-2047/2048")
range_parts = content_range.split('/')
if len(range_parts) == 2:
total_size = int(range_parts[1])
logger.info(f"Successfully resumed download from byte {resume_offset}")
elif response.status == 416:
# Range not satisfiable - file might be complete or corrupted
if os.path.exists(part_path):
part_size = os.path.getsize(part_path)
logger.warning(f"Range not satisfiable. Part file size: {part_size}")
# Try to get actual file size
head_response = await session.head(url, headers=self._get_request_headers())
if head_response.status == 200:
actual_size = int(head_response.headers.get('content-length', 0))
if part_size == actual_size:
# File is complete, just rename it
os.rename(part_path, save_path)
if progress_callback:
await progress_callback(100)
return True, save_path
# Remove corrupted part file and restart
os.remove(part_path)
resume_offset = 0
continue
elif response.status == 401:
logger.warning(f"Unauthorized access to resource: {url} (Status 401)")
return False, "Invalid or missing CivitAI API key, or early access restriction."
elif response.status == 403:
logger.warning(f"Forbidden access to resource: {url} (Status 403)")
return False, "Access forbidden: You don't have permission to download this file."
else:
logger.error(f"Download failed for {url} with status {response.status}")
return False, f"Download failed with status {response.status}"
return success, result
# Get filename from content-disposition header (only on first attempt)
if retry_count == 0:
content_disposition = response.headers.get('Content-Disposition')
parsed_filename = self._parse_content_disposition(content_disposition)
if parsed_filename:
filename = parsed_filename
# Update paths with correct filename
save_path = os.path.join(save_dir, filename)
new_part_path = save_path + '.part'
# Rename existing part file if filename changed
if part_path != new_part_path and os.path.exists(part_path):
os.rename(part_path, new_part_path)
part_path = new_part_path
# Get total file size for progress calculation (if not set from Content-Range)
if total_size == 0:
total_size = int(response.headers.get('content-length', 0))
if response.status == 206:
# For partial content, add the offset to get total file size
total_size += resume_offset
current_size = resume_offset
last_progress_report_time = datetime.now()
# Stream download to file with progress updates using larger buffer
loop = asyncio.get_running_loop()
mode = 'ab' if resume_offset > 0 else 'wb'
with open(part_path, mode) as f:
async for chunk in response.content.iter_chunked(self.chunk_size):
if chunk:
# Run blocking file write in executor
await loop.run_in_executor(None, f.write, chunk)
current_size += len(chunk)
# Limit progress update frequency to reduce overhead
now = datetime.now()
time_diff = (now - last_progress_report_time).total_seconds()
if progress_callback and total_size and time_diff >= 1.0:
progress = (current_size / total_size) * 100
await progress_callback(progress)
last_progress_report_time = now
# Download completed successfully
# Verify file size if total_size was provided
final_size = os.path.getsize(part_path)
if total_size > 0 and final_size != total_size:
logger.warning(f"File size mismatch. Expected: {total_size}, Got: {final_size}")
# Don't treat this as a fatal error; rename anyway
# Atomically rename .part to final file with retries
max_rename_attempts = 5
rename_attempt = 0
rename_success = False
while rename_attempt < max_rename_attempts and not rename_success:
try:
os.rename(part_path, save_path)
rename_success = True
except PermissionError as e:
rename_attempt += 1
if rename_attempt < max_rename_attempts:
logger.info(f"File still in use, retrying rename in 2 seconds (attempt {rename_attempt}/{max_rename_attempts})")
await asyncio.sleep(2) # Wait before retrying
else:
logger.error(f"Failed to rename file after {max_rename_attempts} attempts: {e}")
return False, f"Failed to finalize download: {str(e)}"
# Ensure 100% progress is reported
if progress_callback:
await progress_callback(100)
return True, save_path
except (aiohttp.ClientError, aiohttp.ClientPayloadError,
aiohttp.ServerDisconnectedError, asyncio.TimeoutError) as e:
retry_count += 1
logger.warning(f"Network error during download (attempt {retry_count}/{max_retries + 1}): {e}")
if retry_count <= max_retries:
# Calculate delay with exponential backoff
delay = base_delay * (2 ** (retry_count - 1))
logger.info(f"Retrying in {delay} seconds...")
await asyncio.sleep(delay)
# Update resume offset for next attempt
if os.path.exists(part_path):
resume_offset = os.path.getsize(part_path)
logger.info(f"Will resume from byte {resume_offset}")
# Refresh session to get new connection
await self.close()
session = await self._ensure_fresh_session()
continue
else:
logger.error(f"Max retries exceeded for download: {e}")
return False, f"Network error after {max_retries + 1} attempts: {str(e)}"
except Exception as e:
logger.error(f"Unexpected download error: {e}")
return False, str(e)
return False, f"Download failed after {max_retries + 1} attempts"
async def get_model_by_hash(self, model_hash: str) -> Optional[Dict]:
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
try:
session = await self._ensure_fresh_session()
async with session.get(f"{self.base_url}/model-versions/by-hash/{model_hash}") as response:
if response.status == 200:
return await response.json()
return None
downloader = await get_downloader()
success, result = await downloader.make_request(
'GET',
f"{self.base_url}/model-versions/by-hash/{model_hash}",
use_auth=True
)
if success:
# Get model ID from version data
model_id = result.get('modelId')
if model_id:
# Fetch additional model metadata
success_model, data = await downloader.make_request(
'GET',
f"{self.base_url}/models/{model_id}",
use_auth=True
)
if success_model:
# Enrich version_info with model data
result['model']['description'] = data.get("description")
result['model']['tags'] = data.get("tags", [])
# Add creator from model data
result['creator'] = data.get("creator")
self._remove_comfy_metadata(result)
return result, None
# Handle specific error cases
if "not found" in str(result):
return None, "Model not found"
# Other error cases
logger.error(f"Failed to fetch model info for {model_hash[:10]}: {result}")
return None, str(result)
except Exception as e:
logger.error(f"API Error: {str(e)}")
return None
return None, str(e)
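A hedged sketch of consuming the new `(data, error)` tuple; the hash argument is a placeholder:

# Illustrative caller: version data on success, error string on failure.
async def lookup_by_hash(client: "CivitaiClient", sha256_hash: str):
    version, error = await client.get_model_by_hash(sha256_hash)
    if version is None:
        logger.warning("Hash lookup failed: %s", error)
        return None
    return version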
async def download_preview_image(self, image_url: str, save_path: str):
try:
session = await self._ensure_fresh_session()
async with session.get(image_url) as response:
if response.status == 200:
content = await response.read()
with open(save_path, 'wb') as f:
f.write(content)
return True
return False
downloader = await get_downloader()
success, content, headers = await downloader.download_to_memory(
image_url,
use_auth=False # Preview images don't need auth
)
if success:
# Ensure directory exists
os.makedirs(os.path.dirname(save_path), exist_ok=True)
with open(save_path, 'wb') as f:
f.write(content)
return True
return False
except Exception as e:
print(f"Download Error: {str(e)}")
logger.error(f"Download Error: {str(e)}")
return False
async def get_model_versions(self, model_id: str) -> List[Dict]:
"""Get all versions of a model with local availability info"""
try:
session = await self._ensure_fresh_session() # Use fresh session
async with session.get(f"{self.base_url}/models/{model_id}") as response:
if response.status != 200:
return None
data = await response.json()
downloader = await get_downloader()
success, result = await downloader.make_request(
'GET',
f"{self.base_url}/models/{model_id}",
use_auth=True
)
if success:
# Also return model type along with versions
return {
'modelVersions': data.get('modelVersions', []),
'type': data.get('type', '')
'modelVersions': result.get('modelVersions', []),
'type': result.get('type', ''),
'name': result.get('name', '')
}
return None
except Exception as e:
logger.error(f"Error fetching model versions: {e}")
return None
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
"""Get specific model version with additional metadata
Args:
model_id: The Civitai model ID (optional if version_id is provided)
version_id: Optional specific version ID to retrieve
Returns:
Optional[Dict]: The model version data with additional fields or None if not found
"""
"""Get specific model version with additional metadata."""
try:
session = await self._ensure_fresh_session()
headers = self._get_request_headers()
# Case 1: Only version_id is provided
downloader = await get_downloader()
if model_id is None and version_id is not None:
# First get the version info to extract model_id
async with session.get(f"{self.base_url}/model-versions/{version_id}", headers=headers) as response:
if response.status != 200:
return None
version = await response.json()
model_id = version.get('modelId')
if not model_id:
logger.error(f"No modelId found in version {version_id}")
return None
# Now get the model data for additional metadata
async with session.get(f"{self.base_url}/models/{model_id}") as response:
if response.status != 200:
return version # Return version without additional metadata
model_data = await response.json()
# Enrich version with model data
version['model']['description'] = model_data.get("description")
version['model']['tags'] = model_data.get("tags", [])
version['creator'] = model_data.get("creator")
return version
# Case 2: model_id is provided (with or without version_id)
elif model_id is not None:
# Step 1: Get model data to find version_id if not provided and get additional metadata
async with session.get(f"{self.base_url}/models/{model_id}") as response:
if response.status != 200:
return None
data = await response.json()
model_versions = data.get('modelVersions', [])
# Step 2: Determine the version_id to use
target_version_id = version_id
if target_version_id is None:
target_version_id = model_versions[0].get('id')
# Step 3: Get detailed version info using the version_id
async with session.get(f"{self.base_url}/model-versions/{target_version_id}", headers=headers) as response:
if response.status != 200:
return None
version = await response.json()
# Step 4: Enrich version_info with model data
# Add description and tags from model data
version['model']['description'] = data.get("description")
version['model']['tags'] = data.get("tags", [])
# Add creator from model data
version['creator'] = data.get("creator")
return version
# Case 3: Neither model_id nor version_id provided
else:
logger.error("Either model_id or version_id must be provided")
return None
return await self._get_version_by_id_only(downloader, version_id)
if model_id is not None:
return await self._get_version_with_model_id(downloader, model_id, version_id)
logger.error("Either model_id or version_id must be provided")
return None
except Exception as e:
logger.error(f"Error fetching model version: {e}")
return None
async def _get_version_by_id_only(self, downloader, version_id: int) -> Optional[Dict]:
version = await self._fetch_version_by_id(downloader, version_id)
if version is None:
return None
model_id = version.get('modelId')
if not model_id:
logger.error(f"No modelId found in version {version_id}")
return None
model_data = await self._fetch_model_data(downloader, model_id)
if model_data:
self._enrich_version_with_model_data(version, model_data)
self._remove_comfy_metadata(version)
return version
async def _get_version_with_model_id(self, downloader, model_id: int, version_id: Optional[int]) -> Optional[Dict]:
model_data = await self._fetch_model_data(downloader, model_id)
if not model_data:
return None
target_version = self._select_target_version(model_data, model_id, version_id)
if target_version is None:
return None
target_version_id = target_version.get('id')
version = await self._fetch_version_by_id(downloader, target_version_id) if target_version_id else None
if version is None:
model_hash = self._extract_primary_model_hash(target_version)
if model_hash:
version = await self._fetch_version_by_hash(downloader, model_hash)
else:
logger.warning(
f"No primary model hash found for model {model_id} version {target_version_id}"
)
if version is None:
version = self._build_version_from_model_data(target_version, model_id, model_data)
self._enrich_version_with_model_data(version, model_data)
self._remove_comfy_metadata(version)
return version
async def _fetch_model_data(self, downloader, model_id: int) -> Optional[Dict]:
success, data = await downloader.make_request(
'GET',
f"{self.base_url}/models/{model_id}",
use_auth=True
)
if success:
return data
logger.warning(f"Failed to fetch model data for model {model_id}")
return None
async def _fetch_version_by_id(self, downloader, version_id: Optional[int]) -> Optional[Dict]:
if version_id is None:
return None
success, version = await downloader.make_request(
'GET',
f"{self.base_url}/model-versions/{version_id}",
use_auth=True
)
if success:
return version
logger.warning(f"Failed to fetch version by id {version_id}")
return None
async def _fetch_version_by_hash(self, downloader, model_hash: Optional[str]) -> Optional[Dict]:
if not model_hash:
return None
success, version = await downloader.make_request(
'GET',
f"{self.base_url}/model-versions/by-hash/{model_hash}",
use_auth=True
)
if success:
return version
logger.warning(f"Failed to fetch version by hash {model_hash}")
return None
def _select_target_version(self, model_data: Dict, model_id: int, version_id: Optional[int]) -> Optional[Dict]:
model_versions = model_data.get('modelVersions', [])
if not model_versions:
logger.warning(f"No model versions found for model {model_id}")
return None
if version_id is not None:
target_version = next(
(item for item in model_versions if item.get('id') == version_id),
None
)
if target_version is None:
logger.warning(
f"Version {version_id} not found for model {model_id}, defaulting to first version"
)
return model_versions[0]
return target_version
return model_versions[0]
def _extract_primary_model_hash(self, version_entry: Dict) -> Optional[str]:
for file_info in version_entry.get('files', []):
if file_info.get('type') == 'Model' and file_info.get('primary'):
hashes = file_info.get('hashes', {})
model_hash = hashes.get('SHA256')
if model_hash:
return model_hash
return None
def _build_version_from_model_data(self, version_entry: Dict, model_id: int, model_data: Dict) -> Dict:
version = copy.deepcopy(version_entry)
version.pop('index', None)
version['modelId'] = model_id
version['model'] = {
'name': model_data.get('name'),
'type': model_data.get('type'),
'nsfw': model_data.get('nsfw'),
'poi': model_data.get('poi')
}
return version
def _enrich_version_with_model_data(self, version: Dict, model_data: Dict) -> None:
model_info = version.get('model')
if not isinstance(model_info, dict):
model_info = {}
version['model'] = model_info
model_info['description'] = model_data.get("description")
model_info['tags'] = model_data.get("tags", [])
version['creator'] = model_data.get("creator")
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Fetch model version metadata from Civitai
@@ -435,119 +323,38 @@ class CivitaiClient:
- An error message if there was an error, or None on success
"""
try:
session = await self._ensure_fresh_session()
downloader = await get_downloader()
url = f"{self.base_url}/model-versions/{version_id}"
headers = self._get_request_headers()
logger.debug(f"Resolving DNS for model version info: {url}")
async with session.get(url, headers=headers) as response:
if response.status == 200:
logger.debug(f"Successfully fetched model version info for: {version_id}")
return await response.json(), None
# Handle specific error cases
if response.status == 404:
# Try to parse the error message
try:
error_data = await response.json()
error_msg = error_data.get('error', "Model not found (status 404)")
logger.warning(f"Model version not found: {version_id} - {error_msg}")
return None, error_msg
except Exception:
return None, "Model not found (status 404)"
# Other error cases
logger.error(f"Failed to fetch model info for {version_id} (status {response.status})")
return None, f"Failed to fetch model info (status {response.status})"
success, result = await downloader.make_request(
'GET',
url,
use_auth=True
)
if success:
logger.debug(f"Successfully fetched model version info for: {version_id}")
self._remove_comfy_metadata(result)
return result, None
# Handle specific error cases
if "not found" in str(result):
error_msg = f"Model not found"
logger.warning(f"Model version not found: {version_id} - {error_msg}")
return None, error_msg
# Other error cases
logger.error(f"Failed to fetch model info for {version_id}: {result}")
return None, str(result)
except Exception as e:
error_msg = f"Error fetching model version info: {e}"
logger.error(error_msg)
return None, error_msg
async def get_model_metadata(self, model_id: str) -> Tuple[Optional[Dict], int]:
"""Fetch model metadata (description, tags, and creator info) from Civitai API
Args:
model_id: The Civitai model ID
Returns:
Tuple[Optional[Dict], int]: A tuple containing:
- A dictionary with model metadata or None if not found
- The HTTP status code from the request
"""
try:
session = await self._ensure_fresh_session()
headers = self._get_request_headers()
url = f"{self.base_url}/models/{model_id}"
async with session.get(url, headers=headers) as response:
status_code = response.status
if status_code != 200:
logger.warning(f"Failed to fetch model metadata: Status {status_code}")
return None, status_code
data = await response.json()
# Extract relevant metadata
metadata = {
"description": data.get("description") or "No model description available",
"tags": data.get("tags", []),
"creator": {
"username": data.get("creator", {}).get("username"),
"image": data.get("creator", {}).get("image")
}
}
if metadata["description"] or metadata["tags"] or metadata["creator"]["username"]:
return metadata, status_code
else:
logger.warning(f"No metadata found for model {model_id}")
return None, status_code
except Exception as e:
logger.error(f"Error fetching model metadata: {e}", exc_info=True)
return None, 0
# Keep old method for backward compatibility, delegating to the new one
async def get_model_description(self, model_id: str) -> Optional[str]:
"""Fetch the model description from Civitai API (Legacy method)"""
metadata, _ = await self.get_model_metadata(model_id)
return metadata.get("description") if metadata else None
async def close(self):
"""Close the session if it exists"""
if self._session is not None:
await self._session.close()
self._session = None
async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[str]:
"""Get hash from Civitai API"""
try:
session = await self._ensure_fresh_session()
if not session:
return None
response = await session.get(f"{self.base_url}/model-versions/{model_version_id}")
if response.status != 200:
return None
version_info = await response.json()
if not version_info.get('files'):
return None
# Get hash from the first file
for file_info in version_info.get('files', []):
if file_info.get('hashes', {}).get('SHA256'):
# Convert hash to lowercase to standardize
hash_value = file_info['hashes']['SHA256'].lower()
return hash_value
return None
except Exception as e:
logger.error(f"Error getting hash from Civitai: {e}")
return None
async def get_image_info(self, image_id: str) -> Optional[Dict]:
"""Fetch image information from Civitai API
Args:
image_id: The Civitai image ID
@@ -555,23 +362,60 @@ class CivitaiClient:
Optional[Dict]: The image data or None if not found
"""
try:
session = await self._ensure_fresh_session()
headers = self._get_request_headers()
downloader = await get_downloader()
url = f"{self.base_url}/images?imageId={image_id}&nsfw=X"
logger.debug(f"Fetching image info for ID: {image_id}")
async with session.get(url, headers=headers) as response:
if response.status == 200:
data = await response.json()
if data and "items" in data and len(data["items"]) > 0:
logger.debug(f"Successfully fetched image info for ID: {image_id}")
return data["items"][0]
logger.warning(f"No image found with ID: {image_id}")
return None
logger.error(f"Failed to fetch image info for ID: {image_id} (status {response.status})")
success, result = await downloader.make_request(
'GET',
url,
use_auth=True
)
if success:
if result and "items" in result and len(result["items"]) > 0:
logger.debug(f"Successfully fetched image info for ID: {image_id}")
return result["items"][0]
logger.warning(f"No image found with ID: {image_id}")
return None
logger.error(f"Failed to fetch image info for ID: {image_id}: {result}")
return None
except Exception as e:
error_msg = f"Error fetching image info: {e}"
logger.error(error_msg)
return None
async def get_user_models(self, username: str) -> Optional[List[Dict]]:
"""Fetch all models for a specific Civitai user."""
if not username:
return None
try:
downloader = await get_downloader()
url = f"{self.base_url}/models?username={username}"
success, result = await downloader.make_request(
'GET',
url,
use_auth=True
)
if not success:
logger.error("Failed to fetch models for %s: %s", username, result)
return None
items = result.get("items") if isinstance(result, dict) else None
if not isinstance(items, list):
return []
for model in items:
versions = model.get("modelVersions")
if not isinstance(versions, list):
continue
for version in versions:
self._remove_comfy_metadata(version)
return items
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error fetching models for %s: %s", username, exc)
return None
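For illustration, a hypothetical caller of `get_user_models`; the username is a placeholder:

# Lists each model's name and version count for a Civitai user.
async def list_user_models():
    client = await CivitaiClient.get_instance()
    models = await client.get_user_models("example_user")
    for model in models or []:
        print(model.get("name"), len(model.get("modelVersions") or []))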

View File

@@ -0,0 +1,100 @@
"""Service wrapper for coordinating download lifecycle events."""
from __future__ import annotations
import logging
from typing import Any, Awaitable, Callable, Dict, Optional
logger = logging.getLogger(__name__)
class DownloadCoordinator:
"""Manage download scheduling, cancellation and introspection."""
def __init__(
self,
*,
ws_manager,
download_manager_factory: Callable[[], Awaitable],
) -> None:
self._ws_manager = ws_manager
self._download_manager_factory = download_manager_factory
async def schedule_download(self, payload: Dict[str, Any]) -> Dict[str, Any]:
"""Schedule a download using the provided payload."""
download_manager = await self._download_manager_factory()
download_id = payload.get("download_id") or self._ws_manager.generate_download_id()
payload.setdefault("download_id", download_id)
async def progress_callback(progress: Any) -> None:
await self._ws_manager.broadcast_download_progress(
download_id,
{
"status": "progress",
"progress": progress,
"download_id": download_id,
},
)
model_id = self._parse_optional_int(payload.get("model_id"), "model_id")
model_version_id = self._parse_optional_int(
payload.get("model_version_id"), "model_version_id"
)
if model_id is None and model_version_id is None:
raise ValueError(
"Missing required parameter: Please provide either 'model_id' or 'model_version_id'"
)
result = await download_manager.download_from_civitai(
model_id=model_id,
model_version_id=model_version_id,
save_dir=payload.get("model_root"),
relative_path=payload.get("relative_path", ""),
use_default_paths=payload.get("use_default_paths", False),
progress_callback=progress_callback,
download_id=download_id,
source=payload.get("source"),
)
result["download_id"] = download_id
return result
async def cancel_download(self, download_id: str) -> Dict[str, Any]:
"""Cancel an active download and emit a broadcast event."""
download_manager = await self._download_manager_factory()
result = await download_manager.cancel_download(download_id)
await self._ws_manager.broadcast_download_progress(
download_id,
{
"status": "cancelled",
"progress": 0,
"download_id": download_id,
"message": "Download cancelled by user",
},
)
return result
async def list_active_downloads(self) -> Dict[str, Any]:
"""Return the active download map from the underlying manager."""
download_manager = await self._download_manager_factory()
return await download_manager.get_active_downloads()
def _parse_optional_int(self, value: Any, field: str) -> Optional[int]:
"""Parse an optional integer from user input."""
if value is None or value == "":
return None
try:
return int(value)
except (TypeError, ValueError) as exc:
raise ValueError(f"Invalid {field}: Must be an integer") from exc

View File

@@ -3,13 +3,17 @@ import os
import asyncio
from collections import OrderedDict
import uuid
from typing import Dict
from typing import Dict, List
from urllib.parse import urlparse
from ..utils.models import LoraMetadata, CheckpointMetadata, EmbeddingMetadata
from ..utils.constants import CARD_PREVIEW_WIDTH, VALID_LORA_TYPES, CIVITAI_MODEL_TAGS
from ..utils.civitai_utils import rewrite_preview_url
from ..utils.exif_utils import ExifUtils
from ..utils.metadata_manager import MetadataManager
from .service_registry import ServiceRegistry
from .settings_manager import settings
from .settings_manager import get_settings_manager
from .metadata_service import get_default_metadata_provider
from .downloader import get_downloader
# Download to temporary file first
import tempfile
@@ -34,17 +38,10 @@ class DownloadManager:
return
self._initialized = True
self._civitai_client = None # Will be lazily initialized
# Add download management
self._active_downloads = OrderedDict() # download_id -> download_info
self._download_semaphore = asyncio.Semaphore(5) # Limit concurrent downloads
self._download_tasks = {} # download_id -> asyncio.Task
async def _get_civitai_client(self):
"""Lazily initialize CivitaiClient from registry"""
if self._civitai_client is None:
self._civitai_client = await ServiceRegistry.get_civitai_client()
return self._civitai_client
async def _get_lora_scanner(self):
"""Get the lora scanner from registry"""
@@ -57,7 +54,7 @@ class DownloadManager:
async def download_from_civitai(self, model_id: int = None, model_version_id: int = None,
save_dir: str = None, relative_path: str = '',
progress_callback=None, use_default_paths: bool = False,
download_id: str = None) -> Dict:
download_id: str = None, source: str = None) -> Dict:
"""Download model from Civitai with task tracking and concurrency control
Args:
@@ -68,6 +65,7 @@ class DownloadManager:
progress_callback: Callback function for progress updates
use_default_paths: Flag to use default paths
download_id: Unique identifier for this download task
source: Optional source parameter to specify metadata provider
Returns:
Dict with download result
@@ -91,7 +89,7 @@ class DownloadManager:
download_task = asyncio.create_task(
self._download_with_semaphore(
task_id, model_id, model_version_id, save_dir,
relative_path, progress_callback, use_default_paths
relative_path, progress_callback, use_default_paths, source
)
)
@@ -112,7 +110,8 @@ class DownloadManager:
async def _download_with_semaphore(self, task_id: str, model_id: int, model_version_id: int,
save_dir: str, relative_path: str,
progress_callback=None, use_default_paths: bool = False):
progress_callback=None, use_default_paths: bool = False,
source: str = None):
"""Execute download with semaphore to limit concurrency"""
# Update status to waiting
if task_id in self._active_downloads:
@@ -142,7 +141,7 @@ class DownloadManager:
result = await self._execute_original_download(
model_id, model_version_id, save_dir,
relative_path, tracking_callback, use_default_paths,
task_id
task_id, source
)
# Update status based on result
@@ -177,7 +176,7 @@ class DownloadManager:
async def _execute_original_download(self, model_id, model_version_id, save_dir,
relative_path, progress_callback, use_default_paths,
download_id=None):
download_id=None, source=None):
"""Wrapper for original download_from_civitai implementation"""
try:
# Check if model version already exists in library
@@ -199,11 +198,15 @@ class DownloadManager:
if await embedding_scanner.check_model_version_exists(model_version_id):
return {'success': False, 'error': 'Model version already exists in embedding library'}
# Get civitai client
civitai_client = await self._get_civitai_client()
# Get metadata provider based on source parameter
if source == 'civarchive':
from .metadata_service import get_metadata_provider
metadata_provider = await get_metadata_provider('civarchive')
else:
metadata_provider = await get_default_metadata_provider()
# Get version info based on the provided identifier
version_info = await civitai_client.get_model_version(model_id, model_version_id)
version_info = await metadata_provider.get_model_version(model_id, model_version_id)
if not version_info:
return {'success': False, 'error': 'Failed to fetch model metadata'}
@@ -240,23 +243,24 @@ class DownloadManager:
# Handle use_default_paths
if use_default_paths:
settings_manager = get_settings_manager()
# Set save_dir based on model type
if model_type == 'checkpoint':
default_path = settings.get('default_checkpoint_root')
default_path = settings_manager.get('default_checkpoint_root')
if not default_path:
return {'success': False, 'error': 'Default checkpoint root path not set in settings'}
save_dir = default_path
elif model_type == 'lora':
default_path = settings.get('default_lora_root')
default_path = settings_manager.get('default_lora_root')
if not default_path:
return {'success': False, 'error': 'Default lora root path not set in settings'}
save_dir = default_path
elif model_type == 'embedding':
default_path = settings.get('default_embedding_root')
default_path = settings_manager.get('default_embedding_root')
if not default_path:
return {'success': False, 'error': 'Default embedding root path not set in settings'}
save_dir = default_path
# Calculate relative path using template
relative_path = self._calculate_relative_path(version_info, model_type)
@@ -293,6 +297,19 @@ class DownloadManager:
file_info = next((f for f in version_info.get('files', []) if f.get('primary')), None)
if not file_info:
return {'success': False, 'error': 'No primary file found in metadata'}
mirrors = file_info.get('mirrors') or []
download_urls = []
if mirrors:
for mirror in mirrors:
if mirror.get('deletedAt') is None and mirror.get('url'):
download_urls.append(mirror['url'])
else:
download_url = file_info.get('downloadUrl')
if download_url:
download_urls.append(download_url)
if not download_urls:
return {'success': False, 'error': 'No download URL found for primary file'}
# 3. Prepare download
file_name = file_info['name']
@@ -311,7 +328,7 @@ class DownloadManager:
# 6. Start download process
result = await self._execute_download(
download_url=file_info.get('downloadUrl', ''),
download_urls=download_urls,
save_dir=save_dir,
metadata=metadata,
version_info=version_info,
@@ -346,7 +363,8 @@ class DownloadManager:
Relative path string
"""
# Get path template from settings for specific model type
path_template = settings.get_download_path_template(model_type)
settings_manager = get_settings_manager()
path_template = settings_manager.get_download_path_template(model_type)
# If template is empty, return empty path (flat structure)
if not path_template:
@@ -363,7 +381,7 @@ class DownloadManager:
author = 'Anonymous'
# Apply mapping if available
base_model_mappings = settings.get('base_model_path_mappings', {})
base_model_mappings = settings_manager.get('base_model_path_mappings', {})
mapped_base_model = base_model_mappings.get(base_model, base_model)
# Get model tags
@@ -385,17 +403,44 @@ class DownloadManager:
formatted_path = formatted_path.replace('{base_model}', mapped_base_model)
formatted_path = formatted_path.replace('{first_tag}', first_tag)
formatted_path = formatted_path.replace('{author}', author)
if model_type == 'embedding':
formatted_path = formatted_path.replace(' ', '_')
return formatted_path
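An illustrative expansion of a download path template; the template string and substituted values are assumptions, mirroring only the `replace()` chain above, not the full settings lookup:

template = "{base_model}/{first_tag}/{author}"
formatted_path = (template
                  .replace("{base_model}", "SDXL")
                  .replace("{first_tag}", "style")
                  .replace("{author}", "Anonymous"))
# -> "SDXL/style/Anonymous"; embeddings additionally get spaces replaced by underscores.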
async def _execute_download(self, download_url: str, save_dir: str,
metadata, version_info: Dict,
relative_path: str, progress_callback=None,
model_type: str = "lora", download_id: str = None) -> Dict:
async def _execute_download(self, download_urls: List[str], save_dir: str,
metadata, version_info: Dict,
relative_path: str, progress_callback=None,
model_type: str = "lora", download_id: str = None) -> Dict:
"""Execute the actual download process including preview images and model files"""
try:
civitai_client = await self._get_civitai_client()
save_path = metadata.file_path
# Extract original filename details
original_filename = os.path.basename(metadata.file_path)
base_name, extension = os.path.splitext(original_filename)
# Check for filename conflicts and generate unique filename if needed
# Use the hash from metadata for conflict resolution
def hash_provider():
return metadata.sha256
unique_filename = metadata.generate_unique_filename(
save_dir,
base_name,
extension,
hash_provider=hash_provider
)
# Update paths if filename changed
if unique_filename != original_filename:
logger.info(f"Filename conflict detected. Changing '{original_filename}' to '{unique_filename}'")
save_path = os.path.join(save_dir, unique_filename)
# Update metadata with new file path and name
metadata.file_path = save_path.replace(os.sep, '/')
metadata.file_name = os.path.splitext(unique_filename)[0]
else:
save_path = metadata.file_path
part_path = save_path + '.part'
metadata_path = os.path.splitext(save_path)[0] + '.metadata.json'
@@ -407,91 +452,152 @@ class DownloadManager:
# Download preview image if available
images = version_info.get('images', [])
if images:
# Report preview download progress
if progress_callback:
await progress_callback(1) # 1% progress for starting preview download
# Check if it's a video or an image
is_video = images[0].get('type') == 'video'
if (is_video):
# For videos, use .mp4 extension
preview_ext = '.mp4'
preview_path = os.path.splitext(save_path)[0] + preview_ext
# Download video directly
if await civitai_client.download_preview_image(images[0]['url'], preview_path):
metadata.preview_url = preview_path.replace(os.sep, '/')
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
else:
# For images, use WebP format for better performance
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
temp_path = temp_file.name
# Download the original image to temp path
if await civitai_client.download_preview_image(images[0]['url'], temp_path):
# Optimize and convert to WebP
preview_path = os.path.splitext(save_path)[0] + '.webp'
# Use ExifUtils to optimize and convert the image
optimized_data, _ = ExifUtils.optimize_image(
image_data=temp_path,
target_width=CARD_PREVIEW_WIDTH,
format='webp',
quality=85,
preserve_metadata=False
)
# Save the optimized image
with open(preview_path, 'wb') as f:
f.write(optimized_data)
# Update metadata
metadata.preview_url = preview_path.replace(os.sep, '/')
metadata.preview_nsfw_level = images[0].get('nsfwLevel', 0)
# Remove temporary file
try:
os.unlink(temp_path)
except Exception as e:
logger.warning(f"Failed to delete temp file: {e}")
first_image = images[0] if isinstance(images[0], dict) else None
preview_url = first_image.get('url') if first_image else None
media_type = (first_image.get('type') or '').lower() if first_image else ''
nsfw_level = first_image.get('nsfwLevel', 0) if first_image else 0
def _extension_from_url(url: str, fallback: str) -> str:
try:
parsed = urlparse(url)
except ValueError:
return fallback
ext = os.path.splitext(parsed.path)[1]
return ext or fallback
preview_downloaded = False
preview_path = None
if preview_url:
downloader = await get_downloader()
if media_type == 'video':
preview_ext = _extension_from_url(preview_url, '.mp4')
preview_path = os.path.splitext(save_path)[0] + preview_ext
rewritten_url, rewritten = rewrite_preview_url(preview_url, media_type='video')
attempt_urls: List[str] = []
if rewritten:
attempt_urls.append(rewritten_url)
attempt_urls.append(preview_url)
seen_attempts = set()
for attempt in attempt_urls:
if not attempt or attempt in seen_attempts:
continue
seen_attempts.add(attempt)
success, _ = await downloader.download_file(
attempt,
preview_path,
use_auth=False
)
if success:
preview_downloaded = True
break
else:
rewritten_url, rewritten = rewrite_preview_url(preview_url, media_type='image')
if rewritten:
preview_ext = _extension_from_url(preview_url, '.png')
preview_path = os.path.splitext(save_path)[0] + preview_ext
success, _ = await downloader.download_file(
rewritten_url,
preview_path,
use_auth=False
)
if success:
preview_downloaded = True
if not preview_downloaded:
temp_path: str | None = None
try:
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
temp_path = temp_file.name
success, content, _ = await downloader.download_to_memory(
preview_url,
use_auth=False
)
if success:
with open(temp_path, 'wb') as temp_file_handle:
temp_file_handle.write(content)
preview_path = os.path.splitext(save_path)[0] + '.webp'
optimized_data, _ = ExifUtils.optimize_image(
image_data=temp_path,
target_width=CARD_PREVIEW_WIDTH,
format='webp',
quality=85,
preserve_metadata=False
)
with open(preview_path, 'wb') as preview_file:
preview_file.write(optimized_data)
preview_downloaded = True
finally:
if temp_path and os.path.exists(temp_path):
try:
os.unlink(temp_path)
except Exception as e:
logger.warning(f"Failed to delete temp file: {e}")
if preview_downloaded and preview_path:
metadata.preview_url = preview_path.replace(os.sep, '/')
metadata.preview_nsfw_level = nsfw_level
if download_id and download_id in self._active_downloads:
self._active_downloads[download_id]['preview_path'] = preview_path
# Report preview download completion
if progress_callback:
await progress_callback(3) # 3% progress after preview download
# Download model file with progress tracking
success, result = await civitai_client._download_file(
download_url,
save_dir,
os.path.basename(save_path),
progress_callback=lambda p: self._handle_download_progress(p, progress_callback)
)
# Download model file with progress tracking using downloader
downloader = await get_downloader()
last_error = None
for download_url in download_urls:
use_auth = download_url.startswith("https://civitai.com/api/download/")
success, result = await downloader.download_file(
download_url,
save_path, # Use full path instead of separate dir and filename
progress_callback=lambda p: self._handle_download_progress(p, progress_callback),
use_auth=use_auth # Only use authentication for Civitai downloads
)
if not success:
if success:
break
last_error = result
if os.path.exists(save_path):
try:
os.remove(save_path)
except Exception as e:
logger.warning(f"Failed to remove incomplete file {save_path}: {e}")
else:
# Clean up files on failure, but preserve .part file for resume
cleanup_files = [metadata_path]
if metadata.preview_url and os.path.exists(metadata.preview_url):
cleanup_files.append(metadata.preview_url)
preview_path_value = getattr(metadata, 'preview_url', None)
if preview_path_value and os.path.exists(preview_path_value):
cleanup_files.append(preview_path_value)
for path in cleanup_files:
if path and os.path.exists(path):
try:
os.remove(path)
except Exception as e:
logger.warning(f"Failed to cleanup file {path}: {e}")
# Log but don't remove .part file to allow resume
if os.path.exists(part_path):
logger.info(f"Preserving partial download for resume: {part_path}")
return {'success': False, 'error': result}
return {'success': False, 'error': last_error or 'Failed to download file'}
# 4. Update file information (size and modified time)
metadata.update_file_info(save_path)
# 5. Final metadata update
await MetadataManager.save_metadata(save_path, metadata, True)
await MetadataManager.save_metadata(save_path, metadata)
# 6. Update cache based on model type
if model_type == "checkpoint":
@@ -606,7 +712,15 @@ class DownloadManager:
except Exception as e:
logger.error(f"Error deleting metadata file: {e}")
# Delete preview file if exists (.webp or .mp4)
preview_path_value = download_info.get('preview_path')
if preview_path_value and os.path.exists(preview_path_value):
try:
os.unlink(preview_path_value)
logger.debug(f"Deleted preview file: {preview_path_value}")
except Exception as e:
logger.error(f"Error deleting preview file: {e}")
# Delete preview file if exists (.webp or .mp4) for legacy paths
for preview_ext in ['.webp', '.mp4']:
preview_path = os.path.splitext(file_path)[0] + preview_ext
if os.path.exists(preview_path):
@@ -639,4 +753,4 @@ class DownloadManager:
}
for task_id, info in self._active_downloads.items()
]
}
}

py/services/downloader.py (new file, 541 lines)
View File

@@ -0,0 +1,541 @@
"""
Unified download manager for all HTTP/HTTPS downloads in the application.
This module provides a centralized download service with:
- Singleton pattern for global session management
- Support for authenticated downloads (e.g., CivitAI API key)
- Resumable downloads with automatic retry
- Progress tracking and callbacks
- Optimized connection pooling and timeouts
- Unified error handling and logging
"""
import os
import logging
import asyncio
import aiohttp
from datetime import datetime
from typing import Optional, Dict, Tuple, Callable, Union
from ..services.settings_manager import get_settings_manager
logger = logging.getLogger(__name__)
class Downloader:
"""Unified downloader for all HTTP/HTTPS downloads in the application."""
_instance = None
_lock = asyncio.Lock()
@classmethod
async def get_instance(cls):
"""Get singleton instance of Downloader"""
async with cls._lock:
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
"""Initialize the downloader with optimal settings"""
# Check if already initialized for singleton pattern
if hasattr(self, '_initialized'):
return
self._initialized = True
# Session management
self._session = None
self._session_created_at = None
self._proxy_url = None # Store proxy URL for current session
# Configuration
self.chunk_size = 4 * 1024 * 1024 # 4MB chunks for better throughput
self.max_retries = 5
self.base_delay = 2.0 # Base delay for exponential backoff
self.session_timeout = 300 # 5 minutes
# Default headers
self.default_headers = {
'User-Agent': 'ComfyUI-LoRA-Manager/1.0'
}
@property
async def session(self) -> aiohttp.ClientSession:
"""Get or create the global aiohttp session with optimized settings"""
if self._session is None or self._should_refresh_session():
await self._create_session()
return self._session
@property
def proxy_url(self) -> Optional[str]:
"""Get the current proxy URL (initialize if needed)"""
if not hasattr(self, '_proxy_url'):
self._proxy_url = None
return self._proxy_url
def _should_refresh_session(self) -> bool:
"""Check if session should be refreshed"""
if self._session is None:
return True
if not hasattr(self, '_session_created_at') or self._session_created_at is None:
return True
# Refresh if session is older than timeout
if (datetime.now() - self._session_created_at).total_seconds() > self.session_timeout:
return True
return False
async def _create_session(self):
"""Create a new aiohttp session with optimized settings"""
# Close existing session if any
if self._session is not None:
await self._session.close()
# Check for app-level proxy settings
proxy_url = None
settings_manager = get_settings_manager()
if settings_manager.get('proxy_enabled', False):
proxy_host = settings_manager.get('proxy_host', '').strip()
proxy_port = settings_manager.get('proxy_port', '').strip()
proxy_type = settings_manager.get('proxy_type', 'http').lower()
proxy_username = settings_manager.get('proxy_username', '').strip()
proxy_password = settings_manager.get('proxy_password', '').strip()
if proxy_host and proxy_port:
# Build proxy URL
if proxy_username and proxy_password:
proxy_url = f"{proxy_type}://{proxy_username}:{proxy_password}@{proxy_host}:{proxy_port}"
else:
proxy_url = f"{proxy_type}://{proxy_host}:{proxy_port}"
logger.debug(f"Using app-level proxy: {proxy_type}://{proxy_host}:{proxy_port}")
logger.debug("Proxy mode: app-level proxy is active.")
else:
logger.debug("Proxy mode: system-level proxy (trust_env) will be used if configured in environment.")
# Optimize TCP connection parameters
connector = aiohttp.TCPConnector(
ssl=True,
limit=8, # Concurrent connections
ttl_dns_cache=300, # DNS cache timeout
force_close=False, # Keep connections for reuse
enable_cleanup_closed=True
)
# Configure timeout parameters
timeout = aiohttp.ClientTimeout(
total=None, # No total timeout for large downloads
connect=60, # Connection timeout
sock_read=300 # 5 minute socket read timeout
)
self._session = aiohttp.ClientSession(
connector=connector,
trust_env=proxy_url is None, # Only use system proxy if no app-level proxy is set
timeout=timeout
)
# Store proxy URL for use in requests
self._proxy_url = proxy_url
self._session_created_at = datetime.now()
logger.debug("Created new HTTP session with proxy settings. App-level proxy: %s, System-level proxy (trust_env): %s", bool(proxy_url), proxy_url is None)
def _get_auth_headers(self, use_auth: bool = False) -> Dict[str, str]:
"""Get headers with optional authentication"""
headers = self.default_headers.copy()
if use_auth:
# Add CivitAI API key if available
settings_manager = get_settings_manager()
api_key = settings_manager.get('civitai_api_key')
if api_key:
headers['Authorization'] = f'Bearer {api_key}'
headers['Content-Type'] = 'application/json'
return headers
async def download_file(
self,
url: str,
save_path: str,
progress_callback: Optional[Callable[[float], None]] = None,
use_auth: bool = False,
custom_headers: Optional[Dict[str, str]] = None,
allow_resume: bool = True
) -> Tuple[bool, str]:
"""
Download a file with resumable downloads and retry mechanism
Args:
url: Download URL
save_path: Full path where the file should be saved
progress_callback: Optional callback for progress updates (0-100)
use_auth: Whether to include authentication headers (e.g., CivitAI API key)
custom_headers: Additional headers to include in request
allow_resume: Whether to support resumable downloads
Returns:
Tuple[bool, str]: (success, save_path or error message)
"""
retry_count = 0
part_path = save_path + '.part' if allow_resume else save_path
# Prepare headers
headers = self._get_auth_headers(use_auth)
if custom_headers:
headers.update(custom_headers)
# Get existing file size for resume
resume_offset = 0
if allow_resume and os.path.exists(part_path):
resume_offset = os.path.getsize(part_path)
logger.info(f"Resuming download from offset {resume_offset} bytes")
total_size = 0
while retry_count <= self.max_retries:
try:
session = await self.session
# Debug log for proxy mode at request time
if self.proxy_url:
logger.debug(f"[download_file] Using app-level proxy: {self.proxy_url}")
else:
logger.debug("[download_file] Using system-level proxy (trust_env) if configured.")
# Add Range header for resume if we have partial data
request_headers = headers.copy()
if allow_resume and resume_offset > 0:
request_headers['Range'] = f'bytes={resume_offset}-'
# Disable compression for better chunked downloads
request_headers['Accept-Encoding'] = 'identity'
logger.debug(f"Download attempt {retry_count + 1}/{self.max_retries + 1} from: {url}")
if resume_offset > 0:
logger.debug(f"Requesting range from byte {resume_offset}")
async with session.get(url, headers=request_headers, allow_redirects=True, proxy=self.proxy_url) as response:
# Handle different response codes
if response.status == 200:
# Full content response
if resume_offset > 0:
# Server doesn't support ranges, restart from beginning
logger.warning("Server doesn't support range requests, restarting download")
resume_offset = 0
if os.path.exists(part_path):
os.remove(part_path)
elif response.status == 206:
# Partial content response (resume successful)
content_range = response.headers.get('Content-Range')
if content_range:
# Parse total size from Content-Range header (e.g., "bytes 1024-2047/2048")
range_parts = content_range.split('/')
if len(range_parts) == 2:
total_size = int(range_parts[1])
logger.info(f"Successfully resumed download from byte {resume_offset}")
elif response.status == 416:
# Range not satisfiable - file might be complete or corrupted
if allow_resume and os.path.exists(part_path):
part_size = os.path.getsize(part_path)
logger.warning(f"Range not satisfiable. Part file size: {part_size}")
# Try to get actual file size
head_response = await session.head(url, headers=headers, proxy=self.proxy_url)
if head_response.status == 200:
actual_size = int(head_response.headers.get('content-length', 0))
if part_size == actual_size:
# File is complete, just rename it
if allow_resume:
os.rename(part_path, save_path)
if progress_callback:
await progress_callback(100)
return True, save_path
# Remove corrupted part file and restart
os.remove(part_path)
resume_offset = 0
continue
elif response.status == 401:
logger.warning(f"Unauthorized access to resource: {url} (Status 401)")
return False, "Invalid or missing API key, or early access restriction."
elif response.status == 403:
logger.warning(f"Forbidden access to resource: {url} (Status 403)")
return False, "Access forbidden: You don't have permission to download this file."
elif response.status == 404:
logger.warning(f"Resource not found: {url} (Status 404)")
return False, "File not found - the download link may be invalid or expired."
else:
logger.error(f"Download failed for {url} with status {response.status}")
return False, f"Download failed with status {response.status}"
# Get total file size for progress calculation (if not set from Content-Range)
if total_size == 0:
total_size = int(response.headers.get('content-length', 0))
if response.status == 206:
# For partial content, add the offset to get total file size
total_size += resume_offset
current_size = resume_offset
last_progress_report_time = datetime.now()
# Ensure directory exists
os.makedirs(os.path.dirname(save_path), exist_ok=True)
# Stream download to file with progress updates
loop = asyncio.get_running_loop()
mode = 'ab' if (allow_resume and resume_offset > 0) else 'wb'
with open(part_path, mode) as f:
async for chunk in response.content.iter_chunked(self.chunk_size):
if chunk:
# Run blocking file write in executor
await loop.run_in_executor(None, f.write, chunk)
current_size += len(chunk)
# Limit progress update frequency to reduce overhead
now = datetime.now()
time_diff = (now - last_progress_report_time).total_seconds()
if progress_callback and total_size and time_diff >= 1.0:
progress = (current_size / total_size) * 100
await progress_callback(progress)
last_progress_report_time = now
# Download completed successfully
# Verify file size if total_size was provided
final_size = os.path.getsize(part_path)
if total_size > 0 and final_size != total_size:
logger.warning(f"File size mismatch. Expected: {total_size}, Got: {final_size}")
# Don't treat this as a fatal error; continue anyway
# Atomically rename .part to final file (only if using resume)
if allow_resume and part_path != save_path:
max_rename_attempts = 5
rename_attempt = 0
rename_success = False
while rename_attempt < max_rename_attempts and not rename_success:
try:
# If the destination file exists, remove it first (Windows safe)
if os.path.exists(save_path):
os.remove(save_path)
os.rename(part_path, save_path)
rename_success = True
except PermissionError as e:
rename_attempt += 1
if rename_attempt < max_rename_attempts:
logger.info(f"File still in use, retrying rename in 2 seconds (attempt {rename_attempt}/{max_rename_attempts})")
await asyncio.sleep(2)
else:
logger.error(f"Failed to rename file after {max_rename_attempts} attempts: {e}")
return False, f"Failed to finalize download: {str(e)}"
# Ensure 100% progress is reported
if progress_callback:
await progress_callback(100)
return True, save_path
except (aiohttp.ClientError, aiohttp.ClientPayloadError,
aiohttp.ServerDisconnectedError, asyncio.TimeoutError) as e:
retry_count += 1
logger.warning(f"Network error during download (attempt {retry_count}/{self.max_retries + 1}): {e}")
if retry_count <= self.max_retries:
# Calculate delay with exponential backoff
delay = self.base_delay * (2 ** (retry_count - 1))
logger.info(f"Retrying in {delay} seconds...")
await asyncio.sleep(delay)
# Update resume offset for next attempt
if allow_resume and os.path.exists(part_path):
resume_offset = os.path.getsize(part_path)
logger.info(f"Will resume from byte {resume_offset}")
# Refresh session to get new connection
await self._create_session()
continue
else:
logger.error(f"Max retries exceeded for download: {e}")
return False, f"Network error after {self.max_retries + 1} attempts: {str(e)}"
except Exception as e:
logger.error(f"Unexpected download error: {e}")
return False, str(e)
return False, f"Download failed after {self.max_retries + 1} attempts"
async def download_to_memory(
self,
url: str,
use_auth: bool = False,
custom_headers: Optional[Dict[str, str]] = None,
return_headers: bool = False
) -> Tuple[bool, Union[bytes, str], Optional[Dict]]:
"""
Download a file to memory (for small files like preview images)
Args:
url: Download URL
use_auth: Whether to include authentication headers
custom_headers: Additional headers to include in request
return_headers: Whether to return response headers along with content
Returns:
Tuple[bool, Union[bytes, str], Optional[Dict]]: (success, content or error message, response headers if requested)
"""
try:
session = await self.session
# Debug log for proxy mode at request time
if self.proxy_url:
logger.debug(f"[download_to_memory] Using app-level proxy: {self.proxy_url}")
else:
logger.debug("[download_to_memory] Using system-level proxy (trust_env) if configured.")
# Prepare headers
headers = self._get_auth_headers(use_auth)
if custom_headers:
headers.update(custom_headers)
async with session.get(url, headers=headers, proxy=self.proxy_url) as response:
if response.status == 200:
content = await response.read()
if return_headers:
return True, content, dict(response.headers)
else:
return True, content, None
elif response.status == 401:
error_msg = "Unauthorized access - invalid or missing API key"
return False, error_msg, None
elif response.status == 403:
error_msg = "Access forbidden"
return False, error_msg, None
elif response.status == 404:
error_msg = "File not found"
return False, error_msg, None
else:
error_msg = f"Download failed with status {response.status}"
return False, error_msg, None
except Exception as e:
logger.error(f"Error downloading to memory from {url}: {e}")
return False, str(e), None
async def get_response_headers(
self,
url: str,
use_auth: bool = False,
custom_headers: Optional[Dict[str, str]] = None
) -> Tuple[bool, Union[Dict, str]]:
"""
Get response headers without downloading the full content
Args:
url: URL to check
use_auth: Whether to include authentication headers
custom_headers: Additional headers to include in request
Returns:
Tuple[bool, Union[Dict, str]]: (success, headers dict or error message)
"""
try:
session = await self.session
# Debug log for proxy mode at request time
if self.proxy_url:
logger.debug(f"[get_response_headers] Using app-level proxy: {self.proxy_url}")
else:
logger.debug("[get_response_headers] Using system-level proxy (trust_env) if configured.")
# Prepare headers
headers = self._get_auth_headers(use_auth)
if custom_headers:
headers.update(custom_headers)
async with session.head(url, headers=headers, proxy=self.proxy_url) as response:
if response.status == 200:
return True, dict(response.headers)
else:
return False, f"Head request failed with status {response.status}"
except Exception as e:
logger.error(f"Error getting headers from {url}: {e}")
return False, str(e)
async def make_request(
self,
method: str,
url: str,
use_auth: bool = False,
custom_headers: Optional[Dict[str, str]] = None,
**kwargs
) -> Tuple[bool, Union[Dict, str]]:
"""
Make a generic HTTP request and return JSON response
Args:
method: HTTP method (GET, POST, etc.)
url: Request URL
use_auth: Whether to include authentication headers
custom_headers: Additional headers to include in request
**kwargs: Additional arguments for aiohttp request
Returns:
Tuple[bool, Union[Dict, str]]: (success, response data or error message)
"""
try:
session = await self.session
# Debug log for proxy mode at request time
if self.proxy_url:
logger.debug(f"[make_request] Using app-level proxy: {self.proxy_url}")
else:
logger.debug("[make_request] Using system-level proxy (trust_env) if configured.")
# Prepare headers
headers = self._get_auth_headers(use_auth)
if custom_headers:
headers.update(custom_headers)
# Add proxy to kwargs if not already present
if 'proxy' not in kwargs:
kwargs['proxy'] = self.proxy_url
async with session.request(method, url, headers=headers, **kwargs) as response:
if response.status == 200:
# Try to parse as JSON, fall back to text
try:
data = await response.json()
return True, data
except Exception:
text = await response.text()
return True, text
elif response.status == 401:
return False, "Unauthorized access - invalid or missing API key"
elif response.status == 403:
return False, "Access forbidden"
elif response.status == 404:
return False, "Resource not found"
else:
return False, f"Request failed with status {response.status}"
except Exception as e:
logger.error(f"Error making {method} request to {url}: {e}")
return False, str(e)
async def close(self):
"""Close the HTTP session"""
if self._session is not None:
await self._session.close()
self._session = None
self._session_created_at = None
self._proxy_url = None
logger.debug("Closed HTTP session")
async def refresh_session(self):
"""Force refresh the HTTP session (useful when proxy settings change)"""
await self._create_session()
logger.info("HTTP session refreshed due to settings change")
# Global instance accessor
async def get_downloader() -> Downloader:
"""Get the global downloader instance"""
return await Downloader.get_instance()
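A minimal usage sketch for the downloader above, assuming the module exposes `get_downloader` as shown; the import path, URL, and save path are placeholders. Retries back off exponentially (`base_delay * 2 ** (attempt - 1)`), and resumable downloads are staged through a `.part` file.

```python
# Hypothetical caller; import path, URL, and save path are placeholders.
import asyncio

from downloader import get_downloader  # assumed import path

async def main() -> None:
    downloader = await get_downloader()

    async def on_progress(percent: float) -> None:
        # download_file throttles these callbacks to roughly one per second
        print(f"progress: {percent:.1f}%")

    success, result = await downloader.download_file(
        url="https://example.com/model.safetensors",  # placeholder URL
        save_path="/tmp/model.safetensors",           # placeholder path
        progress_callback=on_progress,
        use_auth=False,
        allow_resume=True,  # stage through .part and resume after network errors
    )
    # On success `result` is the final save path; on failure it is an error message.
    print(success, result)

asyncio.run(main())
```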


@@ -1,11 +1,10 @@
import os
import logging
-from typing import Dict, List, Optional
+from typing import Dict
from .base_model_service import BaseModelService
from ..utils.models import EmbeddingMetadata
from ..config import config
-from ..utils.routes_common import ModelRouteUtils
logger = logging.getLogger(__name__)
@@ -34,12 +33,11 @@ class EmbeddingService(BaseModelService):
"file_size": embedding_data.get("size", 0),
"modified": embedding_data.get("modified", ""),
"tags": embedding_data.get("tags", []),
"modelDescription": embedding_data.get("modelDescription", ""),
"from_civitai": embedding_data.get("from_civitai", True),
"notes": embedding_data.get("notes", ""),
"model_type": embedding_data.get("model_type", "embedding"),
"favorite": embedding_data.get("favorite", False),
"civitai": ModelRouteUtils.filter_civitai_data(embedding_data.get("civitai", {}))
"civitai": self.filter_civitai_data(embedding_data.get("civitai", {}), minimal=True)
}
def find_duplicate_hashes(self) -> Dict:


@@ -0,0 +1,297 @@
"""Service for cleaning up example image folders."""
from __future__ import annotations
import asyncio
import logging
import os
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Tuple
from .service_registry import ServiceRegistry
from .settings_manager import get_settings_manager
from ..utils.example_images_paths import iter_library_roots
logger = logging.getLogger(__name__)
@dataclass(slots=True)
class CleanupResult:
"""Structured result returned from cleanup operations."""
success: bool
checked_folders: int
moved_empty_folders: int
moved_orphaned_folders: int
skipped_non_hash: int
move_failures: int
errors: List[str]
deleted_root: str | None
partial_success: bool
def to_dict(self) -> Dict[str, object]:
"""Convert the dataclass to a serialisable dictionary."""
data = {
"success": self.success,
"checked_folders": self.checked_folders,
"moved_empty_folders": self.moved_empty_folders,
"moved_orphaned_folders": self.moved_orphaned_folders,
"moved_total": self.moved_empty_folders + self.moved_orphaned_folders,
"skipped_non_hash": self.skipped_non_hash,
"move_failures": self.move_failures,
"errors": self.errors,
"deleted_root": self.deleted_root,
"partial_success": self.partial_success,
}
return data
class ExampleImagesCleanupService:
"""Encapsulates logic for cleaning example image folders."""
DELETED_FOLDER_NAME = "_deleted"
def __init__(self, deleted_folder_name: str | None = None) -> None:
self._deleted_folder_name = deleted_folder_name or self.DELETED_FOLDER_NAME
async def cleanup_example_image_folders(self) -> Dict[str, object]:
"""Clean empty or orphaned example image folders by moving them under a deleted bucket."""
settings_manager = get_settings_manager()
example_images_path = settings_manager.get("example_images_path")
if not example_images_path:
logger.debug("Cleanup skipped: example images path not configured")
return {
"success": False,
"error": "Example images path is not configured.",
"error_code": "path_not_configured",
}
base_root = Path(example_images_path)
if not base_root.exists():
logger.debug("Cleanup skipped: example images path missing -> %s", base_root)
return {
"success": False,
"error": "Example images path does not exist.",
"error_code": "path_not_found",
}
try:
lora_scanner = await ServiceRegistry.get_lora_scanner()
checkpoint_scanner = await ServiceRegistry.get_checkpoint_scanner()
embedding_scanner = await ServiceRegistry.get_embedding_scanner()
except Exception as exc: # pragma: no cover - defensive guard
logger.error("Failed to acquire scanners for cleanup: %s", exc, exc_info=True)
return {
"success": False,
"error": f"Failed to load model scanners: {exc}",
"error_code": "scanner_initialization_failed",
}
checked_folders = 0
moved_empty = 0
moved_orphaned = 0
skipped_non_hash = 0
move_failures = 0
errors: List[str] = []
resolved_base = base_root.resolve()
library_paths: List[Tuple[str, Path]] = []
processed_paths = {resolved_base}
for library_name, library_path in iter_library_roots():
if not library_path:
continue
library_root = Path(library_path)
try:
resolved = library_root.resolve()
except FileNotFoundError:
continue
if resolved in processed_paths:
continue
if not library_root.exists():
logger.debug(
"Skipping cleanup for library '%s': folder missing (%s)",
library_name,
library_root,
)
continue
processed_paths.add(resolved)
library_paths.append((library_name, library_root))
deleted_roots: List[Path] = []
# Build list of (label, root) pairs including the base root for legacy layouts
cleanup_targets: List[Tuple[str, Path]] = [("__base__", base_root)] + library_paths
library_root_set = {root.resolve() for _, root in library_paths}
for label, root_path in cleanup_targets:
deleted_bucket = root_path / self._deleted_folder_name
deleted_bucket.mkdir(exist_ok=True)
deleted_roots.append(deleted_bucket)
for entry in os.scandir(root_path):
if not entry.is_dir(follow_symlinks=False):
continue
if entry.name == self._deleted_folder_name:
continue
entry_path = Path(entry.path)
if label == "__base__":
try:
resolved_entry = entry_path.resolve()
except FileNotFoundError:
continue
if resolved_entry in library_root_set:
# Skip library-specific folders tracked separately
continue
checked_folders += 1
try:
if self._is_folder_empty(entry_path):
if await self._remove_empty_folder(entry_path):
moved_empty += 1
else:
move_failures += 1
continue
if not self._is_hash_folder(entry.name):
skipped_non_hash += 1
continue
hash_exists = (
lora_scanner.has_hash(entry.name)
or checkpoint_scanner.has_hash(entry.name)
or embedding_scanner.has_hash(entry.name)
)
if not hash_exists:
if await self._move_folder(entry_path, deleted_bucket):
moved_orphaned += 1
else:
move_failures += 1
except Exception as exc: # pragma: no cover - filesystem guard
move_failures += 1
error_message = f"{entry.name}: {exc}"
errors.append(error_message)
logger.error(
"Error processing example images folder %s: %s",
entry_path,
exc,
exc_info=True,
)
partial_success = move_failures > 0 and (moved_empty > 0 or moved_orphaned > 0)
success = move_failures == 0 and not errors
result = CleanupResult(
success=success,
checked_folders=checked_folders,
moved_empty_folders=moved_empty,
moved_orphaned_folders=moved_orphaned,
skipped_non_hash=skipped_non_hash,
move_failures=move_failures,
errors=errors,
deleted_root=str(deleted_roots[0]) if deleted_roots else None,
partial_success=partial_success,
)
summary = result.to_dict()
summary["deleted_roots"] = [str(path) for path in deleted_roots]
if success:
logger.info(
"Example images cleanup complete: checked=%s, moved_empty=%s, moved_orphaned=%s",
checked_folders,
moved_empty,
moved_orphaned,
)
elif partial_success:
logger.warning(
"Example images cleanup partially complete: moved=%s, failures=%s",
summary["moved_total"],
move_failures,
)
else:
logger.error(
"Example images cleanup failed: move_failures=%s, errors=%s",
move_failures,
errors,
)
return summary
@staticmethod
def _is_folder_empty(folder_path: Path) -> bool:
try:
with os.scandir(folder_path) as iterator:
return not any(iterator)
except FileNotFoundError:
return True
except OSError as exc: # pragma: no cover - defensive guard
logger.debug("Failed to inspect folder %s: %s", folder_path, exc)
return False
@staticmethod
def _is_hash_folder(name: str) -> bool:
if len(name) != 64:
return False
hex_chars = set("0123456789abcdefABCDEF")
return all(char in hex_chars for char in name)
async def _remove_empty_folder(self, folder_path: Path) -> bool:
loop = asyncio.get_running_loop()
try:
await loop.run_in_executor(
None,
shutil.rmtree,
str(folder_path),
)
logger.debug("Removed empty example images folder %s", folder_path)
return True
except Exception as exc: # pragma: no cover - filesystem guard
logger.error("Failed to remove empty example images folder %s: %s", folder_path, exc, exc_info=True)
return False
async def _move_folder(self, folder_path: Path, deleted_bucket: Path) -> bool:
destination = self._build_destination(folder_path.name, deleted_bucket)
loop = asyncio.get_running_loop()
try:
await loop.run_in_executor(
None,
shutil.move,
str(folder_path),
str(destination),
)
logger.debug("Moved example images folder %s -> %s", folder_path, destination)
return True
except Exception as exc: # pragma: no cover - filesystem guard
logger.error(
"Failed to move example images folder %s to %s: %s",
folder_path,
destination,
exc,
exc_info=True,
)
return False
def _build_destination(self, folder_name: str, deleted_bucket: Path) -> Path:
destination = deleted_bucket / folder_name
suffix = 1
while destination.exists():
destination = deleted_bucket / f"{folder_name}_{suffix}"
suffix += 1
return destination
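A hedged invocation sketch for the cleanup service above; it assumes settings and the model scanners are already initialized, and the import path is an assumption.

```python
# Illustrative only; the import path is an assumption.
import asyncio

from example_images_cleanup_service import ExampleImagesCleanupService

async def run_cleanup() -> None:
    service = ExampleImagesCleanupService()  # uses the default "_deleted" bucket
    summary = await service.cleanup_example_image_folders()
    if summary["success"]:
        # moved_total combines empty and orphaned folder moves
        print(f"checked={summary['checked_folders']} moved={summary['moved_total']}")
    else:
        print("cleanup failed:", summary.get("error") or summary.get("errors"))

asyncio.run(run_cleanup())
```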


@@ -5,7 +5,6 @@ from typing import Dict, List, Optional
from .base_model_service import BaseModelService
from ..utils.models import LoraMetadata
from ..config import config
-from ..utils.routes_common import ModelRouteUtils
logger = logging.getLogger(__name__)
@@ -34,12 +33,11 @@ class LoraService(BaseModelService):
"file_size": lora_data.get("size", 0),
"modified": lora_data.get("modified", ""),
"tags": lora_data.get("tags", []),
"modelDescription": lora_data.get("modelDescription", ""),
"from_civitai": lora_data.get("from_civitai", True),
"usage_tips": lora_data.get("usage_tips", ""),
"notes": lora_data.get("notes", ""),
"favorite": lora_data.get("favorite", False),
"civitai": ModelRouteUtils.filter_civitai_data(lora_data.get("civitai", {}))
"civitai": self.filter_civitai_data(lora_data.get("civitai", {}), minimal=True)
}
async def _apply_specific_filters(self, data: List[Dict], **kwargs) -> List[Dict]:


@@ -0,0 +1,151 @@
import zipfile
import logging
import asyncio
from pathlib import Path
from typing import Optional
from .downloader import get_downloader
logger = logging.getLogger(__name__)
class MetadataArchiveManager:
"""Manages downloading and extracting Civitai metadata archive database"""
DOWNLOAD_URLS = [
"https://github.com/willmiao/civitai-metadata-archive-db/releases/download/db-2025-08-08/civitai.zip",
"https://huggingface.co/datasets/willmiao/civitai-metadata-archive-db/blob/main/civitai.zip"
]
def __init__(self, base_path: str):
"""Initialize with base path where files will be stored"""
self.base_path = Path(base_path)
self.civitai_folder = self.base_path / "civitai"
self.archive_path = self.base_path / "civitai.zip"
self.db_path = self.civitai_folder / "civitai.sqlite"
def is_database_available(self) -> bool:
"""Check if the SQLite database is available and valid"""
return self.db_path.exists() and self.db_path.stat().st_size > 0
def get_database_path(self) -> Optional[str]:
"""Get the path to the SQLite database if available"""
if self.is_database_available():
return str(self.db_path)
return None
async def download_and_extract_database(self, progress_callback=None) -> bool:
"""Download and extract the metadata archive database
Args:
progress_callback: Optional callback function to report progress
Returns:
bool: True if successful, False otherwise
"""
try:
# Create directories if they don't exist
self.base_path.mkdir(parents=True, exist_ok=True)
self.civitai_folder.mkdir(parents=True, exist_ok=True)
# Download the archive
if not await self._download_archive(progress_callback):
return False
# Extract the archive
if not await self._extract_archive(progress_callback):
return False
# Clean up the archive file
if self.archive_path.exists():
self.archive_path.unlink()
logger.info(f"Successfully downloaded and extracted metadata database to {self.db_path}")
return True
except Exception as e:
logger.error(f"Error downloading and extracting metadata database: {e}", exc_info=True)
return False
async def _download_archive(self, progress_callback=None) -> bool:
"""Download the zip archive from one of the available URLs"""
downloader = await get_downloader()
for url in self.DOWNLOAD_URLS:
try:
logger.info(f"Attempting to download from {url}")
if progress_callback:
progress_callback("download", f"Downloading from {url}")
# Custom progress callback to report download progress
async def download_progress(progress):
if progress_callback:
progress_callback("download", f"Downloading archive... {progress:.1f}%")
success, result = await downloader.download_file(
url=url,
save_path=str(self.archive_path),
progress_callback=download_progress,
use_auth=False, # Public download, no auth needed
allow_resume=True
)
if success:
logger.info(f"Successfully downloaded archive from {url}")
return True
else:
logger.warning(f"Failed to download from {url}: {result}")
continue
except Exception as e:
logger.warning(f"Error downloading from {url}: {e}")
continue
logger.error("Failed to download archive from any URL")
return False
async def _extract_archive(self, progress_callback=None) -> bool:
"""Extract the zip archive to the civitai folder"""
try:
if progress_callback:
progress_callback("extract", "Extracting archive...")
# Run extraction in a thread pool to avoid blocking the event loop
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, self._extract_zip_sync)
if progress_callback:
progress_callback("extract", "Extraction completed")
return True
except Exception as e:
logger.error(f"Error extracting archive: {e}", exc_info=True)
return False
def _extract_zip_sync(self):
"""Synchronous zip extraction (runs in thread pool)"""
with zipfile.ZipFile(self.archive_path, 'r') as archive:
archive.extractall(path=self.base_path)
async def remove_database(self) -> bool:
"""Remove the metadata database and folder"""
try:
if self.civitai_folder.exists():
# Remove all files in the civitai folder
for file_path in self.civitai_folder.iterdir():
if file_path.is_file():
file_path.unlink()
# Remove the folder itself
self.civitai_folder.rmdir()
# Also remove the archive file if it exists
if self.archive_path.exists():
self.archive_path.unlink()
logger.info("Successfully removed metadata database")
return True
except Exception as e:
logger.error(f"Error removing metadata database: {e}", exc_info=True)
return False
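A sketch of how a caller might ensure the archive database exists before registering the SQLite provider; the base path and import path are placeholders.

```python
# Illustrative only; base path and import are assumptions.
import asyncio

from metadata_archive_manager import MetadataArchiveManager

async def ensure_archive_db() -> str | None:
    manager = MetadataArchiveManager("/opt/lora-manager")  # placeholder base path

    def report(stage: str, message: str) -> None:
        # stage is "download" or "extract", as emitted above
        print(f"[{stage}] {message}")

    if manager.is_database_available():
        return manager.get_database_path()
    ok = await manager.download_and_extract_database(progress_callback=report)
    return manager.get_database_path() if ok else None

print(asyncio.run(ensure_archive_db()))
```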


@@ -0,0 +1,119 @@
import os
import logging
from .model_metadata_provider import (
ModelMetadataProviderManager,
SQLiteModelMetadataProvider,
CivitaiModelMetadataProvider,
FallbackMetadataProvider
)
from .settings_manager import get_settings_manager
from .metadata_archive_manager import MetadataArchiveManager
from .service_registry import ServiceRegistry
logger = logging.getLogger(__name__)
async def initialize_metadata_providers():
"""Initialize and configure all metadata providers based on settings"""
provider_manager = await ModelMetadataProviderManager.get_instance()
# Clear existing providers to allow reinitialization
provider_manager.providers.clear()
provider_manager.default_provider = None
# Get settings
settings_manager = get_settings_manager()
enable_archive_db = settings_manager.get('enable_metadata_archive_db', False)
providers = []
# Initialize archive database provider if enabled
if enable_archive_db:
try:
# Initialize archive manager
base_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
archive_manager = MetadataArchiveManager(base_path)
db_path = archive_manager.get_database_path()
if db_path and os.path.exists(db_path):
sqlite_provider = SQLiteModelMetadataProvider(db_path)
provider_manager.register_provider('sqlite', sqlite_provider)
providers.append(('sqlite', sqlite_provider))
logger.debug(f"SQLite metadata provider registered with database: {db_path}")
else:
logger.warning("Metadata archive database is enabled but database file not found")
except Exception as e:
logger.error(f"Failed to initialize SQLite metadata provider: {e}")
# Initialize Civitai API provider (always available as fallback)
try:
civitai_client = await ServiceRegistry.get_civitai_client()
civitai_provider = CivitaiModelMetadataProvider(civitai_client)
provider_manager.register_provider('civitai_api', civitai_provider)
providers.append(('civitai_api', civitai_provider))
logger.debug("Civitai API metadata provider registered")
except Exception as e:
logger.error(f"Failed to initialize Civitai API metadata provider: {e}")
# Register CivArchive provider, but do NOT add to fallback providers
try:
from .model_metadata_provider import CivArchiveModelMetadataProvider
civarchive_provider = CivArchiveModelMetadataProvider()
provider_manager.register_provider('civarchive', civarchive_provider)
logger.debug("CivArchive metadata provider registered (not included in fallback)")
except Exception as e:
logger.error(f"Failed to initialize CivArchive metadata provider: {e}")
# Set up fallback provider based on available providers
if len(providers) > 1:
# Always use Civitai API first, then Archive DB
ordered_providers = []
ordered_providers.extend([p[1] for p in providers if p[0] == 'civitai_api'])
ordered_providers.extend([p[1] for p in providers if p[0] == 'sqlite'])
if ordered_providers:
fallback_provider = FallbackMetadataProvider(ordered_providers)
provider_manager.register_provider('fallback', fallback_provider, is_default=True)
logger.debug(f"Fallback metadata provider registered with {len(ordered_providers)} providers, Civitai API first")
elif len(providers) == 1:
# Only one provider available, set it as default
provider_name, provider = providers[0]
provider_manager.register_provider(provider_name, provider, is_default=True)
logger.debug(f"Single metadata provider registered as default: {provider_name}")
else:
logger.warning("No metadata providers available - this may cause metadata lookup failures")
return provider_manager
async def update_metadata_providers():
"""Update metadata providers based on current settings"""
try:
# Get current settings
settings_manager = get_settings_manager()
enable_archive_db = settings_manager.get('enable_metadata_archive_db', False)
# Reinitialize all providers with new settings
provider_manager = await initialize_metadata_providers()
logger.info(f"Updated metadata providers, archive_db enabled: {enable_archive_db}")
return provider_manager
except Exception as e:
logger.error(f"Failed to update metadata providers: {e}")
return await ModelMetadataProviderManager.get_instance()
async def get_metadata_archive_manager():
"""Get metadata archive manager instance"""
base_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
return MetadataArchiveManager(base_path)
async def get_metadata_provider(provider_name: str = None):
"""Get a specific metadata provider or default provider"""
provider_manager = await ModelMetadataProviderManager.get_instance()
if provider_name:
return provider_manager._get_provider(provider_name)
return provider_manager._get_provider()
async def get_default_metadata_provider():
"""Get the default metadata provider (fallback or single provider)"""
return await get_metadata_provider()
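A sketch of reacting to the settings toggle that drives the provider wiring above. The module name and the `set` call on the settings manager are assumptions; the rebuild call is the helper defined above.

```python
# Illustrative only; module name and settings setter are assumptions.
from settings_manager import get_settings_manager
from metadata_provider_setup import update_metadata_providers  # assumed module name

async def set_archive_db_enabled(enabled: bool):
    settings = get_settings_manager()
    settings.set("enable_metadata_archive_db", enabled)  # assumed setter API
    # Rebuild the chain so the SQLite provider is (de)registered and the
    # fallback order (Civitai API first, then archive DB) is refreshed.
    return await update_metadata_providers()
```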


@@ -0,0 +1,356 @@
"""Services for synchronising metadata with remote providers."""
from __future__ import annotations
import json
import logging
import os
from datetime import datetime
from typing import Any, Awaitable, Callable, Dict, Iterable, Optional
from ..services.settings_manager import SettingsManager
from ..utils.model_utils import determine_base_model
logger = logging.getLogger(__name__)
class MetadataProviderProtocol:
"""Subset of metadata provider interface consumed by the sync service."""
async def get_model_by_hash(self, sha256: str) -> tuple[Optional[Dict[str, Any]], Optional[str]]:
...
async def get_model_version(
self, model_id: int, model_version_id: Optional[int]
) -> Optional[Dict[str, Any]]:
...
class MetadataSyncService:
"""High level orchestration for metadata synchronisation flows."""
def __init__(
self,
*,
metadata_manager,
preview_service,
settings: SettingsManager,
default_metadata_provider_factory: Callable[[], Awaitable[MetadataProviderProtocol]],
metadata_provider_selector: Callable[[str], Awaitable[MetadataProviderProtocol]],
) -> None:
self._metadata_manager = metadata_manager
self._preview_service = preview_service
self._settings = settings
self._get_default_provider = default_metadata_provider_factory
self._get_provider = metadata_provider_selector
async def load_local_metadata(self, metadata_path: str) -> Dict[str, Any]:
"""Load metadata JSON from disk, returning an empty structure when missing."""
if not os.path.exists(metadata_path):
return {}
try:
with open(metadata_path, "r", encoding="utf-8") as handle:
return json.load(handle)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Error loading metadata from %s: %s", metadata_path, exc)
return {}
async def mark_not_found_on_civitai(
self, metadata_path: str, local_metadata: Dict[str, Any]
) -> None:
"""Persist the not-found flag for a metadata payload."""
local_metadata["from_civitai"] = False
await self._metadata_manager.save_metadata(metadata_path, local_metadata)
@staticmethod
def is_civitai_api_metadata(meta: Dict[str, Any]) -> bool:
"""Determine if the metadata originated from the CivitAI public API."""
if not isinstance(meta, dict):
return False
files = meta.get("files")
images = meta.get("images")
source = meta.get("source")
return bool(files) and bool(images) and source != "archive_db"
async def update_model_metadata(
self,
metadata_path: str,
local_metadata: Dict[str, Any],
civitai_metadata: Dict[str, Any],
metadata_provider: Optional[MetadataProviderProtocol] = None,
) -> Dict[str, Any]:
"""Merge remote metadata into the local record and persist the result."""
existing_civitai = local_metadata.get("civitai") or {}
if (
civitai_metadata.get("source") == "archive_db"
and self.is_civitai_api_metadata(existing_civitai)
):
logger.info(
"Skip civitai update for %s (%s)",
local_metadata.get("model_name", ""),
existing_civitai.get("name", ""),
)
else:
merged_civitai = existing_civitai.copy()
merged_civitai.update(civitai_metadata)
if civitai_metadata.get("source") == "archive_db":
model_name = civitai_metadata.get("model", {}).get("name", "")
version_name = civitai_metadata.get("name", "")
logger.info(
"Recovered metadata from archive_db for deleted model: %s (%s)",
model_name,
version_name,
)
if "trainedWords" in existing_civitai:
existing_trained = existing_civitai.get("trainedWords", [])
new_trained = civitai_metadata.get("trainedWords", [])
merged_trained = list(set(existing_trained + new_trained))
merged_civitai["trainedWords"] = merged_trained
local_metadata["civitai"] = merged_civitai
if "model" in civitai_metadata and civitai_metadata["model"]:
model_data = civitai_metadata["model"]
if model_data.get("name"):
local_metadata["model_name"] = model_data["name"]
if not local_metadata.get("modelDescription") and model_data.get("description"):
local_metadata["modelDescription"] = model_data["description"]
if not local_metadata.get("tags") and model_data.get("tags"):
local_metadata["tags"] = model_data["tags"]
if model_data.get("creator") and not local_metadata.get("civitai", {}).get(
"creator"
):
local_metadata.setdefault("civitai", {})["creator"] = model_data["creator"]
local_metadata["base_model"] = determine_base_model(
civitai_metadata.get("baseModel")
)
await self._preview_service.ensure_preview_for_metadata(
metadata_path, local_metadata, civitai_metadata.get("images", [])
)
await self._metadata_manager.save_metadata(metadata_path, local_metadata)
return local_metadata
async def fetch_and_update_model(
self,
*,
sha256: str,
file_path: str,
model_data: Dict[str, Any],
update_cache_func: Callable[[str, str, Dict[str, Any]], Awaitable[bool]],
) -> tuple[bool, Optional[str]]:
"""Fetch metadata for a model and update both disk and cache state."""
if not isinstance(model_data, dict):
error = f"Invalid model_data type: {type(model_data)}"
logger.error(error)
return False, error
metadata_path = os.path.splitext(file_path)[0] + ".metadata.json"
enable_archive = self._settings.get("enable_metadata_archive_db", False)
try:
if model_data.get("civitai_deleted") is True:
if not enable_archive or model_data.get("db_checked") is True:
if not enable_archive:
error_msg = "CivitAI model is deleted and metadata archive DB is not enabled"
else:
error_msg = "CivitAI model is deleted and not found in metadata archive DB"
return (False, error_msg)
metadata_provider = await self._get_provider("sqlite")
else:
metadata_provider = await self._get_default_provider()
civitai_metadata, error = await metadata_provider.get_model_by_hash(sha256)
if not civitai_metadata:
if error == "Model not found":
model_data["from_civitai"] = False
model_data["civitai_deleted"] = True
model_data["db_checked"] = enable_archive
model_data["last_checked_at"] = datetime.now().timestamp()
data_to_save = model_data.copy()
data_to_save.pop("folder", None)
await self._metadata_manager.save_metadata(file_path, data_to_save)
error_msg = (
f"Error fetching metadata: {error} (model_name={model_data.get('model_name', '')})"
)
logger.error(error_msg)
return False, error_msg
model_data["from_civitai"] = True
model_data["civitai_deleted"] = civitai_metadata.get("source") == "archive_db"
model_data["db_checked"] = enable_archive
model_data["last_checked_at"] = datetime.now().timestamp()
local_metadata = model_data.copy()
local_metadata.pop("folder", None)
await self.update_model_metadata(
metadata_path,
local_metadata,
civitai_metadata,
metadata_provider,
)
update_payload = {
"model_name": local_metadata.get("model_name"),
"preview_url": local_metadata.get("preview_url"),
"civitai": local_metadata.get("civitai"),
}
model_data.update(update_payload)
await update_cache_func(file_path, file_path, local_metadata)
return True, None
except KeyError as exc:
error_msg = f"Error fetching metadata - Missing key: {exc} in model_data={model_data}"
logger.error(error_msg)
return False, error_msg
except Exception as exc: # pragma: no cover - error path
error_msg = f"Error fetching metadata: {exc}"
logger.error(error_msg, exc_info=True)
return False, error_msg
async def fetch_metadata_by_sha(
self, sha256: str, metadata_provider: Optional[MetadataProviderProtocol] = None
) -> tuple[Optional[Dict[str, Any]], Optional[str]]:
"""Fetch metadata for a SHA256 hash from the configured provider."""
provider = metadata_provider or await self._get_default_provider()
return await provider.get_model_by_hash(sha256)
async def relink_metadata(
self,
*,
file_path: str,
metadata: Dict[str, Any],
model_id: int,
model_version_id: Optional[int],
) -> Dict[str, Any]:
"""Relink a local metadata record to a specific CivitAI model version."""
provider = await self._get_default_provider()
civitai_metadata = await provider.get_model_version(model_id, model_version_id)
if not civitai_metadata:
raise ValueError(
f"Model version not found on CivitAI for ID: {model_id}"
+ (f" with version: {model_version_id}" if model_version_id else "")
)
primary_model_file: Optional[Dict[str, Any]] = None
for file_info in civitai_metadata.get("files", []):
if file_info.get("primary", False) and file_info.get("type") == "Model":
primary_model_file = file_info
break
if primary_model_file and primary_model_file.get("hashes", {}).get("SHA256"):
metadata["sha256"] = primary_model_file["hashes"]["SHA256"].lower()
metadata_path = os.path.splitext(file_path)[0] + ".metadata.json"
await self.update_model_metadata(
metadata_path,
metadata,
civitai_metadata,
provider,
)
return metadata
async def save_metadata_updates(
self,
*,
file_path: str,
updates: Dict[str, Any],
metadata_loader: Callable[[str], Awaitable[Dict[str, Any]]],
update_cache: Callable[[str, str, Dict[str, Any]], Awaitable[bool]],
) -> Dict[str, Any]:
"""Apply metadata updates and persist to disk and cache."""
metadata_path = os.path.splitext(file_path)[0] + ".metadata.json"
metadata = await metadata_loader(metadata_path)
for key, value in updates.items():
if isinstance(value, dict) and isinstance(metadata.get(key), dict):
metadata[key].update(value)
else:
metadata[key] = value
await self._metadata_manager.save_metadata(file_path, metadata)
await update_cache(file_path, file_path, metadata)
if "model_name" in updates:
logger.debug("Metadata update touched model_name; cache resort required")
return metadata
async def verify_duplicate_hashes(
self,
*,
file_paths: Iterable[str],
metadata_loader: Callable[[str], Awaitable[Dict[str, Any]]],
hash_calculator: Callable[[str], Awaitable[str]],
update_cache: Callable[[str, str, Dict[str, Any]], Awaitable[bool]],
) -> Dict[str, Any]:
"""Verify a collection of files share the same SHA256 hash."""
file_paths = list(file_paths)
if not file_paths:
raise ValueError("No file paths provided for verification")
results = {
"verified_as_duplicates": True,
"mismatched_files": [],
"new_hash_map": {},
}
expected_hash: Optional[str] = None
first_metadata_path = os.path.splitext(file_paths[0])[0] + ".metadata.json"
first_metadata = await metadata_loader(first_metadata_path)
if first_metadata and "sha256" in first_metadata:
expected_hash = first_metadata["sha256"].lower()
for path in file_paths:
if not os.path.exists(path):
continue
try:
actual_hash = await hash_calculator(path)
metadata_path = os.path.splitext(path)[0] + ".metadata.json"
metadata = await metadata_loader(metadata_path)
stored_hash = metadata.get("sha256", "").lower()
if not expected_hash:
expected_hash = stored_hash
if actual_hash != expected_hash:
results["verified_as_duplicates"] = False
results["mismatched_files"].append(path)
results["new_hash_map"][path] = actual_hash
if actual_hash != stored_hash:
metadata["sha256"] = actual_hash
await self._metadata_manager.save_metadata(path, metadata)
await update_cache(path, path, metadata)
except Exception as exc: # pragma: no cover - defensive path
logger.error("Error verifying hash for %s: %s", path, exc)
results["mismatched_files"].append(path)
results["new_hash_map"][path] = "error_calculating_hash"
results["verified_as_duplicates"] = False
return results
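A wiring sketch for the keyword-only constructor above, reusing the provider helpers from the initialization module; the import paths are assumptions.

```python
# Illustrative wiring only; import paths are assumptions.
from metadata_sync_service import MetadataSyncService
from metadata_provider_setup import (  # assumed module name
    get_default_metadata_provider,
    get_metadata_provider,
)

def build_sync_service(metadata_manager, preview_service, settings) -> MetadataSyncService:
    return MetadataSyncService(
        metadata_manager=metadata_manager,
        preview_service=preview_service,
        settings=settings,
        # Both helpers are awaitable factories, matching the constructor's
        # Callable[..., Awaitable[...]] parameters.
        default_metadata_provider_factory=get_default_metadata_provider,
        metadata_provider_selector=get_metadata_provider,
    )
```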


@@ -1,6 +1,6 @@
import asyncio
-from typing import List, Dict, Tuple
-from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple
+from dataclasses import dataclass, field
from operator import itemgetter
from natsort import natsorted
@@ -17,10 +17,12 @@ SUPPORTED_SORT_MODES = [
@dataclass
class ModelCache:
"""Cache structure for model data with extensible sorting"""
"""Cache structure for model data with extensible sorting."""
raw_data: List[Dict]
folders: List[str]
+version_index: Dict[int, Dict] = field(default_factory=dict)
def __post_init__(self):
self._lock = asyncio.Lock()
# Cache for last sort: (sort_key, order) -> sorted list
@@ -28,6 +30,58 @@ class ModelCache:
self._last_sorted_data: List[Dict] = []
# Default sort on init
asyncio.create_task(self.resort())
self.rebuild_version_index()
@staticmethod
def _normalize_version_id(value: Any) -> Optional[int]:
"""Normalize a potential version identifier into an integer."""
if isinstance(value, int):
return value
if isinstance(value, str):
try:
return int(value)
except ValueError:
return None
return None
def rebuild_version_index(self) -> None:
"""Rebuild the version index from the current raw data."""
self.version_index = {}
for item in self.raw_data:
self.add_to_version_index(item)
def add_to_version_index(self, item: Dict) -> None:
"""Register a cache item in the version index if possible."""
civitai_data = item.get('civitai') if isinstance(item, dict) else None
if not isinstance(civitai_data, dict):
return
version_id = self._normalize_version_id(civitai_data.get('id'))
if version_id is None:
return
self.version_index[version_id] = item
def remove_from_version_index(self, item: Dict) -> None:
"""Remove a cache item from the version index if present."""
civitai_data = item.get('civitai') if isinstance(item, dict) else None
if not isinstance(civitai_data, dict):
return
version_id = self._normalize_version_id(civitai_data.get('id'))
if version_id is None:
return
existing = self.version_index.get(version_id)
if existing is item or (
isinstance(existing, dict)
and existing.get('file_path') == item.get('file_path')
):
self.version_index.pop(version_id, None)
async def resort(self):
"""Resort cached data according to last sort mode if set"""
@@ -41,6 +95,7 @@ class ModelCache:
all_folders = set(l['folder'] for l in self.raw_data)
self.folders = sorted(list(all_folders), key=lambda x: x.lower())
+self.rebuild_version_index()
def _sort_data(self, data: List[Dict], sort_key: str, order: str) -> List[Dict]:
"""Sort data by sort_key and order"""


@@ -0,0 +1,464 @@
import asyncio
import os
import logging
from typing import List, Dict, Optional, Any, Set
from abc import ABC, abstractmethod
from ..utils.utils import calculate_relative_path_for_model, remove_empty_dirs
from ..utils.constants import AUTO_ORGANIZE_BATCH_SIZE
from ..services.settings_manager import get_settings_manager
logger = logging.getLogger(__name__)
class ProgressCallback(ABC):
"""Abstract callback interface for progress reporting"""
@abstractmethod
async def on_progress(self, progress_data: Dict[str, Any]) -> None:
"""Called when progress is updated"""
pass
class AutoOrganizeResult:
"""Result object for auto-organize operations"""
def __init__(self):
self.total: int = 0
self.processed: int = 0
self.success_count: int = 0
self.failure_count: int = 0
self.skipped_count: int = 0
self.operation_type: str = 'unknown'
self.cleanup_counts: Dict[str, int] = {}
self.results: List[Dict[str, Any]] = []
self.results_truncated: bool = False
self.sample_results: List[Dict[str, Any]] = []
self.is_flat_structure: bool = False
def to_dict(self) -> Dict[str, Any]:
"""Convert result to dictionary"""
result = {
'success': True,
'message': f'Auto-organize {self.operation_type} completed: {self.success_count} moved, {self.skipped_count} skipped, {self.failure_count} failed out of {self.total} total',
'summary': {
'total': self.total,
'success': self.success_count,
'skipped': self.skipped_count,
'failures': self.failure_count,
'organization_type': 'flat' if self.is_flat_structure else 'structured',
'cleaned_dirs': self.cleanup_counts,
'operation_type': self.operation_type
}
}
if self.results_truncated:
result['results_truncated'] = True
result['sample_results'] = self.sample_results
else:
result['results'] = self.results
return result
class ModelFileService:
"""Service for handling model file operations and organization"""
def __init__(self, scanner, model_type: str):
"""Initialize the service
Args:
scanner: Model scanner instance
model_type: Type of model (e.g., 'lora', 'checkpoint')
"""
self.scanner = scanner
self.model_type = model_type
def get_model_roots(self) -> List[str]:
"""Get model root directories"""
return self.scanner.get_model_roots()
async def auto_organize_models(
self,
file_paths: Optional[List[str]] = None,
progress_callback: Optional[ProgressCallback] = None
) -> AutoOrganizeResult:
"""Auto-organize models based on current settings
Args:
file_paths: Optional list of specific file paths to organize.
If None, organizes all models.
progress_callback: Optional callback for progress updates
Returns:
AutoOrganizeResult object with operation results
"""
result = AutoOrganizeResult()
source_directories: Set[str] = set()
try:
# Get all models from cache
cache = await self.scanner.get_cached_data()
all_models = cache.raw_data
# Filter models if specific file paths are provided
if file_paths:
all_models = [model for model in all_models if model.get('file_path') in file_paths]
result.operation_type = 'bulk'
else:
result.operation_type = 'all'
# Get model roots for this scanner
model_roots = self.get_model_roots()
if not model_roots:
raise ValueError('No model roots configured')
# Check if flat structure is configured for this model type
settings_manager = get_settings_manager()
path_template = settings_manager.get_download_path_template(self.model_type)
result.is_flat_structure = not path_template
# Initialize tracking
result.total = len(all_models)
# Send initial progress
if progress_callback:
await progress_callback.on_progress({
'type': 'auto_organize_progress',
'status': 'started',
'total': result.total,
'processed': 0,
'success': 0,
'failures': 0,
'skipped': 0,
'operation_type': result.operation_type
})
# Process models in batches
await self._process_models_in_batches(
all_models,
model_roots,
result,
progress_callback,
source_directories # Pass the set to track source directories
)
# Send cleanup progress
if progress_callback:
await progress_callback.on_progress({
'type': 'auto_organize_progress',
'status': 'cleaning',
'total': result.total,
'processed': result.processed,
'success': result.success_count,
'failures': result.failure_count,
'skipped': result.skipped_count,
'message': 'Cleaning up empty directories...',
'operation_type': result.operation_type
})
# Clean up empty directories - only in affected directories for bulk operations
cleanup_paths = list(source_directories) if result.operation_type == 'bulk' else model_roots
result.cleanup_counts = await self._cleanup_empty_directories(cleanup_paths)
# Send completion message
if progress_callback:
await progress_callback.on_progress({
'type': 'auto_organize_progress',
'status': 'completed',
'total': result.total,
'processed': result.processed,
'success': result.success_count,
'failures': result.failure_count,
'skipped': result.skipped_count,
'cleanup': result.cleanup_counts,
'operation_type': result.operation_type
})
return result
except Exception as e:
logger.error(f"Error in auto_organize_models: {e}", exc_info=True)
# Send error message
if progress_callback:
await progress_callback.on_progress({
'type': 'auto_organize_progress',
'status': 'error',
'error': str(e),
'operation_type': result.operation_type
})
raise  # re-raise with the original traceback
async def _process_models_in_batches(
self,
all_models: List[Dict[str, Any]],
model_roots: List[str],
result: AutoOrganizeResult,
progress_callback: Optional[ProgressCallback],
source_directories: Optional[Set[str]] = None
) -> None:
"""Process models in batches to avoid overwhelming the system"""
for i in range(0, result.total, AUTO_ORGANIZE_BATCH_SIZE):
batch = all_models[i:i + AUTO_ORGANIZE_BATCH_SIZE]
for model in batch:
await self._process_single_model(model, model_roots, result, source_directories)
result.processed += 1
# Send progress update after each batch
if progress_callback:
await progress_callback.on_progress({
'type': 'auto_organize_progress',
'status': 'processing',
'total': result.total,
'processed': result.processed,
'success': result.success_count,
'failures': result.failure_count,
'skipped': result.skipped_count,
'operation_type': result.operation_type
})
# Small delay between batches
await asyncio.sleep(0.1)
async def _process_single_model(
self,
model: Dict[str, Any],
model_roots: List[str],
result: AutoOrganizeResult,
source_directories: Optional[Set[str]] = None
) -> None:
"""Process a single model for organization"""
try:
file_path = model.get('file_path')
model_name = model.get('model_name', 'Unknown')
if not file_path:
self._add_result(result, model_name, False, "No file path found")
result.failure_count += 1
return
# Find which model root this file belongs to
current_root = self._find_model_root(file_path, model_roots)
if not current_root:
self._add_result(result, model_name, False,
"Model file not found in any configured root directory")
result.failure_count += 1
return
# Determine target directory
target_dir = await self._calculate_target_directory(
model, current_root, result.is_flat_structure
)
if target_dir is None:
self._add_result(result, model_name, False,
"Skipped - insufficient metadata for organization")
result.skipped_count += 1
return
current_dir = os.path.dirname(file_path)
# Skip if already in correct location
if current_dir.replace(os.sep, '/') == target_dir.replace(os.sep, '/'):
result.skipped_count += 1
return
# Check for conflicts
file_name = os.path.basename(file_path)
target_file_path = os.path.join(target_dir, file_name)
if os.path.exists(target_file_path):
self._add_result(result, model_name, False,
f"Target file already exists: {target_file_path}")
result.failure_count += 1
return
# Store the source directory for potential cleanup
if source_directories is not None:
source_directories.add(current_dir)
# Perform the move
success = await self.scanner.move_model(file_path, target_dir)
if success:
result.success_count += 1
else:
self._add_result(result, model_name, False, "Failed to move model")
result.failure_count += 1
except Exception as e:
logger.error(f"Error processing model {model.get('model_name', 'Unknown')}: {e}", exc_info=True)
self._add_result(result, model.get('model_name', 'Unknown'), False, f"Error: {str(e)}")
result.failure_count += 1
def _find_model_root(self, file_path: str, model_roots: List[str]) -> Optional[str]:
"""Find which model root the file belongs to"""
for root in model_roots:
# Normalize paths for comparison
normalized_root = os.path.normpath(root).replace(os.sep, '/').rstrip('/') + '/'
normalized_file = os.path.normpath(file_path).replace(os.sep, '/')
# Require the trailing separator so '/models' does not match '/models2/...'
if normalized_file.startswith(normalized_root):
return root
return None
async def _calculate_target_directory(
self,
model: Dict[str, Any],
current_root: str,
is_flat_structure: bool
) -> Optional[str]:
"""Calculate the target directory for a model"""
if is_flat_structure:
file_path = model.get('file_path')
current_dir = os.path.dirname(file_path)
# Check if already in root directory
if os.path.normpath(current_dir) == os.path.normpath(current_root):
return None # Signal to skip
return current_root
else:
# Calculate new relative path based on settings
new_relative_path = calculate_relative_path_for_model(model, self.model_type)
if not new_relative_path:
return None # Signal to skip
return os.path.join(current_root, new_relative_path).replace(os.sep, '/')
def _add_result(
self,
result: AutoOrganizeResult,
model_name: str,
success: bool,
message: str
) -> None:
"""Add a result entry if under the limit"""
if len(result.results) < 100: # Limit detailed results
result.results.append({
"model": model_name,
"success": success,
"message": message
})
elif len(result.results) == 100:
# Mark as truncated and save sample
result.results_truncated = True
result.sample_results = result.results[:50]
async def _cleanup_empty_directories(self, paths: List[str]) -> Dict[str, int]:
"""Clean up empty directories after organizing
Args:
paths: List of paths to check for empty directories
Returns:
Dictionary with counts of removed directories by root path
"""
cleanup_counts = {}
for path in paths:
removed = remove_empty_dirs(path)
cleanup_counts[path] = removed
return cleanup_counts
class ModelMoveService:
"""Service for handling individual model moves"""
def __init__(self, scanner):
"""Initialize the service
Args:
scanner: Model scanner instance
"""
self.scanner = scanner
async def move_model(self, file_path: str, target_path: str) -> Dict[str, Any]:
"""Move a single model file
Args:
file_path: Source file path
target_path: Target directory path
Returns:
Dictionary with move result
"""
try:
source_dir = os.path.dirname(file_path)
if os.path.normpath(source_dir) == os.path.normpath(target_path):
logger.info(f"Source and target directories are the same: {source_dir}")
return {
'success': True,
'message': 'Source and target directories are the same',
'original_file_path': file_path,
'new_file_path': file_path
}
new_file_path = await self.scanner.move_model(file_path, target_path)
if new_file_path:
return {
'success': True,
'original_file_path': file_path,
'new_file_path': new_file_path
}
else:
return {
'success': False,
'error': 'Failed to move model',
'original_file_path': file_path,
'new_file_path': None
}
except Exception as e:
logger.error(f"Error moving model: {e}", exc_info=True)
return {
'success': False,
'error': str(e),
'original_file_path': file_path,
'new_file_path': None
}
async def move_models_bulk(self, file_paths: List[str], target_path: str) -> Dict[str, Any]:
"""Move multiple model files
Args:
file_paths: List of source file paths
target_path: Target directory path
Returns:
Dictionary with bulk move results
"""
try:
results = []
for file_path in file_paths:
result = await self.move_model(file_path, target_path)
results.append({
"original_file_path": file_path,
"new_file_path": result.get('new_file_path'),
"success": result['success'],
"message": result.get('message', result.get('error', 'Unknown'))
})
success_count = sum(1 for r in results if r["success"])
failure_count = len(results) - success_count
return {
'success': True,
'message': f'Moved {success_count} of {len(file_paths)} models',
'results': results,
'success_count': success_count,
'failure_count': failure_count
}
except Exception as e:
logger.error(f"Error moving models in bulk: {e}", exc_info=True)
return {
'success': False,
'error': str(e),
'results': [],
'success_count': 0,
'failure_count': len(file_paths)
}
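A sketch of a concrete `ProgressCallback`; a real implementation might forward events over a websocket, while this one just logs. The `scanner` in the usage comment is assumed to be an initialized model scanner.

```python
# Illustrative callback; a production one would likely broadcast to clients.
from typing import Any, Dict

class LoggingProgress(ProgressCallback):
    async def on_progress(self, progress_data: Dict[str, Any]) -> None:
        status = progress_data.get("status")
        done = progress_data.get("processed", 0)
        total = progress_data.get("total", 0)
        print(f"auto-organize {status}: {done}/{total}")

# Usage (inside an async context, with an initialized scanner):
#     service = ModelFileService(scanner, "lora")
#     result = await service.auto_organize_models(progress_callback=LoggingProgress())
#     print(result.to_dict()["summary"])
```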


@@ -0,0 +1,245 @@
"""Service routines for model lifecycle mutations."""
from __future__ import annotations
import logging
import os
from typing import Awaitable, Callable, Dict, Iterable, List, Optional
from ..services.service_registry import ServiceRegistry
from ..utils.constants import PREVIEW_EXTENSIONS
logger = logging.getLogger(__name__)
async def delete_model_artifacts(target_dir: str, file_name: str) -> List[str]:
"""Delete the primary model artefacts within ``target_dir``."""
patterns = [
f"{file_name}.safetensors",
f"{file_name}.metadata.json",
]
for ext in PREVIEW_EXTENSIONS:
patterns.append(f"{file_name}{ext}")
deleted: List[str] = []
main_file = patterns[0]
main_path = os.path.join(target_dir, main_file).replace(os.sep, "/")
if os.path.exists(main_path):
os.remove(main_path)
deleted.append(main_path)
else:
logger.warning("Model file not found: %s", main_file)
for pattern in patterns[1:]:
path = os.path.join(target_dir, pattern)
if os.path.exists(path):
try:
os.remove(path)
deleted.append(pattern)
except Exception as exc: # pragma: no cover - defensive path
logger.warning("Failed to delete %s: %s", pattern, exc)
return deleted
class ModelLifecycleService:
"""Co-ordinate destructive and mutating model operations."""
def __init__(
self,
*,
scanner,
metadata_manager,
metadata_loader: Callable[[str], Awaitable[Dict[str, object]]],
recipe_scanner_factory: Callable[[], Awaitable] | None = None,
) -> None:
self._scanner = scanner
self._metadata_manager = metadata_manager
self._metadata_loader = metadata_loader
self._recipe_scanner_factory = (
recipe_scanner_factory or ServiceRegistry.get_recipe_scanner
)
async def delete_model(self, file_path: str) -> Dict[str, object]:
"""Delete a model file and associated artefacts."""
if not file_path:
raise ValueError("Model path is required")
target_dir = os.path.dirname(file_path)
file_name = os.path.splitext(os.path.basename(file_path))[0]
deleted_files = await delete_model_artifacts(target_dir, file_name)
cache = await self._scanner.get_cached_data()
cache.raw_data = [item for item in cache.raw_data if item["file_path"] != file_path]
await cache.resort()
if hasattr(self._scanner, "_hash_index") and self._scanner._hash_index:
self._scanner._hash_index.remove_by_path(file_path)
return {"success": True, "deleted_files": deleted_files}
async def exclude_model(self, file_path: str) -> Dict[str, object]:
"""Mark a model as excluded and prune cache references."""
if not file_path:
raise ValueError("Model path is required")
metadata_path = os.path.splitext(file_path)[0] + ".metadata.json"
metadata = await self._metadata_loader(metadata_path)
metadata["exclude"] = True
await self._metadata_manager.save_metadata(file_path, metadata)
cache = await self._scanner.get_cached_data()
model_to_remove = next(
(item for item in cache.raw_data if item["file_path"] == file_path),
None,
)
if model_to_remove:
for tag in model_to_remove.get("tags", []):
if tag in getattr(self._scanner, "_tags_count", {}):
self._scanner._tags_count[tag] = max(
0, self._scanner._tags_count[tag] - 1
)
if self._scanner._tags_count[tag] == 0:
del self._scanner._tags_count[tag]
if hasattr(self._scanner, "_hash_index") and self._scanner._hash_index:
self._scanner._hash_index.remove_by_path(file_path)
cache.raw_data = [
item for item in cache.raw_data if item["file_path"] != file_path
]
await cache.resort()
excluded = getattr(self._scanner, "_excluded_models", None)
if isinstance(excluded, list):
excluded.append(file_path)
message = f"Model {os.path.basename(file_path)} excluded"
return {"success": True, "message": message}
async def bulk_delete_models(self, file_paths: Iterable[str]) -> Dict[str, object]:
"""Delete a collection of models via the scanner bulk operation."""
file_paths = list(file_paths)
if not file_paths:
raise ValueError("No file paths provided for deletion")
return await self._scanner.bulk_delete_models(file_paths)
async def rename_model(
self, *, file_path: str, new_file_name: str
) -> Dict[str, object]:
"""Rename a model and its companion artefacts."""
if not file_path or not new_file_name:
raise ValueError("File path and new file name are required")
invalid_chars = {"/", "\\", ":", "*", "?", '"', "<", ">", "|"}
if any(char in new_file_name for char in invalid_chars):
raise ValueError("Invalid characters in file name")
target_dir = os.path.dirname(file_path)
old_file_name = os.path.splitext(os.path.basename(file_path))[0]
new_file_path = os.path.join(target_dir, f"{new_file_name}.safetensors").replace(
os.sep, "/"
)
if os.path.exists(new_file_path):
raise ValueError("A file with this name already exists")
patterns = [
f"{old_file_name}.safetensors",
f"{old_file_name}.metadata.json",
f"{old_file_name}.metadata.json.bak",
]
for ext in PREVIEW_EXTENSIONS:
patterns.append(f"{old_file_name}{ext}")
existing_files: List[tuple[str, str]] = []
for pattern in patterns:
path = os.path.join(target_dir, pattern)
if os.path.exists(path):
existing_files.append((path, pattern))
metadata_path = os.path.join(target_dir, f"{old_file_name}.metadata.json")
metadata: Optional[Dict[str, object]] = None
hash_value: Optional[str] = None
if os.path.exists(metadata_path):
metadata = await self._metadata_loader(metadata_path)
hash_value = metadata.get("sha256") if isinstance(metadata, dict) else None
renamed_files: List[str] = []
new_metadata_path: Optional[str] = None
new_preview: Optional[str] = None
for old_path, pattern in existing_files:
ext = self._get_multipart_ext(pattern)
new_path = os.path.join(target_dir, f"{new_file_name}{ext}").replace(
os.sep, "/"
)
os.rename(old_path, new_path)
renamed_files.append(new_path)
if ext == ".metadata.json":
new_metadata_path = new_path
if metadata and new_metadata_path:
metadata["file_name"] = new_file_name
metadata["file_path"] = new_file_path
if metadata.get("preview_url"):
old_preview = str(metadata["preview_url"])
ext = self._get_multipart_ext(old_preview)
new_preview = os.path.join(target_dir, f"{new_file_name}{ext}").replace(
os.sep, "/"
)
metadata["preview_url"] = new_preview
await self._metadata_manager.save_metadata(new_file_path, metadata)
if metadata:
await self._scanner.update_single_model_cache(
file_path, new_file_path, metadata
)
if hash_value and getattr(self._scanner, "model_type", "") == "lora":
recipe_scanner = await self._recipe_scanner_factory()
if recipe_scanner:
try:
await recipe_scanner.update_lora_filename_by_hash(
hash_value, new_file_name
)
except Exception as exc: # pragma: no cover - defensive logging
logger.error(
"Error updating recipe references for %s: %s",
file_path,
exc,
)
return {
"success": True,
"new_file_path": new_file_path,
"new_preview_path": new_preview,
"renamed_files": renamed_files,
"reload_required": False,
}
@staticmethod
def _get_multipart_ext(filename: str) -> str:
"""Return the extension for files with compound suffixes."""
parts = filename.split(".")
if len(parts) == 3:
return "." + ".".join(parts[-2:])
if len(parts) >= 4:
return "." + ".".join(parts[-3:])
return os.path.splitext(filename)[1]
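
A quick behavioural sketch of _get_multipart_ext; ModelFileService is a hypothetical stand-in for the enclosing service class (which is not named in this excerpt), and the filenames are illustrative:

    svc = ModelFileService  # hypothetical alias for the class above
    assert svc._get_multipart_ext("model.metadata.json") == ".metadata.json"
    assert svc._get_multipart_ext("model.metadata.json.bak") == ".metadata.json.bak"
    assert svc._get_multipart_ext("model.safetensors") == ".safetensors"
    # Dotted base names also trip the compound-suffix rule:
    assert svc._get_multipart_ext("my.model.png") == ".model.png"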


@@ -0,0 +1,566 @@
from abc import ABC, abstractmethod
import json
import logging
from typing import Optional, Dict, Tuple, Any, List
from .downloader import get_downloader
try:
from bs4 import BeautifulSoup
except ImportError as exc:
BeautifulSoup = None # type: ignore[assignment]
_BS4_IMPORT_ERROR = exc
else:
_BS4_IMPORT_ERROR = None
try:
import aiosqlite
except ImportError as exc:
aiosqlite = None # type: ignore[assignment]
_AIOSQLITE_IMPORT_ERROR = exc
else:
_AIOSQLITE_IMPORT_ERROR = None
def _require_beautifulsoup() -> Any:
if BeautifulSoup is None:
raise RuntimeError(
"BeautifulSoup (bs4) is required for CivArchiveModelMetadataProvider. "
"Install it with 'pip install beautifulsoup4'."
) from _BS4_IMPORT_ERROR
return BeautifulSoup
def _require_aiosqlite() -> Any:
if aiosqlite is None:
raise RuntimeError(
"aiosqlite is required for SQLiteModelMetadataProvider. "
"Install it with 'pip install aiosqlite'."
) from _AIOSQLITE_IMPORT_ERROR
return aiosqlite
logger = logging.getLogger(__name__)
class ModelMetadataProvider(ABC):
"""Base abstract class for all model metadata providers"""
@abstractmethod
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Find model by hash value"""
pass
@abstractmethod
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
"""Get all versions of a model with their details"""
pass
@abstractmethod
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
"""Get specific model version with additional metadata"""
pass
@abstractmethod
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Fetch model version metadata"""
pass
@abstractmethod
async def get_user_models(self, username: str) -> Optional[List[Dict]]:
"""Fetch models owned by the specified user"""
pass
class CivitaiModelMetadataProvider(ModelMetadataProvider):
"""Provider that uses Civitai API for metadata"""
def __init__(self, civitai_client):
self.client = civitai_client
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
return await self.client.get_model_by_hash(model_hash)
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
return await self.client.get_model_versions(model_id)
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
return await self.client.get_model_version(model_id, version_id)
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
return await self.client.get_model_version_info(version_id)
async def get_user_models(self, username: str) -> Optional[List[Dict]]:
return await self.client.get_user_models(username)
class CivArchiveModelMetadataProvider(ModelMetadataProvider):
"""Provider that uses CivArchive HTML page parsing for metadata"""
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Not supported by CivArchive provider"""
return None, "CivArchive provider does not support hash lookup"
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
"""Not supported by CivArchive provider"""
return None
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
"""Get specific model version by parsing CivArchive HTML page"""
if model_id is None or version_id is None:
return None
try:
# Construct CivArchive URL
url = f"https://civarchive.com/models/{model_id}?modelVersionId={version_id}"
downloader = await get_downloader()
session = await downloader.session
async with session.get(url) as response:
if response.status != 200:
return None
html_content = await response.text()
# Parse HTML to extract JSON data
soup_parser = _require_beautifulsoup()
soup = soup_parser(html_content, 'html.parser')
script_tag = soup.find('script', {'id': '__NEXT_DATA__', 'type': 'application/json'})
if not script_tag:
return None
# Parse JSON content
json_data = json.loads(script_tag.string)
model_data = json_data.get('props', {}).get('pageProps', {}).get('model')
if not model_data or 'version' not in model_data:
return None
# Extract version data as base
version = model_data['version'].copy()
# Restructure stats
if 'downloadCount' in version and 'ratingCount' in version and 'rating' in version:
version['stats'] = {
'downloadCount': version.pop('downloadCount'),
'ratingCount': version.pop('ratingCount'),
'rating': version.pop('rating')
}
# Rename trigger to trainedWords
if 'trigger' in version:
version['trainedWords'] = version.pop('trigger')
# Transform files data to expected format
if 'files' in version:
transformed_files = []
for file_data in version['files']:
# Find first available mirror (deletedAt is null)
available_mirror = None
for mirror in file_data.get('mirrors', []):
if mirror.get('deletedAt') is None:
available_mirror = mirror
break
# Create transformed file entry
transformed_file = {
'id': file_data.get('id'),
'sizeKB': file_data.get('sizeKB'),
'name': available_mirror.get('filename', file_data.get('name')) if available_mirror else file_data.get('name'),
'type': file_data.get('type'),
'downloadUrl': available_mirror.get('url') if available_mirror else None,
'primary': True,
'mirrors': file_data.get('mirrors', [])
}
# Transform hash format
if 'sha256' in file_data:
transformed_file['hashes'] = {
'SHA256': file_data['sha256'].upper()
}
transformed_files.append(transformed_file)
version['files'] = transformed_files
# Add model information
version['model'] = {
'name': model_data.get('name'),
'type': model_data.get('type'),
'nsfw': model_data.get('is_nsfw', False),
'description': model_data.get('description'),
'tags': model_data.get('tags', [])
}
version['creator'] = {
'username': model_data.get('username'),
'image': ''
}
# Add source identifier
version['source'] = 'civarchive'
version['is_deleted'] = json_data.get('query', {}).get('is_deleted', False)
return version
except Exception as e:
logger.error(f"Error fetching CivArchive model version {model_id}/{version_id}: {e}")
return None
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Not supported by CivArchive provider - requires both model_id and version_id"""
return None, "CivArchive provider requires both model_id and version_id"
async def get_user_models(self, username: str) -> Optional[List[Dict]]:
"""Not supported by CivArchive provider"""
return None
class SQLiteModelMetadataProvider(ModelMetadataProvider):
"""Provider that uses SQLite database for metadata"""
def __init__(self, db_path: str):
self.db_path = db_path
self._aiosqlite = _require_aiosqlite()
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Find model by hash value from SQLite database"""
async with self._aiosqlite.connect(self.db_path) as db:
# Look up in model_files table to get model_id and version_id
query = """
SELECT model_id, version_id
FROM model_files
WHERE sha256 = ?
LIMIT 1
"""
db.row_factory = self._aiosqlite.Row
cursor = await db.execute(query, (model_hash.upper(),))
file_row = await cursor.fetchone()
if not file_row:
return None, "Model not found"
# Get version details
model_id = file_row['model_id']
version_id = file_row['version_id']
# Build response in the same format as Civitai API
result = await self._get_version_with_model_data(db, model_id, version_id)
return result, None if result else "Error retrieving model data"
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
"""Get all versions of a model from SQLite database"""
async with self._aiosqlite.connect(self.db_path) as db:
db.row_factory = self._aiosqlite.Row
# First check if model exists
model_query = "SELECT * FROM models WHERE id = ?"
cursor = await db.execute(model_query, (model_id,))
model_row = await cursor.fetchone()
if not model_row:
return None
model_data = json.loads(model_row['data'])
model_type = model_row['type']
model_name = model_row['name']
# Get all versions for this model
versions_query = """
SELECT id, name, base_model, data, position, published_at
FROM model_versions
WHERE model_id = ?
ORDER BY position ASC
"""
cursor = await db.execute(versions_query, (model_id,))
version_rows = await cursor.fetchall()
if not version_rows:
return {'modelVersions': [], 'type': model_type}
# Format versions similar to Civitai API
model_versions = []
for row in version_rows:
version_data = json.loads(row['data'])
# Add fields from the row to ensure we have the basic fields
version_entry = {
'id': row['id'],
'modelId': int(model_id),
'name': row['name'],
'baseModel': row['base_model'],
'model': {
'name': model_row['name'],
'type': model_type,
},
'source': 'archive_db'
}
# Update with any additional data
version_entry.update(version_data)
model_versions.append(version_entry)
return {
'modelVersions': model_versions,
'type': model_type,
'name': model_name
}
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
"""Get specific model version with additional metadata from SQLite database"""
if not model_id and not version_id:
return None
async with self._aiosqlite.connect(self.db_path) as db:
db.row_factory = self._aiosqlite.Row
# Case 1: Only version_id is provided
if model_id is None and version_id is not None:
# First get the version info to extract model_id
version_query = "SELECT model_id FROM model_versions WHERE id = ?"
cursor = await db.execute(version_query, (version_id,))
version_row = await cursor.fetchone()
if not version_row:
return None
model_id = version_row['model_id']
# Case 2: model_id is provided but version_id is not
elif model_id is not None and version_id is None:
# Find the latest version
version_query = """
SELECT id FROM model_versions
WHERE model_id = ?
ORDER BY position ASC
LIMIT 1
"""
cursor = await db.execute(version_query, (model_id,))
version_row = await cursor.fetchone()
if not version_row:
return None
version_id = version_row['id']
# Now we have both model_id and version_id, get the full data
return await self._get_version_with_model_data(db, model_id, version_id)
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
"""Fetch model version metadata from SQLite database"""
async with self._aiosqlite.connect(self.db_path) as db:
db.row_factory = self._aiosqlite.Row
# Get version details
version_query = "SELECT model_id FROM model_versions WHERE id = ?"
cursor = await db.execute(version_query, (version_id,))
version_row = await cursor.fetchone()
if not version_row:
return None, "Model version not found"
model_id = version_row['model_id']
# Build complete version data with model info
            version_data = await self._get_version_with_model_data(db, model_id, version_id)
            return version_data, None if version_data else "Error retrieving model data"
async def get_user_models(self, username: str) -> Optional[List[Dict]]:
"""Listing models by username is not supported for archive database"""
return None
async def _get_version_with_model_data(self, db, model_id, version_id) -> Optional[Dict]:
"""Helper to build version data with model information"""
# Get version details
version_query = "SELECT name, base_model, data FROM model_versions WHERE id = ? AND model_id = ?"
cursor = await db.execute(version_query, (version_id, model_id))
version_row = await cursor.fetchone()
if not version_row:
return None
# Get model details
model_query = "SELECT name, type, data, username FROM models WHERE id = ?"
cursor = await db.execute(model_query, (model_id,))
model_row = await cursor.fetchone()
if not model_row:
return None
# Parse JSON data
try:
version_data = json.loads(version_row['data'])
model_data = json.loads(model_row['data'])
# Build response
result = {
"id": int(version_id),
"modelId": int(model_id),
"name": version_row['name'],
"baseModel": version_row['base_model'],
"model": {
"name": model_row['name'],
"description": model_data.get("description"),
"type": model_row['type'],
"tags": model_data.get("tags", [])
},
"creator": {
"username": model_row['username'] or model_data.get("creator", {}).get("username"),
"image": model_data.get("creator", {}).get("image")
},
"source": "archive_db"
}
# Add any additional fields from version data
result.update(version_data)
# Attach files associated with this version from model_files table
files_query = """
SELECT data
FROM model_files
WHERE version_id = ? AND type = 'Model'
ORDER BY id ASC
"""
cursor = await db.execute(files_query, (version_id,))
file_rows = await cursor.fetchall()
files = []
for file_row in file_rows:
try:
file_data = json.loads(file_row['data'])
except json.JSONDecodeError:
logger.warning(
"Skipping model_files entry with invalid JSON for version_id %s", version_id
)
continue
# Remove 'modelId' and 'modelVersionId' fields if present
file_data.pop('modelId', None)
file_data.pop('modelVersionId', None)
files.append(file_data)
if 'files' in result:
existing_files = result['files']
if isinstance(existing_files, list):
existing_files.extend(files)
result['files'] = existing_files
else:
merged_files = files.copy()
if existing_files:
merged_files.insert(0, existing_files)
result['files'] = merged_files
elif files:
result['files'] = files
else:
result['files'] = []
return result
except json.JSONDecodeError:
return None
class FallbackMetadataProvider(ModelMetadataProvider):
"""Try providers in order, return first successful result."""
def __init__(self, providers: list):
self.providers = providers
async def get_model_by_hash(self, model_hash: str) -> Tuple[Optional[Dict], Optional[str]]:
for provider in self.providers:
try:
result, error = await provider.get_model_by_hash(model_hash)
if result:
return result, error
except Exception as e:
logger.debug(f"Provider failed for get_model_by_hash: {e}")
continue
return None, "Model not found"
async def get_model_versions(self, model_id: str) -> Optional[Dict]:
for provider in self.providers:
try:
result = await provider.get_model_versions(model_id)
if result:
return result
except Exception as e:
logger.debug(f"Provider failed for get_model_versions: {e}")
continue
return None
async def get_model_version(self, model_id: int = None, version_id: int = None) -> Optional[Dict]:
for provider in self.providers:
try:
result = await provider.get_model_version(model_id, version_id)
if result:
return result
except Exception as e:
logger.debug(f"Provider failed for get_model_version: {e}")
continue
return None
async def get_model_version_info(self, version_id: str) -> Tuple[Optional[Dict], Optional[str]]:
for provider in self.providers:
try:
result, error = await provider.get_model_version_info(version_id)
if result:
return result, error
except Exception as e:
logger.debug(f"Provider failed for get_model_version_info: {e}")
continue
return None, "No provider could retrieve the data"
async def get_user_models(self, username: str) -> Optional[List[Dict]]:
for provider in self.providers:
try:
result = await provider.get_user_models(username)
if result is not None:
return result
except Exception as e:
logger.debug(f"Provider failed for get_user_models: {e}")
continue
return None
class ModelMetadataProviderManager:
"""Manager for selecting and using model metadata providers"""
_instance = None
@classmethod
async def get_instance(cls):
"""Get singleton instance of ModelMetadataProviderManager"""
if cls._instance is None:
cls._instance = cls()
return cls._instance
def __init__(self):
self.providers = {}
self.default_provider = None
def register_provider(self, name: str, provider: ModelMetadataProvider, is_default: bool = False):
"""Register a metadata provider"""
self.providers[name] = provider
if is_default or self.default_provider is None:
self.default_provider = name
async def get_model_by_hash(self, model_hash: str, provider_name: str = None) -> Tuple[Optional[Dict], Optional[str]]:
"""Find model by hash using specified or default provider"""
provider = self._get_provider(provider_name)
return await provider.get_model_by_hash(model_hash)
async def get_model_versions(self, model_id: str, provider_name: str = None) -> Optional[Dict]:
"""Get model versions using specified or default provider"""
provider = self._get_provider(provider_name)
return await provider.get_model_versions(model_id)
async def get_model_version(self, model_id: int = None, version_id: int = None, provider_name: str = None) -> Optional[Dict]:
"""Get specific model version using specified or default provider"""
provider = self._get_provider(provider_name)
return await provider.get_model_version(model_id, version_id)
async def get_model_version_info(self, version_id: str, provider_name: str = None) -> Tuple[Optional[Dict], Optional[str]]:
"""Fetch model version info using specified or default provider"""
provider = self._get_provider(provider_name)
return await provider.get_model_version_info(version_id)
async def get_user_models(self, username: str, provider_name: str = None) -> Optional[List[Dict]]:
"""Fetch models owned by the specified user"""
provider = self._get_provider(provider_name)
return await provider.get_user_models(username)
def _get_provider(self, provider_name: str = None) -> ModelMetadataProvider:
"""Get provider by name or default provider"""
if provider_name and provider_name in self.providers:
return self.providers[provider_name]
if self.default_provider is None:
raise ValueError("No default provider set and no valid provider specified")
return self.providers[self.default_provider]
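
A minimal wiring sketch for the manager; the client instance, archive path, and IDs below are placeholders rather than values from this changeset:

    # Hypothetical bootstrap, run inside an async context
    async def _setup_metadata_providers(civitai_client):
        manager = await ModelMetadataProviderManager.get_instance()
        fallback = FallbackMetadataProvider([
            CivitaiModelMetadataProvider(civitai_client),             # assumed pre-built client
            SQLiteModelMetadataProvider("/path/to/archive.sqlite"),   # assumed archive DB
        ])
        manager.register_provider("fallback", fallback, is_default=True)
        return await manager.get_model_version(model_id=12345, version_id=67890)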

py/services/model_query.py

@@ -0,0 +1,196 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Protocol, Callable
from ..utils.constants import NSFW_LEVELS
from ..utils.utils import fuzzy_match as default_fuzzy_match
class SettingsProvider(Protocol):
"""Protocol describing the SettingsManager contract used by query helpers."""
def get(self, key: str, default: Any = None) -> Any:
...
@dataclass(frozen=True)
class SortParams:
"""Normalized representation of sorting instructions."""
key: str
order: str
@dataclass(frozen=True)
class FilterCriteria:
"""Container for model list filtering options."""
folder: Optional[str] = None
base_models: Optional[Sequence[str]] = None
tags: Optional[Sequence[str]] = None
favorites_only: bool = False
search_options: Optional[Dict[str, Any]] = None
class ModelCacheRepository:
"""Adapter around scanner cache access and sort normalisation."""
def __init__(self, scanner) -> None:
self._scanner = scanner
async def get_cache(self):
"""Return the underlying cache instance from the scanner."""
return await self._scanner.get_cached_data()
async def fetch_sorted(self, params: SortParams) -> List[Dict[str, Any]]:
"""Fetch cached data pre-sorted according to ``params``."""
cache = await self.get_cache()
return await cache.get_sorted_data(params.key, params.order)
@staticmethod
def parse_sort(sort_by: str) -> SortParams:
"""Parse an incoming sort string into key/order primitives."""
if not sort_by:
return SortParams(key="name", order="asc")
if ":" in sort_by:
raw_key, raw_order = sort_by.split(":", 1)
sort_key = raw_key.strip().lower() or "name"
order = raw_order.strip().lower()
else:
sort_key = sort_by.strip().lower() or "name"
order = "asc"
if order not in ("asc", "desc"):
order = "asc"
return SortParams(key=sort_key, order=order)
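
Some illustrative inputs for ModelCacheRepository.parse_sort (the sample strings are made up):

    assert ModelCacheRepository.parse_sort("name:desc") == SortParams(key="name", order="desc")
    assert ModelCacheRepository.parse_sort("DATE") == SortParams(key="date", order="asc")
    assert ModelCacheRepository.parse_sort("") == SortParams(key="name", order="asc")
    assert ModelCacheRepository.parse_sort("size:bogus") == SortParams(key="size", order="asc")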
class ModelFilterSet:
"""Applies common filtering rules to the model collection."""
def __init__(self, settings: SettingsProvider, nsfw_levels: Optional[Dict[str, int]] = None) -> None:
self._settings = settings
self._nsfw_levels = nsfw_levels or NSFW_LEVELS
def apply(self, data: Iterable[Dict[str, Any]], criteria: FilterCriteria) -> List[Dict[str, Any]]:
"""Return items that satisfy the provided criteria."""
items = list(data)
if self._settings.get("show_only_sfw", False):
threshold = self._nsfw_levels.get("R", 0)
items = [
item for item in items
if not item.get("preview_nsfw_level") or item.get("preview_nsfw_level") < threshold
]
if criteria.favorites_only:
items = [item for item in items if item.get("favorite", False)]
folder = criteria.folder
options = criteria.search_options or {}
recursive = bool(options.get("recursive", True))
if folder is not None:
if recursive:
if folder:
folder_with_sep = f"{folder}/"
items = [
item for item in items
if item.get("folder") == folder or item.get("folder", "").startswith(folder_with_sep)
]
else:
items = [item for item in items if item.get("folder") == folder]
base_models = criteria.base_models or []
if base_models:
base_model_set = set(base_models)
items = [item for item in items if item.get("base_model") in base_model_set]
tags = criteria.tags or []
if tags:
tag_set = set(tags)
items = [
item for item in items
if any(tag in tag_set for tag in item.get("tags", []))
]
return items
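
A small filtering sketch, assuming a stub settings object and made-up cache items:

    class _StubSettings:
        def get(self, key, default=None):
            return {"show_only_sfw": False}.get(key, default)

    filter_set = ModelFilterSet(_StubSettings())
    criteria = FilterCriteria(folder="characters", base_models=["SDXL"], tags=["style"])
    items = [
        {"folder": "characters/anime", "base_model": "SDXL", "tags": ["style"]},
        {"folder": "other", "base_model": "SD1.5", "tags": []},
    ]
    assert filter_set.apply(items, criteria) == [items[0]]  # recursive folder match keeps the first item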
class SearchStrategy:
"""Encapsulates text and fuzzy matching behaviour for model queries."""
DEFAULT_OPTIONS: Dict[str, Any] = {
"filename": True,
"modelname": True,
"tags": False,
"recursive": True,
"creator": False,
}
def __init__(self, fuzzy_matcher: Optional[Callable[[str, str], bool]] = None) -> None:
self._fuzzy_match = fuzzy_matcher or default_fuzzy_match
def normalize_options(self, options: Optional[Dict[str, Any]]) -> Dict[str, Any]:
"""Merge provided options with defaults without mutating input."""
normalized = dict(self.DEFAULT_OPTIONS)
if options:
normalized.update(options)
return normalized
def apply(
self,
data: Iterable[Dict[str, Any]],
search_term: str,
options: Dict[str, Any],
fuzzy: bool = False,
) -> List[Dict[str, Any]]:
"""Return items matching the search term using the configured strategy."""
if not search_term:
return list(data)
search_lower = search_term.lower()
results: List[Dict[str, Any]] = []
for item in data:
if options.get("filename", True):
candidate = item.get("file_name", "")
if self._matches(candidate, search_term, search_lower, fuzzy):
results.append(item)
continue
if options.get("modelname", True):
candidate = item.get("model_name", "")
if self._matches(candidate, search_term, search_lower, fuzzy):
results.append(item)
continue
if options.get("tags", False):
tags = item.get("tags", []) or []
if any(self._matches(tag, search_term, search_lower, fuzzy) for tag in tags):
results.append(item)
continue
if options.get("creator", False):
creator_username = ""
civitai = item.get("civitai")
if isinstance(civitai, dict):
creator = civitai.get("creator")
if isinstance(creator, dict):
creator_username = creator.get("username", "")
if creator_username and self._matches(creator_username, search_term, search_lower, fuzzy):
results.append(item)
continue
return results
def _matches(self, candidate: str, search_term: str, search_lower: str, fuzzy: bool) -> bool:
if not candidate:
return False
candidate_lower = candidate.lower()
if fuzzy:
return self._fuzzy_match(candidate, search_term)
return search_lower in candidate_lower
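
A matching sketch with invented items; normalize_options merges the caller's flags over DEFAULT_OPTIONS without mutating the input:

    strategy = SearchStrategy()
    options = strategy.normalize_options({"tags": True})
    items = [
        {"file_name": "fluffy_cat_v2", "model_name": "Fluffy Cat", "tags": ["animal"]},
        {"file_name": "cityscape", "model_name": "City", "tags": ["scenery"]},
    ]
    hits = strategy.apply(items, "cat", options)
    assert [hit["file_name"] for hit in hits] == ["fluffy_cat_v2"]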

File diff suppressed because it is too large


@@ -0,0 +1,357 @@
import json
import logging
import os
import re
import sqlite3
import threading
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Tuple
from ..utils.settings_paths import get_settings_dir
logger = logging.getLogger(__name__)
@dataclass
class PersistedCacheData:
"""Lightweight structure returned by the persistent cache."""
raw_data: List[Dict]
hash_rows: List[Tuple[str, str]]
excluded_models: List[str]
class PersistentModelCache:
"""Persist core model metadata and hash index data in SQLite."""
_DEFAULT_FILENAME = "model_cache.sqlite"
_instances: Dict[str, "PersistentModelCache"] = {}
_instance_lock = threading.Lock()
def __init__(self, library_name: str = "default", db_path: Optional[str] = None) -> None:
self._library_name = library_name or "default"
self._db_path = db_path or self._resolve_default_path(self._library_name)
self._db_lock = threading.Lock()
self._schema_initialized = False
try:
directory = os.path.dirname(self._db_path)
if directory:
os.makedirs(directory, exist_ok=True)
except Exception as exc: # pragma: no cover - defensive guard
logger.warning("Could not create cache directory %s: %s", directory, exc)
if self.is_enabled():
self._initialize_schema()
@classmethod
def get_default(cls, library_name: Optional[str] = None) -> "PersistentModelCache":
name = (library_name or "default")
with cls._instance_lock:
if name not in cls._instances:
cls._instances[name] = cls(name)
return cls._instances[name]
def is_enabled(self) -> bool:
return os.environ.get("LORA_MANAGER_DISABLE_PERSISTENT_CACHE", "0") != "1"
def load_cache(self, model_type: str) -> Optional[PersistedCacheData]:
if not self.is_enabled():
return None
if not self._schema_initialized:
self._initialize_schema()
if not self._schema_initialized:
return None
try:
with self._db_lock:
conn = self._connect(readonly=True)
try:
rows = conn.execute(
"SELECT file_path, file_name, model_name, folder, size, modified, sha256, base_model,"
" preview_url, preview_nsfw_level, from_civitai, favorite, notes, usage_tips,"
" civitai_id, civitai_model_id, civitai_name, trained_words, exclude, db_checked,"
" last_checked_at"
" FROM models WHERE model_type = ?",
(model_type,),
).fetchall()
if not rows:
return None
tags = self._load_tags(conn, model_type)
hash_rows = conn.execute(
"SELECT sha256, file_path FROM hash_index WHERE model_type = ?",
(model_type,),
).fetchall()
excluded = conn.execute(
"SELECT file_path FROM excluded_models WHERE model_type = ?",
(model_type,),
).fetchall()
finally:
conn.close()
except Exception as exc:
logger.warning("Failed to load persisted cache for %s: %s", model_type, exc)
return None
raw_data: List[Dict] = []
for row in rows:
file_path: str = row["file_path"]
trained_words = []
if row["trained_words"]:
try:
trained_words = json.loads(row["trained_words"])
except json.JSONDecodeError:
trained_words = []
civitai: Optional[Dict] = None
if any(row[col] is not None for col in ("civitai_id", "civitai_model_id", "civitai_name")):
civitai = {}
if row["civitai_id"] is not None:
civitai["id"] = row["civitai_id"]
if row["civitai_model_id"] is not None:
civitai["modelId"] = row["civitai_model_id"]
if row["civitai_name"]:
civitai["name"] = row["civitai_name"]
if trained_words:
civitai["trainedWords"] = trained_words
item = {
"file_path": file_path,
"file_name": row["file_name"],
"model_name": row["model_name"],
"folder": row["folder"] or "",
"size": row["size"] or 0,
"modified": row["modified"] or 0.0,
"sha256": row["sha256"] or "",
"base_model": row["base_model"] or "",
"preview_url": row["preview_url"] or "",
"preview_nsfw_level": row["preview_nsfw_level"] or 0,
"from_civitai": bool(row["from_civitai"]),
"favorite": bool(row["favorite"]),
"notes": row["notes"] or "",
"usage_tips": row["usage_tips"] or "",
"exclude": bool(row["exclude"]),
"db_checked": bool(row["db_checked"]),
"last_checked_at": row["last_checked_at"] or 0.0,
"tags": tags.get(file_path, []),
"civitai": civitai,
}
raw_data.append(item)
hash_pairs = [(entry["sha256"].lower(), entry["file_path"]) for entry in hash_rows if entry["sha256"]]
if not hash_pairs:
# Fall back to hashes stored on the model rows
for item in raw_data:
sha_value = item.get("sha256")
if sha_value:
hash_pairs.append((sha_value.lower(), item["file_path"]))
excluded_paths = [row["file_path"] for row in excluded]
return PersistedCacheData(raw_data=raw_data, hash_rows=hash_pairs, excluded_models=excluded_paths)
def save_cache(self, model_type: str, raw_data: Sequence[Dict], hash_index: Dict[str, List[str]], excluded_models: Sequence[str]) -> None:
if not self.is_enabled():
return
if not self._schema_initialized:
self._initialize_schema()
if not self._schema_initialized:
return
try:
with self._db_lock:
conn = self._connect()
try:
conn.execute("PRAGMA foreign_keys = ON")
conn.execute("DELETE FROM models WHERE model_type = ?", (model_type,))
conn.execute("DELETE FROM model_tags WHERE model_type = ?", (model_type,))
conn.execute("DELETE FROM hash_index WHERE model_type = ?", (model_type,))
conn.execute("DELETE FROM excluded_models WHERE model_type = ?", (model_type,))
model_rows = [self._prepare_model_row(model_type, item) for item in raw_data]
conn.executemany(self._insert_model_sql(), model_rows)
tag_rows = []
for item in raw_data:
file_path = item.get("file_path")
if not file_path:
continue
for tag in item.get("tags") or []:
tag_rows.append((model_type, file_path, tag))
if tag_rows:
conn.executemany(
"INSERT INTO model_tags (model_type, file_path, tag) VALUES (?, ?, ?)",
tag_rows,
)
hash_rows: List[Tuple[str, str, str]] = []
for sha_value, paths in hash_index.items():
for path in paths:
if not sha_value or not path:
continue
hash_rows.append((model_type, sha_value.lower(), path))
if hash_rows:
conn.executemany(
"INSERT OR IGNORE INTO hash_index (model_type, sha256, file_path) VALUES (?, ?, ?)",
hash_rows,
)
excluded_rows = [(model_type, path) for path in excluded_models]
if excluded_rows:
conn.executemany(
"INSERT OR IGNORE INTO excluded_models (model_type, file_path) VALUES (?, ?)",
excluded_rows,
)
conn.commit()
finally:
conn.close()
except Exception as exc:
logger.warning("Failed to persist cache for %s: %s", model_type, exc)
# Internal helpers -------------------------------------------------
def _resolve_default_path(self, library_name: str) -> str:
override = os.environ.get("LORA_MANAGER_CACHE_DB")
if override:
return override
try:
settings_dir = get_settings_dir(create=True)
except Exception as exc: # pragma: no cover - defensive guard
logger.warning("Falling back to project directory for cache: %s", exc)
settings_dir = os.path.dirname(os.path.dirname(self._db_path)) if hasattr(self, "_db_path") else os.getcwd()
safe_name = re.sub(r"[^A-Za-z0-9_.-]", "_", library_name or "default")
if safe_name.lower() in ("default", ""):
legacy_path = os.path.join(settings_dir, self._DEFAULT_FILENAME)
if os.path.exists(legacy_path):
return legacy_path
return os.path.join(settings_dir, "model_cache", f"{safe_name}.sqlite")
def _initialize_schema(self) -> None:
with self._db_lock:
if self._schema_initialized:
return
try:
with self._connect() as conn:
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA foreign_keys = ON")
conn.executescript(
"""
CREATE TABLE IF NOT EXISTS models (
model_type TEXT NOT NULL,
file_path TEXT NOT NULL,
file_name TEXT,
model_name TEXT,
folder TEXT,
size INTEGER,
modified REAL,
sha256 TEXT,
base_model TEXT,
preview_url TEXT,
preview_nsfw_level INTEGER,
from_civitai INTEGER,
favorite INTEGER,
notes TEXT,
usage_tips TEXT,
civitai_id INTEGER,
civitai_model_id INTEGER,
civitai_name TEXT,
trained_words TEXT,
exclude INTEGER,
db_checked INTEGER,
last_checked_at REAL,
PRIMARY KEY (model_type, file_path)
);
CREATE TABLE IF NOT EXISTS model_tags (
model_type TEXT NOT NULL,
file_path TEXT NOT NULL,
tag TEXT NOT NULL,
PRIMARY KEY (model_type, file_path, tag)
);
CREATE TABLE IF NOT EXISTS hash_index (
model_type TEXT NOT NULL,
sha256 TEXT NOT NULL,
file_path TEXT NOT NULL,
PRIMARY KEY (model_type, sha256, file_path)
);
CREATE TABLE IF NOT EXISTS excluded_models (
model_type TEXT NOT NULL,
file_path TEXT NOT NULL,
PRIMARY KEY (model_type, file_path)
);
"""
)
conn.commit()
self._schema_initialized = True
except Exception as exc: # pragma: no cover - defensive guard
logger.warning("Failed to initialize persistent cache schema: %s", exc)
def _connect(self, readonly: bool = False) -> sqlite3.Connection:
uri = False
path = self._db_path
if readonly:
if not os.path.exists(path):
raise FileNotFoundError(path)
path = f"file:{path}?mode=ro"
uri = True
conn = sqlite3.connect(path, check_same_thread=False, uri=uri, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
return conn
def _prepare_model_row(self, model_type: str, item: Dict) -> Tuple:
civitai = item.get("civitai") or {}
trained_words = civitai.get("trainedWords")
if isinstance(trained_words, str):
trained_words_json = trained_words
elif trained_words is None:
trained_words_json = None
else:
trained_words_json = json.dumps(trained_words)
return (
model_type,
item.get("file_path"),
item.get("file_name"),
item.get("model_name"),
item.get("folder"),
int(item.get("size") or 0),
float(item.get("modified") or 0.0),
(item.get("sha256") or "").lower() or None,
item.get("base_model"),
item.get("preview_url"),
int(item.get("preview_nsfw_level") or 0),
1 if item.get("from_civitai", True) else 0,
1 if item.get("favorite") else 0,
item.get("notes"),
item.get("usage_tips"),
civitai.get("id"),
civitai.get("modelId"),
civitai.get("name"),
trained_words_json,
1 if item.get("exclude") else 0,
1 if item.get("db_checked") else 0,
float(item.get("last_checked_at") or 0.0),
)
def _insert_model_sql(self) -> str:
return (
"INSERT INTO models (model_type, file_path, file_name, model_name, folder, size, modified, sha256,"
" base_model, preview_url, preview_nsfw_level, from_civitai, favorite, notes, usage_tips,"
" civitai_id, civitai_model_id, civitai_name, trained_words, exclude, db_checked, last_checked_at)"
" VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
)
def _load_tags(self, conn: sqlite3.Connection, model_type: str) -> Dict[str, List[str]]:
tag_rows = conn.execute(
"SELECT file_path, tag FROM model_tags WHERE model_type = ?",
(model_type,),
).fetchall()
result: Dict[str, List[str]] = {}
for row in tag_rows:
result.setdefault(row["file_path"], []).append(row["tag"])
return result
def get_persistent_cache() -> PersistentModelCache:
from .settings_manager import get_settings_manager # Local import to avoid cycles
library_name = get_settings_manager().get_active_library_name()
return PersistentModelCache.get_default(library_name)
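
A round-trip sketch for the persistent cache; the path and the single record are made up:

    cache = PersistentModelCache(library_name="demo", db_path="/tmp/model_cache_demo.sqlite")
    cache.save_cache(
        "lora",
        raw_data=[{"file_path": "/models/foo.safetensors", "file_name": "foo",
                   "model_name": "Foo", "sha256": "ABC123"}],
        hash_index={"abc123": ["/models/foo.safetensors"]},
        excluded_models=[],
    )
    persisted = cache.load_cache("lora")
    assert persisted is not None
    assert persisted.hash_rows == [("abc123", "/models/foo.safetensors")]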


@@ -0,0 +1,206 @@
"""Service for processing preview assets for models."""
from __future__ import annotations
import logging
import os
from typing import Awaitable, Callable, Dict, Optional, Sequence
from urllib.parse import urlparse
from ..utils.constants import CARD_PREVIEW_WIDTH, PREVIEW_EXTENSIONS
from ..utils.civitai_utils import rewrite_preview_url
logger = logging.getLogger(__name__)
class PreviewAssetService:
"""Manage fetching and persisting preview assets."""
def __init__(
self,
*,
metadata_manager,
downloader_factory: Callable[[], Awaitable],
exif_utils,
) -> None:
self._metadata_manager = metadata_manager
self._downloader_factory = downloader_factory
self._exif_utils = exif_utils
async def ensure_preview_for_metadata(
self,
metadata_path: str,
local_metadata: Dict[str, object],
images: Sequence[Dict[str, object]] | None,
) -> None:
"""Ensure preview assets exist for the supplied metadata entry."""
if local_metadata.get("preview_url") and os.path.exists(
str(local_metadata["preview_url"])
):
return
if not images:
return
first_preview = images[0]
base_name = os.path.splitext(os.path.splitext(os.path.basename(metadata_path))[0])[0]
preview_dir = os.path.dirname(metadata_path)
is_video = first_preview.get("type") == "video"
preview_url = first_preview.get("url")
if not preview_url:
return
def extension_from_url(url: str, fallback: str) -> str:
try:
parsed = urlparse(url)
except ValueError:
return fallback
ext = os.path.splitext(parsed.path)[1]
return ext or fallback
downloader = await self._downloader_factory()
if is_video:
extension = extension_from_url(preview_url, ".mp4")
preview_path = os.path.join(preview_dir, base_name + extension)
rewritten_url, rewritten = rewrite_preview_url(preview_url, media_type="video")
attempt_urls = []
if rewritten:
attempt_urls.append(rewritten_url)
attempt_urls.append(preview_url)
seen: set[str] = set()
for candidate in attempt_urls:
if not candidate or candidate in seen:
continue
seen.add(candidate)
success, _ = await downloader.download_file(candidate, preview_path, use_auth=False)
if success:
local_metadata["preview_url"] = preview_path.replace(os.sep, "/")
local_metadata["preview_nsfw_level"] = first_preview.get("nsfwLevel", 0)
return
else:
rewritten_url, rewritten = rewrite_preview_url(preview_url, media_type="image")
if rewritten:
extension = extension_from_url(preview_url, ".png")
preview_path = os.path.join(preview_dir, base_name + extension)
success, _ = await downloader.download_file(
rewritten_url, preview_path, use_auth=False
)
if success:
local_metadata["preview_url"] = preview_path.replace(os.sep, "/")
local_metadata["preview_nsfw_level"] = first_preview.get("nsfwLevel", 0)
return
extension = ".webp"
preview_path = os.path.join(preview_dir, base_name + extension)
success, content, _headers = await downloader.download_to_memory(
preview_url, use_auth=False
)
if not success:
return
try:
optimized_data, _ = self._exif_utils.optimize_image(
image_data=content,
target_width=CARD_PREVIEW_WIDTH,
format="webp",
quality=85,
preserve_metadata=False,
)
with open(preview_path, "wb") as handle:
handle.write(optimized_data)
except Exception as exc: # pragma: no cover - defensive path
logger.error("Error optimizing preview image: %s", exc)
try:
with open(preview_path, "wb") as handle:
handle.write(content)
except Exception as save_exc:
logger.error("Error saving preview image: %s", save_exc)
return
local_metadata["preview_url"] = preview_path.replace(os.sep, "/")
local_metadata["preview_nsfw_level"] = first_preview.get("nsfwLevel", 0)
async def replace_preview(
self,
*,
model_path: str,
preview_data: bytes,
content_type: str,
original_filename: Optional[str],
nsfw_level: int,
update_preview_in_cache: Callable[[str, str, int], Awaitable[bool]],
metadata_loader: Callable[[str], Awaitable[Dict[str, object]]],
) -> Dict[str, object]:
"""Replace an existing preview asset for a model."""
base_name = os.path.splitext(os.path.basename(model_path))[0]
folder = os.path.dirname(model_path)
extension, optimized_data = await self._convert_preview(
preview_data, content_type, original_filename
)
for ext in PREVIEW_EXTENSIONS:
existing_preview = os.path.join(folder, base_name + ext)
if os.path.exists(existing_preview):
try:
os.remove(existing_preview)
except Exception as exc: # pragma: no cover - defensive path
logger.warning(
"Failed to delete existing preview %s: %s", existing_preview, exc
)
preview_path = os.path.join(folder, base_name + extension).replace(os.sep, "/")
with open(preview_path, "wb") as handle:
handle.write(optimized_data)
metadata_path = os.path.splitext(model_path)[0] + ".metadata.json"
metadata = await metadata_loader(metadata_path)
metadata["preview_url"] = preview_path
metadata["preview_nsfw_level"] = nsfw_level
await self._metadata_manager.save_metadata(model_path, metadata)
await update_preview_in_cache(model_path, preview_path, nsfw_level)
return {"preview_path": preview_path, "preview_nsfw_level": nsfw_level}
async def _convert_preview(
self, data: bytes, content_type: str, original_filename: Optional[str]
) -> tuple[str, bytes]:
"""Convert preview bytes to the persisted representation."""
if content_type.startswith("video/"):
extension = self._resolve_video_extension(content_type, original_filename)
return extension, data
original_ext = (original_filename or "").lower()
if original_ext.endswith(".gif") or content_type.lower() == "image/gif":
return ".gif", data
optimized_data, _ = self._exif_utils.optimize_image(
image_data=data,
target_width=CARD_PREVIEW_WIDTH,
format="webp",
quality=85,
preserve_metadata=False,
)
return ".webp", optimized_data
def _resolve_video_extension(self, content_type: str, original_filename: Optional[str]) -> str:
"""Infer the best extension for a video preview."""
if original_filename:
extension = os.path.splitext(original_filename)[1].lower()
if extension in {".mp4", ".webm", ".mov", ".avi"}:
return extension
if "webm" in content_type:
return ".webm"
return ".mp4"


@@ -1,5 +1,5 @@
 import asyncio
-from typing import List, Dict
+from typing import Iterable, List, Dict, Optional
 from dataclasses import dataclass
 from operator import itemgetter
 from natsort import natsorted
@@ -10,77 +10,115 @@ class RecipeCache:
     raw_data: List[Dict]
     sorted_by_name: List[Dict]
     sorted_by_date: List[Dict]

     def __post_init__(self):
         self._lock = asyncio.Lock()

     async def resort(self, name_only: bool = False):
         """Resort all cached data views"""
         async with self._lock:
-            self.sorted_by_name = natsorted(
-                self.raw_data,
-                key=lambda x: x.get('title', '').lower()  # Case-insensitive sort
-            )
-            if not name_only:
-                self.sorted_by_date = sorted(
-                    self.raw_data,
-                    key=itemgetter('created_date', 'file_path'),
-                    reverse=True
-                )
+            self._resort_locked(name_only=name_only)

-    async def update_recipe_metadata(self, recipe_id: str, metadata: Dict) -> bool:
+    async def update_recipe_metadata(self, recipe_id: str, metadata: Dict, *, resort: bool = True) -> bool:
         """Update metadata for a specific recipe in all cached data

         Args:
             recipe_id: The ID of the recipe to update
             metadata: The new metadata

         Returns:
             bool: True if the update was successful, False if the recipe wasn't found
         """
-        # Update in raw_data
-        for item in self.raw_data:
-            if item.get('id') == recipe_id:
-                item.update(metadata)
-                break
-        else:
-            return False  # Recipe not found
-        # Resort to reflect changes
-        await self.resort()
-        return True
+        async with self._lock:
+            for item in self.raw_data:
+                if str(item.get('id')) == str(recipe_id):
+                    item.update(metadata)
+                    if resort:
+                        self._resort_locked()
+                    return True
+            return False  # Recipe not found

-    async def add_recipe(self, recipe_data: Dict) -> None:
-        """Add a new recipe to the cache
-
-        Args:
-            recipe_data: The recipe data to add
-        """
+    async def add_recipe(self, recipe_data: Dict, *, resort: bool = False) -> None:
+        """Add a new recipe to the cache."""
         async with self._lock:
             self.raw_data.append(recipe_data)
-            await self.resort()
+            if resort:
+                self._resort_locked()

-    async def remove_recipe(self, recipe_id: str) -> bool:
-        """Remove a recipe from the cache by ID
+    async def remove_recipe(self, recipe_id: str, *, resort: bool = False) -> Optional[Dict]:
+        """Remove a recipe from the cache by ID.

         Args:
             recipe_id: The ID of the recipe to remove

         Returns:
-            bool: True if the recipe was found and removed, False otherwise
+            The removed recipe data if found, otherwise ``None``.
         """
-        # Find the recipe in raw_data
-        recipe_index = next((i for i, recipe in enumerate(self.raw_data)
-                            if recipe.get('id') == recipe_id), None)
-        if recipe_index is None:
-            return False
-        # Remove from raw_data
-        self.raw_data.pop(recipe_index)
-        # Resort to update sorted lists
-        await self.resort()
-        return True
+        async with self._lock:
+            for index, recipe in enumerate(self.raw_data):
+                if str(recipe.get('id')) == str(recipe_id):
+                    removed = self.raw_data.pop(index)
+                    if resort:
+                        self._resort_locked()
+                    return removed
+            return None
+
+    async def bulk_remove(self, recipe_ids: Iterable[str], *, resort: bool = False) -> List[Dict]:
+        """Remove multiple recipes from the cache."""
+        id_set = {str(recipe_id) for recipe_id in recipe_ids}
+        if not id_set:
+            return []
+        async with self._lock:
+            removed = [item for item in self.raw_data if str(item.get('id')) in id_set]
+            if not removed:
+                return []
+            self.raw_data = [item for item in self.raw_data if str(item.get('id')) not in id_set]
+            if resort:
+                self._resort_locked()
+            return removed
+
+    async def replace_recipe(self, recipe_id: str, new_data: Dict, *, resort: bool = False) -> bool:
+        """Replace cached data for a recipe."""
+        async with self._lock:
+            for index, recipe in enumerate(self.raw_data):
+                if str(recipe.get('id')) == str(recipe_id):
+                    self.raw_data[index] = new_data
+                    if resort:
+                        self._resort_locked()
+                    return True
+            return False
+
+    async def get_recipe(self, recipe_id: str) -> Optional[Dict]:
+        """Return a shallow copy of a cached recipe."""
+        async with self._lock:
+            for recipe in self.raw_data:
+                if str(recipe.get('id')) == str(recipe_id):
+                    return dict(recipe)
+            return None
+
+    async def snapshot(self) -> List[Dict]:
+        """Return a copy of all cached recipes."""
+        async with self._lock:
+            return [dict(item) for item in self.raw_data]
+
+    def _resort_locked(self, *, name_only: bool = False) -> None:
+        """Sort cached views. Caller must hold ``_lock``."""
+        self.sorted_by_name = natsorted(
+            self.raw_data,
+            key=lambda x: x.get('title', '').lower()
+        )
+        if not name_only:
+            self.sorted_by_date = sorted(
+                self.raw_data,
+                key=itemgetter('created_date', 'file_path'),
+                reverse=True
+            )
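
To show the intent of the new resort=False flag plus _resort_locked, a deferred-resort sketch with invented recipes:

    import asyncio

    async def _demo():
        cache = RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
        await cache.add_recipe({"id": "r1", "title": "Alpha",
                                "created_date": 1.0, "file_path": "/r1"}, resort=False)
        await cache.add_recipe({"id": "r2", "title": "beta",
                                "created_date": 2.0, "file_path": "/r2"}, resort=False)
        await cache.resort()  # one sort for the whole batch
        assert [r["id"] for r in cache.sorted_by_date] == ["r2", "r1"]

    asyncio.run(_demo())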


@@ -3,12 +3,14 @@ import logging
import asyncio
import json
import time
-from typing import List, Dict, Optional, Any, Tuple
+from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
 from ..config import config
 from .recipe_cache import RecipeCache
 from .service_registry import ServiceRegistry
 from .lora_scanner import LoraScanner
-from ..utils.utils import fuzzy_match
+from .metadata_service import get_default_metadata_provider
+from .recipes.errors import RecipeNotFoundError
+from ..utils.utils import calculate_recipe_fingerprint, fuzzy_match
from natsort import natsorted
import sys
@@ -45,9 +47,36 @@ class RecipeScanner:
self._initialization_lock = asyncio.Lock()
self._initialization_task: Optional[asyncio.Task] = None
self._is_initializing = False
self._mutation_lock = asyncio.Lock()
self._resort_tasks: Set[asyncio.Task] = set()
if lora_scanner:
self._lora_scanner = lora_scanner
self._initialized = True
def on_library_changed(self) -> None:
"""Reset cached state when the active library changes."""
# Cancel any in-flight initialization or resorting work so the next
# access rebuilds the cache for the new library.
if self._initialization_task and not self._initialization_task.done():
self._initialization_task.cancel()
for task in list(self._resort_tasks):
if not task.done():
task.cancel()
self._resort_tasks.clear()
self._cache = None
self._initialization_task = None
self._is_initializing = False
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and not loop.is_closed():
loop.create_task(self.initialize_in_background())
async def _get_civitai_client(self):
"""Lazily initialize CivitaiClient from registry"""
@@ -190,6 +219,22 @@ class RecipeScanner:
# Clean up the event loop
loop.close()
def _schedule_resort(self, *, name_only: bool = False) -> None:
"""Schedule a background resort of the recipe cache."""
if not self._cache:
return
async def _resort_wrapper() -> None:
try:
await self._cache.resort(name_only=name_only)
except Exception as exc: # pragma: no cover - defensive logging
logger.error("Recipe Scanner: error resorting cache: %s", exc, exc_info=True)
task = asyncio.create_task(_resort_wrapper())
self._resort_tasks.add(task)
task.add_done_callback(lambda finished: self._resort_tasks.discard(finished))
@property
def recipes_dir(self) -> str:
"""Get path to recipes directory"""
@@ -254,7 +299,45 @@ class RecipeScanner:
# Return the cache (may be empty or partially initialized)
return self._cache or RecipeCache(raw_data=[], sorted_by_name=[], sorted_by_date=[])
async def refresh_cache(self, force: bool = False) -> RecipeCache:
"""Public helper to refresh or return the recipe cache."""
return await self.get_cached_data(force_refresh=force)
async def add_recipe(self, recipe_data: Dict[str, Any]) -> None:
"""Add a recipe to the in-memory cache."""
if not recipe_data:
return
cache = await self.get_cached_data()
await cache.add_recipe(recipe_data, resort=False)
self._schedule_resort()
async def remove_recipe(self, recipe_id: str) -> bool:
"""Remove a recipe from the cache by ID."""
if not recipe_id:
return False
cache = await self.get_cached_data()
removed = await cache.remove_recipe(recipe_id, resort=False)
if removed is None:
return False
self._schedule_resort()
return True
async def bulk_remove(self, recipe_ids: Iterable[str]) -> int:
"""Remove multiple recipes from the cache."""
cache = await self.get_cached_data()
removed = await cache.bulk_remove(recipe_ids, resort=False)
if removed:
self._schedule_resort()
return len(removed)
async def scan_all_recipes(self) -> List[Dict]:
"""Scan all recipe JSON files and return metadata"""
recipes = []
@@ -325,7 +408,6 @@ class RecipeScanner:
                 # Calculate and update fingerprint if missing
                 if 'loras' in recipe_data and 'fingerprint' not in recipe_data:
-                    from ..utils.utils import calculate_recipe_fingerprint
                     fingerprint = calculate_recipe_fingerprint(recipe_data['loras'])
                     recipe_data['fingerprint'] = fingerprint
@@ -367,27 +449,29 @@ class RecipeScanner:
             # If has modelVersionId but no hash, look in lora cache first, then fetch from Civitai
             if 'modelVersionId' in lora and not lora.get('hash'):
                 model_version_id = lora['modelVersionId']
-                # Check if model_version_id is an integer and > 0
-                if isinstance(model_version_id, int) and model_version_id > 0:
-                    # Try to find in lora cache first
-                    hash_from_cache = await self._find_hash_in_lora_cache(model_version_id)
-                    if hash_from_cache:
-                        lora['hash'] = hash_from_cache
-                        metadata_updated = True
-                    else:
-                        # If not in cache, fetch from Civitai
-                        result = await self._get_hash_from_civitai(model_version_id)
-                        if isinstance(result, tuple):
-                            hash_from_civitai, is_deleted = result
-                            if hash_from_civitai:
-                                lora['hash'] = hash_from_civitai
-                                metadata_updated = True
-                            elif is_deleted:
-                                # Mark the lora as deleted if it was not found on Civitai
-                                lora['isDeleted'] = True
-                                logger.warning(f"Marked lora with modelVersionId {model_version_id} as deleted")
-                                metadata_updated = True
-                else:
-                    logger.debug(f"Could not get hash for modelVersionId {model_version_id}")
+                # Try to find in lora cache first
+                hash_from_cache = await self._find_hash_in_lora_cache(model_version_id)
+                if hash_from_cache:
+                    lora['hash'] = hash_from_cache
+                    metadata_updated = True
+                else:
+                    # If not in cache, fetch from Civitai
+                    result = await self._get_hash_from_civitai(model_version_id)
+                    if isinstance(result, tuple):
+                        hash_from_civitai, is_deleted = result
+                        if hash_from_civitai:
+                            lora['hash'] = hash_from_civitai
+                            metadata_updated = True
+                        elif is_deleted:
+                            # Mark the lora as deleted if it was not found on Civitai
+                            lora['isDeleted'] = True
+                            logger.warning(f"Marked lora with modelVersionId {model_version_id} as deleted")
+                            metadata_updated = True
+                    else:
+                        logger.debug(f"Could not get hash for modelVersionId {model_version_id}")
# If has hash but no file_name, look up in lora library
if 'hash' in lora and (not lora.get('file_name') or not lora['file_name']):
@@ -431,13 +515,13 @@ class RecipeScanner:
    async def _get_hash_from_civitai(self, model_version_id: str) -> Optional[Tuple[Optional[str], bool]]:
"""Get hash from Civitai API"""
try:
-            # Get CivitaiClient from ServiceRegistry
-            civitai_client = await self._get_civitai_client()
-            if not civitai_client:
-                logger.error("Failed to get CivitaiClient from ServiceRegistry")
+            # Get metadata provider instead of civitai client directly
+            metadata_provider = await get_default_metadata_provider()
+            if not metadata_provider:
+                logger.error("Failed to get metadata provider")
                 return None

-            version_info, error_msg = await civitai_client.get_model_version_info(model_version_id)
+            version_info, error_msg = await metadata_provider.get_model_version_info(model_version_id)
if not version_info:
if error_msg and "model not found" in error_msg.lower():
@@ -496,9 +580,36 @@ class RecipeScanner:
logger.error(f"Error getting base model for lora: {e}")
return None
def _enrich_lora_entry(self, lora: Dict[str, Any]) -> Dict[str, Any]:
"""Populate convenience fields for a LoRA entry."""
if not lora or not self._lora_scanner:
return lora
hash_value = (lora.get('hash') or '').lower()
if not hash_value:
return lora
try:
lora['inLibrary'] = self._lora_scanner.has_hash(hash_value)
lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(hash_value)
lora['localPath'] = self._lora_scanner.get_path_by_hash(hash_value)
except Exception as exc: # pragma: no cover - defensive logging
logger.debug("Error enriching lora entry %s: %s", hash_value, exc)
return lora
async def get_local_lora(self, name: str) -> Optional[Dict[str, Any]]:
"""Lookup a local LoRA model by name."""
if not self._lora_scanner or not name:
return None
return await self._lora_scanner.get_model_info_by_name(name)
async def get_paginated_data(self, page: int, page_size: int, sort_by: str = 'date', search: str = None, filters: dict = None, search_options: dict = None, lora_hash: str = None, bypass_filters: bool = True):
"""Get paginated and filtered recipe data
Args:
page: Current page number (1-based)
page_size: Number of items per page
@@ -597,16 +708,12 @@ class RecipeScanner:
# Get paginated items
paginated_items = filtered_data[start_idx:end_idx]
# Add inLibrary information for each lora
for item in paginated_items:
if 'loras' in item:
-                    for lora in item['loras']:
-                        if 'hash' in lora and lora['hash']:
-                            lora['inLibrary'] = self._lora_scanner.has_hash(lora['hash'].lower())
-                            lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(lora['hash'].lower())
-                            lora['localPath'] = self._lora_scanner.get_path_by_hash(lora['hash'].lower())
+                    item['loras'] = [self._enrich_lora_entry(dict(lora)) for lora in item['loras']]
result = {
'items': paginated_items,
'total': total_items,
@@ -652,33 +759,25 @@ class RecipeScanner:
# Add lora metadata
if 'loras' in formatted_recipe:
-            for lora in formatted_recipe['loras']:
-                if 'hash' in lora and lora['hash']:
-                    lora_hash = lora['hash'].lower()
-                    lora['inLibrary'] = self._lora_scanner.has_hash(lora_hash)
-                    lora['preview_url'] = self._lora_scanner.get_preview_url_by_hash(lora_hash)
-                    lora['localPath'] = self._lora_scanner.get_path_by_hash(lora_hash)
+            formatted_recipe['loras'] = [self._enrich_lora_entry(dict(lora)) for lora in formatted_recipe['loras']]

         return formatted_recipe

     def _format_file_url(self, file_path: str) -> str:
         """Format file path as URL for serving in web UI"""
         if not file_path:
             return '/loras_static/images/no-preview.png'

         try:
-            # Format file path as a URL that will work with static file serving
-            recipes_dir = os.path.join(config.loras_roots[0], "recipes").replace(os.sep, '/')
-            if file_path.replace(os.sep, '/').startswith(recipes_dir):
-                relative_path = os.path.relpath(file_path, config.loras_roots[0]).replace(os.sep, '/')
-                return f"/loras_static/root1/preview/{relative_path}"
-
-            # If not in recipes dir, try to create a valid URL from the file name
-            file_name = os.path.basename(file_path)
-            return f"/loras_static/root1/preview/recipes/{file_name}"
+            normalized_path = os.path.normpath(file_path)
+            static_url = config.get_preview_static_url(normalized_path)
+            if static_url:
+                return static_url
         except Exception as e:
             logger.error(f"Error formatting file URL: {e}")
-            return '/loras_static/images/no-preview.png'
+        return '/loras_static/images/no-preview.png'
def _format_timestamp(self, timestamp: float) -> str:
"""Format timestamp for display"""
@@ -716,26 +815,159 @@ class RecipeScanner:
# Save updated recipe
with open(recipe_json_path, 'w', encoding='utf-8') as f:
json.dump(recipe_data, f, indent=4, ensure_ascii=False)
# Update the cache if it exists
if self._cache is not None:
-                await self._cache.update_recipe_metadata(recipe_id, metadata)
+                await self._cache.update_recipe_metadata(recipe_id, metadata, resort=False)
+                self._schedule_resort()
# If the recipe has an image, update its EXIF metadata
from ..utils.exif_utils import ExifUtils
image_path = recipe_data.get('file_path')
if image_path and os.path.exists(image_path):
ExifUtils.append_recipe_metadata(image_path, recipe_data)
return True
except Exception as e:
import logging
logging.getLogger(__name__).error(f"Error updating recipe metadata: {e}", exc_info=True)
return False
async def update_lora_entry(
self,
recipe_id: str,
lora_index: int,
*,
target_name: str,
target_lora: Optional[Dict[str, Any]] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Update a specific LoRA entry within a recipe.
Returns the updated recipe data and the refreshed LoRA metadata.
"""
if target_name is None:
raise ValueError("target_name must be provided")
recipe_json_path = os.path.join(self.recipes_dir, f"{recipe_id}.recipe.json")
if not os.path.exists(recipe_json_path):
raise RecipeNotFoundError("Recipe not found")
async with self._mutation_lock:
with open(recipe_json_path, 'r', encoding='utf-8') as file_obj:
recipe_data = json.load(file_obj)
loras = recipe_data.get('loras', [])
if lora_index >= len(loras):
raise RecipeNotFoundError("LoRA index out of range in recipe")
lora_entry = loras[lora_index]
lora_entry['isDeleted'] = False
lora_entry['exclude'] = False
lora_entry['file_name'] = target_name
if target_lora is not None:
sha_value = target_lora.get('sha256') or target_lora.get('sha')
if sha_value:
lora_entry['hash'] = sha_value.lower()
civitai_info = target_lora.get('civitai') or {}
if civitai_info:
lora_entry['modelName'] = civitai_info.get('model', {}).get('name', '')
lora_entry['modelVersionName'] = civitai_info.get('name', '')
lora_entry['modelVersionId'] = civitai_info.get('id')
recipe_data['fingerprint'] = calculate_recipe_fingerprint(recipe_data.get('loras', []))
recipe_data['modified'] = time.time()
with open(recipe_json_path, 'w', encoding='utf-8') as file_obj:
json.dump(recipe_data, file_obj, indent=4, ensure_ascii=False)
cache = await self.get_cached_data()
replaced = await cache.replace_recipe(recipe_id, recipe_data, resort=False)
if not replaced:
await cache.add_recipe(recipe_data, resort=False)
self._schedule_resort()
updated_lora = dict(lora_entry)
if target_lora is not None:
preview_url = target_lora.get('preview_url')
if preview_url:
updated_lora['preview_url'] = config.get_preview_static_url(preview_url)
if target_lora.get('file_path'):
updated_lora['localPath'] = target_lora['file_path']
updated_lora = self._enrich_lora_entry(updated_lora)
return recipe_data, updated_lora
async def get_recipes_for_lora(self, lora_hash: str) -> List[Dict[str, Any]]:
"""Return recipes that reference a given LoRA hash."""
if not lora_hash:
return []
normalized_hash = lora_hash.lower()
cache = await self.get_cached_data()
matching_recipes: List[Dict[str, Any]] = []
for recipe in cache.raw_data:
loras = recipe.get('loras', [])
if any((entry.get('hash') or '').lower() == normalized_hash for entry in loras):
recipe_copy = {**recipe}
recipe_copy['loras'] = [self._enrich_lora_entry(dict(entry)) for entry in loras]
recipe_copy['file_url'] = self._format_file_url(recipe.get('file_path'))
matching_recipes.append(recipe_copy)
return matching_recipes
async def get_recipe_syntax_tokens(self, recipe_id: str) -> List[str]:
"""Build LoRA syntax tokens for a recipe."""
cache = await self.get_cached_data()
recipe = await cache.get_recipe(recipe_id)
if recipe is None:
raise RecipeNotFoundError("Recipe not found")
loras = recipe.get('loras', [])
if not loras:
return []
lora_cache = None
if self._lora_scanner is not None:
lora_cache = await self._lora_scanner.get_cached_data()
syntax_parts: List[str] = []
for lora in loras:
if lora.get('isDeleted', False):
continue
file_name = None
hash_value = (lora.get('hash') or '').lower()
if hash_value and self._lora_scanner is not None and hasattr(self._lora_scanner, '_hash_index'):
file_path = self._lora_scanner._hash_index.get_path(hash_value)
if file_path:
file_name = os.path.splitext(os.path.basename(file_path))[0]
if not file_name and lora.get('modelVersionId') and lora_cache is not None:
for cached_lora in getattr(lora_cache, 'raw_data', []):
civitai_info = cached_lora.get('civitai')
if civitai_info and civitai_info.get('id') == lora.get('modelVersionId'):
cached_path = cached_lora.get('path') or cached_lora.get('file_path')
if cached_path:
file_name = os.path.splitext(os.path.basename(cached_path))[0]
break
if not file_name:
file_name = lora.get('file_name', 'unknown-lora')
strength = lora.get('strength', 1.0)
syntax_parts.append(f"<lora:{file_name}:{strength}>")
return syntax_parts
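
The tokens above use the common <lora:file_name:strength> prompt syntax. A quick illustration of the expected shape (recipe id and file names hypothetical):

# tokens = await recipe_scanner.get_recipe_syntax_tokens("3f2a9c…")
# tokens -> ["<lora:detail-tweaker-xl:0.8>", "<lora:film-grain:0.65>"]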
async def update_lora_filename_by_hash(self, hash_value: str, new_file_name: str) -> Tuple[int, int]:
"""Update file_name in all recipes that contain a LoRA with the specified hash.
Args:
hash_value: The SHA256 hash value of the LoRA
new_file_name: The new file_name to set


@@ -0,0 +1,23 @@
"""Recipe service layer implementations."""
from .analysis_service import RecipeAnalysisService
from .persistence_service import RecipePersistenceService
from .sharing_service import RecipeSharingService
from .errors import (
RecipeServiceError,
RecipeValidationError,
RecipeNotFoundError,
RecipeDownloadError,
RecipeConflictError,
)
__all__ = [
"RecipeAnalysisService",
"RecipePersistenceService",
"RecipeSharingService",
"RecipeServiceError",
"RecipeValidationError",
"RecipeNotFoundError",
"RecipeDownloadError",
"RecipeConflictError",
]
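
For orientation, a minimal wiring sketch for this service layer. The import path and the card preview width are assumptions, and the stub collaborators only mark the injection points; the real application passes ExifUtils, a recipe parser factory, and an async downloader factory.

import logging

from py.services.recipes import (  # package path assumed from this diff
    RecipeAnalysisService,
    RecipePersistenceService,
    RecipeSharingService,
)

class StubExifUtils:
    """Placeholder for the real ExifUtils collaborator (illustration only)."""
    def extract_image_metadata(self, path):
        return None  # pretend no metadata was found
    def optimize_image(self, **kwargs):
        return kwargs["image_data"], ".webp"
    def append_recipe_metadata(self, image_path, recipe_data):
        pass

class StubParserFactory:
    def create_parser(self, metadata):
        return None  # no parser available in this sketch

async def stub_downloader_factory():
    raise NotImplementedError  # the real factory returns an async downloader

logger = logging.getLogger("recipes")
analysis = RecipeAnalysisService(
    exif_utils=StubExifUtils(),
    recipe_parser_factory=StubParserFactory(),
    downloader_factory=stub_downloader_factory,
    logger=logger,
)
persistence = RecipePersistenceService(
    exif_utils=StubExifUtils(),
    card_preview_width=480,  # hypothetical value
    logger=logger,
)
sharing = RecipeSharingService(ttl_seconds=300, logger=logger)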


@@ -0,0 +1,289 @@
"""Services responsible for recipe metadata analysis."""
from __future__ import annotations
import base64
import io
import os
import re
import tempfile
from dataclasses import dataclass
from typing import Any, Callable, Optional
import numpy as np
from PIL import Image
from ...utils.utils import calculate_recipe_fingerprint
from .errors import (
RecipeDownloadError,
RecipeNotFoundError,
RecipeServiceError,
RecipeValidationError,
)
@dataclass(frozen=True)
class AnalysisResult:
"""Return payload from analysis operations."""
payload: dict[str, Any]
status: int = 200
class RecipeAnalysisService:
"""Extract recipe metadata from various image sources."""
def __init__(
self,
*,
exif_utils,
recipe_parser_factory,
downloader_factory: Callable[[], Any],
metadata_collector: Optional[Callable[[], Any]] = None,
metadata_processor_cls: Optional[type] = None,
metadata_registry_cls: Optional[type] = None,
standalone_mode: bool = False,
logger,
) -> None:
self._exif_utils = exif_utils
self._recipe_parser_factory = recipe_parser_factory
self._downloader_factory = downloader_factory
self._metadata_collector = metadata_collector
self._metadata_processor_cls = metadata_processor_cls
self._metadata_registry_cls = metadata_registry_cls
self._standalone_mode = standalone_mode
self._logger = logger
async def analyze_uploaded_image(
self,
*,
image_bytes: bytes | None,
recipe_scanner,
) -> AnalysisResult:
"""Analyze an uploaded image payload."""
if not image_bytes:
raise RecipeValidationError("No image data provided")
temp_path = self._write_temp_file(image_bytes)
try:
metadata = self._exif_utils.extract_image_metadata(temp_path)
if not metadata:
return AnalysisResult({"error": "No metadata found in this image", "loras": []})
return await self._parse_metadata(
metadata,
recipe_scanner=recipe_scanner,
image_path=None,
include_image_base64=False,
)
finally:
self._safe_cleanup(temp_path)
async def analyze_remote_image(
self,
*,
url: str | None,
recipe_scanner,
civitai_client,
) -> AnalysisResult:
"""Analyze an image accessible via URL, including Civitai integration."""
if not url:
raise RecipeValidationError("No URL provided")
if civitai_client is None:
raise RecipeServiceError("Civitai client unavailable")
temp_path = self._create_temp_path()
metadata: Optional[dict[str, Any]] = None
try:
civitai_match = re.match(r"https://civitai\.com/images/(\d+)", url)
if civitai_match:
image_info = await civitai_client.get_image_info(civitai_match.group(1))
if not image_info:
raise RecipeDownloadError("Failed to fetch image information from Civitai")
image_url = image_info.get("url")
if not image_url:
raise RecipeDownloadError("No image URL found in Civitai response")
await self._download_image(image_url, temp_path)
metadata = image_info.get("meta") if "meta" in image_info else None
else:
await self._download_image(url, temp_path)
if metadata is None:
metadata = self._exif_utils.extract_image_metadata(temp_path)
if not metadata:
return self._metadata_not_found_response(temp_path)
return await self._parse_metadata(
metadata,
recipe_scanner=recipe_scanner,
image_path=temp_path,
include_image_base64=True,
)
finally:
self._safe_cleanup(temp_path)
async def analyze_local_image(
self,
*,
file_path: str | None,
recipe_scanner,
) -> AnalysisResult:
"""Analyze a file already present on disk."""
if not file_path:
raise RecipeValidationError("No file path provided")
normalized_path = os.path.normpath(file_path.strip('"').strip("'"))
if not os.path.isfile(normalized_path):
raise RecipeNotFoundError("File not found")
metadata = self._exif_utils.extract_image_metadata(normalized_path)
if not metadata:
return self._metadata_not_found_response(normalized_path)
return await self._parse_metadata(
metadata,
recipe_scanner=recipe_scanner,
image_path=normalized_path,
include_image_base64=True,
)
async def analyze_widget_metadata(self, *, recipe_scanner) -> AnalysisResult:
"""Analyse the most recent generation metadata for widget saves."""
if self._metadata_collector is None or self._metadata_processor_cls is None:
raise RecipeValidationError("Metadata collection not available")
raw_metadata = self._metadata_collector()
metadata_dict = self._metadata_processor_cls.to_dict(raw_metadata)
if not metadata_dict:
raise RecipeValidationError("No generation metadata found")
latest_image = None
if not self._standalone_mode and self._metadata_registry_cls is not None:
metadata_registry = self._metadata_registry_cls()
latest_image = metadata_registry.get_first_decoded_image()
if latest_image is None:
raise RecipeValidationError(
"No recent images found to use for recipe. Try generating an image first."
)
image_bytes = self._convert_tensor_to_png_bytes(latest_image)
if image_bytes is None:
raise RecipeValidationError("Cannot handle this data shape from metadata registry")
return AnalysisResult(
{
"metadata": metadata_dict,
"image_bytes": image_bytes,
}
)
# Internal helpers -------------------------------------------------
async def _parse_metadata(
self,
metadata: dict[str, Any],
*,
recipe_scanner,
image_path: Optional[str],
include_image_base64: bool,
) -> AnalysisResult:
parser = self._recipe_parser_factory.create_parser(metadata)
if parser is None:
payload = {"error": "No parser found for this image", "loras": []}
if include_image_base64 and image_path:
payload["image_base64"] = self._encode_file(image_path)
return AnalysisResult(payload)
result = await parser.parse_metadata(metadata, recipe_scanner=recipe_scanner)
if include_image_base64 and image_path:
result["image_base64"] = self._encode_file(image_path)
if "error" in result and not result.get("loras"):
return AnalysisResult(result)
fingerprint = calculate_recipe_fingerprint(result.get("loras", []))
result["fingerprint"] = fingerprint
matching_recipes: list[str] = []
if fingerprint:
matching_recipes = await recipe_scanner.find_recipes_by_fingerprint(fingerprint)
result["matching_recipes"] = matching_recipes
return AnalysisResult(result)
async def _download_image(self, url: str, temp_path: str) -> None:
downloader = await self._downloader_factory()
success, result = await downloader.download_file(url, temp_path, use_auth=False)
if not success:
raise RecipeDownloadError(f"Failed to download image from URL: {result}")
def _metadata_not_found_response(self, path: str) -> AnalysisResult:
payload: dict[str, Any] = {"error": "No metadata found in this image", "loras": []}
if os.path.exists(path):
payload["image_base64"] = self._encode_file(path)
return AnalysisResult(payload)
def _write_temp_file(self, data: bytes) -> str:
with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
temp_file.write(data)
return temp_file.name
def _create_temp_path(self) -> str:
with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
return temp_file.name
def _safe_cleanup(self, path: Optional[str]) -> None:
if path and os.path.exists(path):
try:
os.unlink(path)
except Exception as exc: # pragma: no cover - defensive logging
self._logger.error("Error deleting temporary file: %s", exc)
def _encode_file(self, path: str) -> str:
with open(path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def _convert_tensor_to_png_bytes(self, latest_image: Any) -> Optional[bytes]:
try:
if isinstance(latest_image, tuple):
tensor_image = latest_image[0] if latest_image else None
if tensor_image is None:
return None
else:
tensor_image = latest_image
if hasattr(tensor_image, "shape"):
self._logger.debug(
"Tensor shape: %s, dtype: %s", tensor_image.shape, getattr(tensor_image, "dtype", None)
)
import torch # type: ignore[import-not-found]
if isinstance(tensor_image, torch.Tensor):
image_np = tensor_image.cpu().numpy()
else:
image_np = np.array(tensor_image)
while len(image_np.shape) > 3:
image_np = image_np[0]
if image_np.dtype in (np.float32, np.float64) and image_np.max() <= 1.0:
image_np = (image_np * 255).astype(np.uint8)
if len(image_np.shape) == 3 and image_np.shape[2] == 3:
pil_image = Image.fromarray(image_np)
img_byte_arr = io.BytesIO()
pil_image.save(img_byte_arr, format="PNG")
return img_byte_arr.getvalue()
except Exception as exc: # pragma: no cover - defensive logging path
self._logger.error("Error processing image data: %s", exc, exc_info=True)
return None
return None
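
The tensor-to-PNG helper above normalizes float images in [0, 1] to uint8 before encoding. The same idea in isolation, without torch (shapes and values fabricated):

import io

import numpy as np
from PIL import Image

# A fake 64x64 RGB image with float values in [0, 1], the shape ComfyUI
# typically hands around once the batch dimension is stripped.
image_np = np.random.rand(64, 64, 3).astype(np.float32)

if image_np.dtype in (np.float32, np.float64) and image_np.max() <= 1.0:
    image_np = (image_np * 255).astype(np.uint8)

buffer = io.BytesIO()
Image.fromarray(image_np).save(buffer, format="PNG")
png_bytes = buffer.getvalue()
print(len(png_bytes), "bytes of PNG data")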


@@ -0,0 +1,22 @@
"""Shared exceptions for recipe services."""
from __future__ import annotations
class RecipeServiceError(Exception):
"""Base exception for recipe service failures."""
class RecipeValidationError(RecipeServiceError):
"""Raised when a request payload fails validation."""
class RecipeNotFoundError(RecipeServiceError):
"""Raised when a recipe resource cannot be located."""
class RecipeDownloadError(RecipeServiceError):
"""Raised when remote recipe assets cannot be downloaded."""
class RecipeConflictError(RecipeServiceError):
"""Raised when a conflicting recipe state is detected."""


@@ -0,0 +1,407 @@
"""Services encapsulating recipe persistence workflows."""
from __future__ import annotations
import base64
import json
import os
import re
import time
import uuid
from dataclasses import dataclass
from typing import Any, Dict, Iterable, Optional
from ...config import config
from ...utils.utils import calculate_recipe_fingerprint
from .errors import RecipeNotFoundError, RecipeValidationError
@dataclass(frozen=True)
class PersistenceResult:
"""Return payload from persistence operations."""
payload: dict[str, Any]
status: int = 200
class RecipePersistenceService:
"""Coordinate recipe persistence tasks across storage and caches."""
def __init__(
self,
*,
exif_utils,
card_preview_width: int,
logger,
) -> None:
self._exif_utils = exif_utils
self._card_preview_width = card_preview_width
self._logger = logger
async def save_recipe(
self,
*,
recipe_scanner,
image_bytes: bytes | None,
image_base64: str | None,
name: str | None,
tags: Iterable[str],
metadata: Optional[dict[str, Any]],
) -> PersistenceResult:
"""Persist a user uploaded recipe."""
missing_fields = []
if not name:
missing_fields.append("name")
if metadata is None:
missing_fields.append("metadata")
if missing_fields:
raise RecipeValidationError(
f"Missing required fields: {', '.join(missing_fields)}"
)
resolved_image_bytes = self._resolve_image_bytes(image_bytes, image_base64)
recipes_dir = recipe_scanner.recipes_dir
os.makedirs(recipes_dir, exist_ok=True)
recipe_id = str(uuid.uuid4())
optimized_image, extension = self._exif_utils.optimize_image(
image_data=resolved_image_bytes,
target_width=self._card_preview_width,
format="webp",
quality=85,
preserve_metadata=True,
)
image_filename = f"{recipe_id}{extension}"
image_path = os.path.join(recipes_dir, image_filename)
with open(image_path, "wb") as file_obj:
file_obj.write(optimized_image)
current_time = time.time()
loras_data = [self._normalise_lora_entry(lora) for lora in metadata.get("loras", [])]
gen_params = metadata.get("gen_params", {})
if not gen_params and "raw_metadata" in metadata:
raw_metadata = metadata.get("raw_metadata", {})
gen_params = {
"prompt": raw_metadata.get("prompt", ""),
"negative_prompt": raw_metadata.get("negative_prompt", ""),
"checkpoint": raw_metadata.get("checkpoint", {}),
"steps": raw_metadata.get("steps", ""),
"sampler": raw_metadata.get("sampler", ""),
"cfg_scale": raw_metadata.get("cfg_scale", ""),
"seed": raw_metadata.get("seed", ""),
"size": raw_metadata.get("size", ""),
"clip_skip": raw_metadata.get("clip_skip", ""),
}
fingerprint = calculate_recipe_fingerprint(loras_data)
recipe_data: Dict[str, Any] = {
"id": recipe_id,
"file_path": image_path,
"title": name,
"modified": current_time,
"created_date": current_time,
"base_model": metadata.get("base_model", ""),
"loras": loras_data,
"gen_params": gen_params,
"fingerprint": fingerprint,
}
tags_list = list(tags)
if tags_list:
recipe_data["tags"] = tags_list
if metadata.get("source_path"):
recipe_data["source_path"] = metadata.get("source_path")
json_filename = f"{recipe_id}.recipe.json"
json_path = os.path.join(recipes_dir, json_filename)
with open(json_path, "w", encoding="utf-8") as file_obj:
json.dump(recipe_data, file_obj, indent=4, ensure_ascii=False)
self._exif_utils.append_recipe_metadata(image_path, recipe_data)
matching_recipes = await self._find_matching_recipes(recipe_scanner, fingerprint, exclude_id=recipe_id)
await recipe_scanner.add_recipe(recipe_data)
return PersistenceResult(
{
"success": True,
"recipe_id": recipe_id,
"image_path": image_path,
"json_path": json_path,
"matching_recipes": matching_recipes,
}
)
async def delete_recipe(self, *, recipe_scanner, recipe_id: str) -> PersistenceResult:
"""Delete an existing recipe."""
recipes_dir = recipe_scanner.recipes_dir
if not recipes_dir or not os.path.exists(recipes_dir):
raise RecipeNotFoundError("Recipes directory not found")
recipe_json_path = os.path.join(recipes_dir, f"{recipe_id}.recipe.json")
if not os.path.exists(recipe_json_path):
raise RecipeNotFoundError("Recipe not found")
with open(recipe_json_path, "r", encoding="utf-8") as file_obj:
recipe_data = json.load(file_obj)
image_path = recipe_data.get("file_path")
os.remove(recipe_json_path)
if image_path and os.path.exists(image_path):
os.remove(image_path)
await recipe_scanner.remove_recipe(recipe_id)
return PersistenceResult({"success": True, "message": "Recipe deleted successfully"})
async def update_recipe(self, *, recipe_scanner, recipe_id: str, updates: dict[str, Any]) -> PersistenceResult:
"""Update persisted metadata for a recipe."""
if not any(key in updates for key in ("title", "tags", "source_path", "preview_nsfw_level")):
raise RecipeValidationError(
"At least one field to update must be provided (title or tags or source_path or preview_nsfw_level)"
)
success = await recipe_scanner.update_recipe_metadata(recipe_id, updates)
if not success:
raise RecipeNotFoundError("Recipe not found or update failed")
return PersistenceResult({"success": True, "recipe_id": recipe_id, "updates": updates})
async def reconnect_lora(
self,
*,
recipe_scanner,
recipe_id: str,
lora_index: int,
target_name: str,
) -> PersistenceResult:
"""Reconnect a LoRA entry within an existing recipe."""
recipe_path = os.path.join(recipe_scanner.recipes_dir, f"{recipe_id}.recipe.json")
if not os.path.exists(recipe_path):
raise RecipeNotFoundError("Recipe not found")
target_lora = await recipe_scanner.get_local_lora(target_name)
if not target_lora:
raise RecipeNotFoundError(f"Local LoRA not found with name: {target_name}")
recipe_data, updated_lora = await recipe_scanner.update_lora_entry(
recipe_id,
lora_index,
target_name=target_name,
target_lora=target_lora,
)
image_path = recipe_data.get("file_path")
if image_path and os.path.exists(image_path):
self._exif_utils.append_recipe_metadata(image_path, recipe_data)
matching_recipes = []
if "fingerprint" in recipe_data:
matching_recipes = await recipe_scanner.find_recipes_by_fingerprint(recipe_data["fingerprint"])
if recipe_id in matching_recipes:
matching_recipes.remove(recipe_id)
return PersistenceResult(
{
"success": True,
"recipe_id": recipe_id,
"updated_lora": updated_lora,
"matching_recipes": matching_recipes,
}
)
async def bulk_delete(
self,
*,
recipe_scanner,
recipe_ids: Iterable[str],
) -> PersistenceResult:
"""Delete multiple recipes in a single request."""
recipe_ids = list(recipe_ids)
if not recipe_ids:
raise RecipeValidationError("No recipe IDs provided")
recipes_dir = recipe_scanner.recipes_dir
if not recipes_dir or not os.path.exists(recipes_dir):
raise RecipeNotFoundError("Recipes directory not found")
deleted_recipes: list[str] = []
failed_recipes: list[dict[str, Any]] = []
for recipe_id in recipe_ids:
recipe_json_path = os.path.join(recipes_dir, f"{recipe_id}.recipe.json")
if not os.path.exists(recipe_json_path):
failed_recipes.append({"id": recipe_id, "reason": "Recipe not found"})
continue
try:
with open(recipe_json_path, "r", encoding="utf-8") as file_obj:
recipe_data = json.load(file_obj)
image_path = recipe_data.get("file_path")
os.remove(recipe_json_path)
if image_path and os.path.exists(image_path):
os.remove(image_path)
deleted_recipes.append(recipe_id)
except Exception as exc:
failed_recipes.append({"id": recipe_id, "reason": str(exc)})
if deleted_recipes:
await recipe_scanner.bulk_remove(deleted_recipes)
return PersistenceResult(
{
"success": True,
"deleted": deleted_recipes,
"failed": failed_recipes,
"total_deleted": len(deleted_recipes),
"total_failed": len(failed_recipes),
}
)
async def save_recipe_from_widget(
self,
*,
recipe_scanner,
metadata: dict[str, Any],
image_bytes: bytes,
) -> PersistenceResult:
"""Save a recipe constructed from widget metadata."""
if not metadata:
raise RecipeValidationError("No generation metadata found")
recipes_dir = recipe_scanner.recipes_dir
os.makedirs(recipes_dir, exist_ok=True)
recipe_id = str(uuid.uuid4())
optimized_image, extension = self._exif_utils.optimize_image(
image_data=image_bytes,
target_width=self._card_preview_width,
format="webp",
quality=85,
preserve_metadata=True,
)
image_filename = f"{recipe_id}{extension}"
image_path = os.path.join(recipes_dir, image_filename)
with open(image_path, "wb") as file_obj:
file_obj.write(optimized_image)
lora_stack = metadata.get("loras", "")
lora_matches = re.findall(r"<lora:([^:]+):([^>]+)>", lora_stack)
if not lora_matches:
raise RecipeValidationError("No LoRAs found in the generation metadata")
loras_data = []
base_model_counts: Dict[str, int] = {}
for name, strength in lora_matches:
lora_info = await recipe_scanner.get_local_lora(name)
lora_data = {
"file_name": name,
"strength": float(strength),
"hash": (lora_info.get("sha256") or "").lower() if lora_info else "",
"modelVersionId": (lora_info.get("civitai") or {}).get("id", 0) if lora_info else 0,
"modelName": ((lora_info.get("civitai") or {}).get("model") or {}).get("name", name) if lora_info else "",
"modelVersionName": (lora_info.get("civitai") or {}).get("name", "") if lora_info else "",
"isDeleted": False,
"exclude": False,
}
loras_data.append(lora_data)
if lora_info and "base_model" in lora_info:
base_model = lora_info["base_model"]
base_model_counts[base_model] = base_model_counts.get(base_model, 0) + 1
recipe_name = self._derive_recipe_name(lora_matches)
most_common_base_model = (
max(base_model_counts.items(), key=lambda item: item[1])[0] if base_model_counts else ""
)
recipe_data = {
"id": recipe_id,
"file_path": image_path,
"title": recipe_name,
"modified": time.time(),
"created_date": time.time(),
"base_model": most_common_base_model,
"loras": loras_data,
"checkpoint": metadata.get("checkpoint", ""),
"gen_params": {
key: value
for key, value in metadata.items()
if key not in ["checkpoint", "loras"]
},
"loras_stack": lora_stack,
}
json_filename = f"{recipe_id}.recipe.json"
json_path = os.path.join(recipes_dir, json_filename)
with open(json_path, "w", encoding="utf-8") as file_obj:
json.dump(recipe_data, file_obj, indent=4, ensure_ascii=False)
self._exif_utils.append_recipe_metadata(image_path, recipe_data)
await recipe_scanner.add_recipe(recipe_data)
return PersistenceResult(
{
"success": True,
"recipe_id": recipe_id,
"image_path": image_path,
"json_path": json_path,
"recipe_name": recipe_name,
}
)
# Helper methods ---------------------------------------------------
def _resolve_image_bytes(self, image_bytes: bytes | None, image_base64: str | None) -> bytes:
if image_bytes is not None:
return image_bytes
if image_base64:
try:
payload = image_base64.split(",", 1)[1] if "," in image_base64 else image_base64
return base64.b64decode(payload)
except Exception as exc: # pragma: no cover - validation guard
raise RecipeValidationError(f"Invalid base64 image data: {exc}") from exc
raise RecipeValidationError("No image data provided")
def _normalise_lora_entry(self, lora: dict[str, Any]) -> dict[str, Any]:
return {
"file_name": lora.get("file_name", "")
or (
os.path.splitext(os.path.basename(lora.get("localPath", "")))[0]
if lora.get("localPath")
else ""
),
"hash": (lora.get("hash") or "").lower(),
"strength": float(lora.get("weight", 1.0)),
"modelVersionId": lora.get("id", 0),
"modelName": lora.get("name", ""),
"modelVersionName": lora.get("version", ""),
"isDeleted": lora.get("isDeleted", False),
"exclude": lora.get("exclude", False),
}
async def _find_matching_recipes(
self,
recipe_scanner,
fingerprint: str | None,
*,
exclude_id: Optional[str] = None,
) -> list[str]:
if not fingerprint:
return []
matches = await recipe_scanner.find_recipes_by_fingerprint(fingerprint)
if exclude_id and exclude_id in matches:
matches.remove(exclude_id)
return matches
def _derive_recipe_name(self, lora_matches: list[tuple[str, str]]) -> str:
recipe_name_parts = [f"{name.strip()}-{float(strength):.2f}" for name, strength in lora_matches[:3]]
recipe_name = "_".join(recipe_name_parts)
return recipe_name or "recipe"
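
The widget save path extracts LoRAs with a single regex over the prompt-style stack, and _derive_recipe_name builds the title from the first three matches. The same pattern in isolation (stack string fabricated):

import re

lora_stack = "<lora:detail-tweaker:0.8> masterpiece <lora:film-grain:0.65>"
matches = re.findall(r"<lora:([^:]+):([^>]+)>", lora_stack)
print(matches)  # [('detail-tweaker', '0.8'), ('film-grain', '0.65')]

name = "_".join(f"{n.strip()}-{float(s):.2f}" for n, s in matches[:3])
print(name)  # detail-tweaker-0.80_film-grain-0.65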


@@ -0,0 +1,105 @@
"""Services handling recipe sharing and downloads."""
from __future__ import annotations
import os
import shutil
import tempfile
import time
from dataclasses import dataclass
from typing import Any, Dict
from .errors import RecipeNotFoundError
@dataclass(frozen=True)
class SharingResult:
"""Return payload for share operations."""
payload: dict[str, Any]
status: int = 200
@dataclass(frozen=True)
class DownloadInfo:
"""Information required to stream a shared recipe file."""
file_path: str
download_filename: str
class RecipeSharingService:
"""Prepare temporary recipe downloads with TTL cleanup."""
def __init__(self, *, ttl_seconds: int = 300, logger) -> None:
self._ttl_seconds = ttl_seconds
self._logger = logger
self._shared_recipes: Dict[str, Dict[str, Any]] = {}
async def share_recipe(self, *, recipe_scanner, recipe_id: str) -> SharingResult:
"""Prepare a temporary downloadable copy of a recipe image."""
recipe = await recipe_scanner.get_recipe_by_id(recipe_id)
if not recipe:
raise RecipeNotFoundError("Recipe not found")
image_path = recipe.get("file_path")
if not image_path or not os.path.exists(image_path):
raise RecipeNotFoundError("Recipe image not found")
ext = os.path.splitext(image_path)[1]
with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as temp_file:
temp_path = temp_file.name
shutil.copy2(image_path, temp_path)
timestamp = int(time.time())
self._shared_recipes[recipe_id] = {
"path": temp_path,
"timestamp": timestamp,
"expires": time.time() + self._ttl_seconds,
}
self._cleanup_shared_recipes()
safe_title = recipe.get("title", "").replace(" ", "_").lower()
filename = f"recipe_{safe_title}{ext}" if safe_title else f"recipe_{recipe_id}{ext}"
url_path = f"/api/recipe/{recipe_id}/share/download?t={timestamp}"
return SharingResult({"success": True, "download_url": url_path, "filename": filename})
async def prepare_download(self, *, recipe_scanner, recipe_id: str) -> DownloadInfo:
"""Return file path and filename for a prepared shared recipe."""
shared_info = self._shared_recipes.get(recipe_id)
if not shared_info or time.time() > shared_info.get("expires", 0):
self._cleanup_entry(recipe_id)
raise RecipeNotFoundError("Shared recipe not found or expired")
file_path = shared_info["path"]
if not os.path.exists(file_path):
self._cleanup_entry(recipe_id)
raise RecipeNotFoundError("Shared recipe file not found")
recipe = await recipe_scanner.get_recipe_by_id(recipe_id)
filename_base = (
f"recipe_{recipe.get('title', '').replace(' ', '_').lower()}" if recipe else recipe_id
)
ext = os.path.splitext(file_path)[1]
download_filename = f"{filename_base}{ext}"
return DownloadInfo(file_path=file_path, download_filename=download_filename)
def _cleanup_shared_recipes(self) -> None:
for recipe_id in list(self._shared_recipes.keys()):
shared = self._shared_recipes.get(recipe_id)
if not shared:
continue
if time.time() > shared.get("expires", 0):
self._cleanup_entry(recipe_id)
def _cleanup_entry(self, recipe_id: str) -> None:
shared_info = self._shared_recipes.pop(recipe_id, None)
if not shared_info:
return
file_path = shared_info.get("path")
if file_path and os.path.exists(file_path):
try:
os.unlink(file_path)
except Exception as exc: # pragma: no cover - defensive logging
self._logger.error("Error cleaning up shared recipe %s: %s", recipe_id, exc)

py/services/server_i18n.py

@@ -0,0 +1,114 @@
import os
import json
import logging
from typing import Dict, Any, Optional
logger = logging.getLogger(__name__)
class ServerI18nManager:
"""Server-side internationalization manager for template rendering"""
def __init__(self):
self.translations = {}
self.current_locale = 'en'
self._load_translations()
def _load_translations(self):
"""Load all translation files from the locales directory"""
i18n_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'locales'
)
if not os.path.exists(i18n_path):
logger.warning(f"I18n directory not found: {i18n_path}")
return
# Load all available locale files
for filename in os.listdir(i18n_path):
if filename.endswith('.json'):
locale_code = filename[:-5] # Remove .json extension
try:
self._load_locale_file(i18n_path, filename, locale_code)
except Exception as e:
logger.error(f"Error loading locale file {filename}: {e}")
def _load_locale_file(self, path: str, filename: str, locale_code: str):
"""Load a single locale JSON file"""
file_path = os.path.join(path, filename)
try:
with open(file_path, 'r', encoding='utf-8') as f:
translations = json.load(f)
self.translations[locale_code] = translations
logger.debug(f"Loaded translations for {locale_code} from {filename}")
except Exception as e:
logger.error(f"Error parsing locale file {filename}: {e}")
def set_locale(self, locale: str):
"""Set the current locale"""
if locale in self.translations:
self.current_locale = locale
else:
logger.warning(f"Locale {locale} not found, using 'en'")
self.current_locale = 'en'
def get_translation(self, key: str, params: Dict[str, Any] = None, **kwargs) -> str:
"""Get translation for a key with optional parameters (supports both dict and keyword args)"""
# Merge kwargs into params for convenience
if params is None:
params = {}
if kwargs:
params = {**params, **kwargs}
if self.current_locale not in self.translations:
return key
# Navigate through nested object using dot notation
keys = key.split('.')
value = self.translations[self.current_locale]
for k in keys:
if isinstance(value, dict) and k in value:
value = value[k]
else:
# Fallback to English if current locale doesn't have the key
if self.current_locale != 'en' and 'en' in self.translations:
en_value = self.translations['en']
for k in keys:
if isinstance(en_value, dict) and k in en_value:
en_value = en_value[k]
else:
return key
value = en_value
else:
return key
break
if not isinstance(value, str):
return key
# Replace parameters if provided
if params:
for param_key, param_value in params.items():
placeholder = f"{{{param_key}}}"
double_placeholder = f"{{{{{param_key}}}}}"
value = value.replace(placeholder, str(param_value))
value = value.replace(double_placeholder, str(param_value))
return value
def get_available_locales(self) -> list:
"""Get list of available locales"""
return list(self.translations.keys())
def create_template_filter(self):
"""Create a Jinja2 filter function for templates"""
def t_filter(key: str, **params) -> str:
return self.get_translation(key, params)
return t_filter
# Create global instance
server_i18n = ServerI18nManager()
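
Usage is straightforward: dot-notation lookup with parameter substitution, plus a filter hook for Jinja2 templates. A short sketch (translation keys hypothetical):

from py.services.server_i18n import server_i18n

server_i18n.set_locale("en")

# Nested lookup with {name} substitution; falls back to English,
# then to the key itself, when an entry is missing.
message = server_i18n.get_translation("recipes.saved", name="My Recipe")

# Registering the filter on a Jinja2 environment:
# env.filters["t"] = server_i18n.create_template_filter()
# Template usage: {{ "recipes.saved" | t(name=recipe.title) }}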


@@ -1,14 +1,51 @@
import copy
import json
import logging
import os
from datetime import datetime, timezone
from threading import Lock
from typing import Any, Dict, Iterable, List, Mapping, Optional
from ..utils.settings_paths import ensure_settings_file
logger = logging.getLogger(__name__)
DEFAULT_SETTINGS: Dict[str, Any] = {
"civitai_api_key": "",
"language": "en",
"show_only_sfw": False,
"enable_metadata_archive_db": False,
"proxy_enabled": False,
"proxy_host": "",
"proxy_port": "",
"proxy_username": "",
"proxy_password": "",
"proxy_type": "http",
"default_lora_root": "",
"default_checkpoint_root": "",
"default_embedding_root": "",
"base_model_path_mappings": {},
"download_path_templates": {},
"example_images_path": "",
"optimize_example_images": True,
"auto_download_example_images": False,
"blur_mature_content": True,
"autoplay_on_hover": False,
"display_density": "default",
"card_info_display": "always",
"include_trigger_words": False,
"compact_mode": False,
}
class SettingsManager:
def __init__(self):
self.settings_file = ensure_settings_file(logger)
self.settings = self._load_settings()
self._migrate_setting_keys()
self._ensure_default_settings()
self._migrate_to_library_registry()
self._migrate_download_path_template()
self._auto_set_default_roots()
self._check_environment_variables()
@@ -23,11 +60,266 @@ class SettingsManager:
logger.error(f"Error loading settings: {e}")
return self._get_default_settings()
def _ensure_default_settings(self) -> None:
"""Ensure all default settings keys exist"""
updated = False
for key, value in self._get_default_settings().items():
if key not in self.settings:
if isinstance(value, dict):
self.settings[key] = value.copy()
else:
self.settings[key] = value
updated = True
if updated:
self._save_settings()
def _migrate_to_library_registry(self) -> None:
"""Ensure settings include the multi-library registry structure."""
libraries = self.settings.get("libraries")
active_name = self.settings.get("active_library")
if not isinstance(libraries, dict) or not libraries:
library_name = active_name or "default"
library_payload = self._build_library_payload(
folder_paths=self.settings.get("folder_paths", {}),
default_lora_root=self.settings.get("default_lora_root", ""),
default_checkpoint_root=self.settings.get("default_checkpoint_root", ""),
default_embedding_root=self.settings.get("default_embedding_root", ""),
)
libraries = {library_name: library_payload}
self.settings["libraries"] = libraries
self.settings["active_library"] = library_name
self._sync_active_library_to_root(save=False)
self._save_settings()
return
sanitized_libraries: Dict[str, Dict[str, Any]] = {}
changed = False
for name, data in libraries.items():
if not isinstance(data, dict):
data = {}
changed = True
payload = self._build_library_payload(
folder_paths=data.get("folder_paths"),
default_lora_root=data.get("default_lora_root"),
default_checkpoint_root=data.get("default_checkpoint_root"),
default_embedding_root=data.get("default_embedding_root"),
metadata=data.get("metadata"),
base=data,
)
sanitized_libraries[name] = payload
if payload is not data:
changed = True
if changed:
self.settings["libraries"] = sanitized_libraries
if not active_name or active_name not in sanitized_libraries:
if sanitized_libraries:
self.settings["active_library"] = next(iter(sanitized_libraries.keys()))
else:
self.settings["active_library"] = "default"
self._sync_active_library_to_root(save=changed)
def _sync_active_library_to_root(self, *, save: bool = False) -> None:
"""Update top-level folder path settings to mirror the active library."""
libraries = self.settings.get("libraries", {})
active_name = self.settings.get("active_library")
if not libraries:
return
if active_name not in libraries:
active_name = next(iter(libraries.keys()))
self.settings["active_library"] = active_name
active_library = libraries.get(active_name, {})
folder_paths = copy.deepcopy(active_library.get("folder_paths", {}))
self.settings["folder_paths"] = folder_paths
self.settings["default_lora_root"] = active_library.get("default_lora_root", "")
self.settings["default_checkpoint_root"] = active_library.get("default_checkpoint_root", "")
self.settings["default_embedding_root"] = active_library.get("default_embedding_root", "")
if save:
self._save_settings()
def _current_timestamp(self) -> str:
return datetime.now(timezone.utc).replace(microsecond=0).isoformat()
def _build_library_payload(
self,
*,
folder_paths: Optional[Mapping[str, Iterable[str]]] = None,
default_lora_root: Optional[str] = None,
default_checkpoint_root: Optional[str] = None,
default_embedding_root: Optional[str] = None,
metadata: Optional[Mapping[str, Any]] = None,
base: Optional[Mapping[str, Any]] = None,
) -> Dict[str, Any]:
payload: Dict[str, Any] = dict(base or {})
timestamp = self._current_timestamp()
if folder_paths is not None:
payload["folder_paths"] = self._normalize_folder_paths(folder_paths)
else:
payload.setdefault("folder_paths", {})
if default_lora_root is not None:
payload["default_lora_root"] = default_lora_root
else:
payload.setdefault("default_lora_root", "")
if default_checkpoint_root is not None:
payload["default_checkpoint_root"] = default_checkpoint_root
else:
payload.setdefault("default_checkpoint_root", "")
if default_embedding_root is not None:
payload["default_embedding_root"] = default_embedding_root
else:
payload.setdefault("default_embedding_root", "")
if metadata:
merged_meta = dict(payload.get("metadata", {}))
merged_meta.update(metadata)
payload["metadata"] = merged_meta
payload.setdefault("created_at", timestamp)
payload["updated_at"] = timestamp
return payload
def _normalize_folder_paths(
self, folder_paths: Mapping[str, Iterable[str]]
) -> Dict[str, List[str]]:
normalized: Dict[str, List[str]] = {}
for key, values in folder_paths.items():
# Skip malformed entries (a bare string would otherwise be iterated per character)
if isinstance(values, str) or not isinstance(values, Iterable):
continue
cleaned: List[str] = []
seen = set()
for value in values:
if not isinstance(value, str):
continue
stripped = value.strip()
if not stripped:
continue
if stripped not in seen:
cleaned.append(stripped)
seen.add(stripped)
normalized[key] = cleaned
return normalized
def _validate_folder_paths(
self,
library_name: str,
folder_paths: Mapping[str, Iterable[str]],
) -> None:
"""Ensure folder paths do not overlap with other libraries."""
libraries = self.settings.get("libraries", {})
normalized_new: Dict[str, Dict[str, str]] = {}
for key, values in folder_paths.items():
path_map: Dict[str, str] = {}
for value in values:
if not isinstance(value, str):
continue
stripped = value.strip()
if not stripped:
continue
normalized_value = os.path.normcase(os.path.normpath(stripped))
path_map[normalized_value] = stripped
if path_map:
normalized_new[key] = path_map
if not normalized_new:
return
for other_name, other in libraries.items():
if other_name == library_name:
continue
other_paths = other.get("folder_paths", {})
for key, new_paths in normalized_new.items():
existing = {
os.path.normcase(os.path.normpath(path))
for path in other_paths.get(key, [])
if isinstance(path, str) and path
}
overlap = existing.intersection(new_paths.keys())
if overlap:
collisions = ", ".join(sorted(new_paths[value] for value in overlap))
raise ValueError(
f"Folder path(s) {collisions} already assigned to library '{other_name}'"
)
def _update_active_library_entry(
self,
*,
folder_paths: Optional[Mapping[str, Iterable[str]]] = None,
default_lora_root: Optional[str] = None,
default_checkpoint_root: Optional[str] = None,
default_embedding_root: Optional[str] = None,
) -> bool:
libraries = self.settings.get("libraries", {})
active_name = self.settings.get("active_library")
if not active_name or active_name not in libraries:
return False
library = libraries[active_name]
changed = False
if folder_paths is not None:
normalized_paths = self._normalize_folder_paths(folder_paths)
if library.get("folder_paths") != normalized_paths:
library["folder_paths"] = normalized_paths
changed = True
if default_lora_root is not None and library.get("default_lora_root") != default_lora_root:
library["default_lora_root"] = default_lora_root
changed = True
if default_checkpoint_root is not None and library.get("default_checkpoint_root") != default_checkpoint_root:
library["default_checkpoint_root"] = default_checkpoint_root
changed = True
if default_embedding_root is not None and library.get("default_embedding_root") != default_embedding_root:
library["default_embedding_root"] = default_embedding_root
changed = True
if changed:
library.setdefault("created_at", self._current_timestamp())
library["updated_at"] = self._current_timestamp()
return changed
def _migrate_setting_keys(self) -> None:
"""Migrate legacy camelCase setting keys to snake_case"""
key_migrations = {
'optimizeExampleImages': 'optimize_example_images',
'autoDownloadExampleImages': 'auto_download_example_images',
'blurMatureContent': 'blur_mature_content',
'autoplayOnHover': 'autoplay_on_hover',
'displayDensity': 'display_density',
'cardInfoDisplay': 'card_info_display',
'includeTriggerWords': 'include_trigger_words',
'compactMode': 'compact_mode',
}
updated = False
for old_key, new_key in key_migrations.items():
if old_key in self.settings:
if new_key not in self.settings:
self.settings[new_key] = self.settings[old_key]
del self.settings[old_key]
updated = True
if updated:
logger.info("Migrated legacy setting keys to snake_case")
self._save_settings()
def _migrate_download_path_template(self):
"""Migrate old download_path_template to new download_path_templates"""
old_template = self.settings.get('download_path_template')
templates = self.settings.get('download_path_templates')
# If old template exists and new templates don't exist, migrate
if old_template is not None and not templates:
logger.info("Migrating download_path_template to download_path_templates")
@@ -42,25 +334,36 @@ class SettingsManager:
logger.info("Migration completed")
def _auto_set_default_roots(self):
"""Auto set default root paths if only one folder is present and default is empty."""
"""Auto set default root paths when only one folder is present and the current default is unset or not among the options."""
folder_paths = self.settings.get('folder_paths', {})
updated = False
# loras
loras = folder_paths.get('loras', [])
if isinstance(loras, list) and len(loras) == 1:
current_lora_root = self.settings.get('default_lora_root')
if current_lora_root not in loras:
self.settings['default_lora_root'] = loras[0]
updated = True
# checkpoints
checkpoints = folder_paths.get('checkpoints', [])
if isinstance(checkpoints, list) and len(checkpoints) == 1:
current_checkpoint_root = self.settings.get('default_checkpoint_root')
if current_checkpoint_root not in checkpoints:
self.settings['default_checkpoint_root'] = checkpoints[0]
updated = True
# embeddings
embeddings = folder_paths.get('embeddings', [])
if isinstance(embeddings, list) and len(embeddings) == 1:
current_embedding_root = self.settings.get('default_embedding_root')
if current_embedding_root not in embeddings:
self.settings['default_embedding_root'] = embeddings[0]
updated = True
if updated:
self._update_active_library_entry(
default_lora_root=self.settings.get('default_lora_root'),
default_checkpoint_root=self.settings.get('default_checkpoint_root'),
default_embedding_root=self.settings.get('default_embedding_root'),
)
self._save_settings()
def _check_environment_variables(self) -> None:
@@ -78,10 +381,11 @@ class SettingsManager:
def _get_default_settings(self) -> Dict[str, Any]:
"""Return default settings"""
defaults = DEFAULT_SETTINGS.copy()
# Ensure nested dicts are independent copies
defaults['base_model_path_mappings'] = {}
defaults['download_path_templates'] = {}
return defaults
def get(self, key: str, default: Any = None) -> Any:
"""Get setting value"""
@@ -90,8 +394,23 @@ class SettingsManager:
def set(self, key: str, value: Any) -> None:
"""Set setting value and save"""
self.settings[key] = value
if key == 'folder_paths' and isinstance(value, Mapping):
self._update_active_library_entry(folder_paths=value) # type: ignore[arg-type]
elif key == 'default_lora_root':
self._update_active_library_entry(default_lora_root=str(value))
elif key == 'default_checkpoint_root':
self._update_active_library_entry(default_checkpoint_root=str(value))
elif key == 'default_embedding_root':
self._update_active_library_entry(default_embedding_root=str(value))
self._save_settings()
def delete(self, key: str) -> None:
"""Delete setting key and save"""
if key in self.settings:
del self.settings[key]
self._save_settings()
logger.info(f"Deleted setting: {key}")
def _save_settings(self) -> None:
"""Save settings to file"""
try:
@@ -100,6 +419,227 @@ class SettingsManager:
except Exception as e:
logger.error(f"Error saving settings: {e}")
def get_libraries(self) -> Dict[str, Dict[str, Any]]:
"""Return a copy of the registered libraries."""
libraries = self.settings.get("libraries", {})
return copy.deepcopy(libraries)
def get_active_library_name(self) -> str:
"""Return the currently active library name."""
libraries = self.settings.get("libraries", {})
active_name = self.settings.get("active_library")
if active_name and active_name in libraries:
return active_name
if libraries:
return next(iter(libraries.keys()))
return "default"
def get_active_library(self) -> Dict[str, Any]:
"""Return a copy of the active library configuration."""
libraries = self.settings.get("libraries", {})
active_name = self.get_active_library_name()
return copy.deepcopy(libraries.get(active_name, {}))
def activate_library(self, library_name: str) -> None:
"""Activate a library by name and refresh dependent services."""
libraries = self.settings.get("libraries", {})
if library_name not in libraries:
raise KeyError(f"Library '{library_name}' does not exist")
current_active = self.get_active_library_name()
if current_active == library_name:
# Ensure root settings stay in sync even if already active
self._sync_active_library_to_root(save=False)
self._save_settings()
self._notify_library_change(library_name)
return
self.settings["active_library"] = library_name
self._sync_active_library_to_root(save=False)
self._save_settings()
self._notify_library_change(library_name)
def upsert_library(
self,
library_name: str,
*,
folder_paths: Optional[Mapping[str, Iterable[str]]] = None,
default_lora_root: Optional[str] = None,
default_checkpoint_root: Optional[str] = None,
default_embedding_root: Optional[str] = None,
metadata: Optional[Mapping[str, Any]] = None,
activate: bool = False,
) -> Dict[str, Any]:
"""Create or update a library definition."""
name = library_name.strip()
if not name:
raise ValueError("Library name cannot be empty")
if folder_paths is not None:
self._validate_folder_paths(name, folder_paths)
libraries = self.settings.setdefault("libraries", {})
existing = libraries.get(name, {})
payload = self._build_library_payload(
folder_paths=folder_paths if folder_paths is not None else existing.get("folder_paths"),
default_lora_root=default_lora_root if default_lora_root is not None else existing.get("default_lora_root"),
default_checkpoint_root=(
default_checkpoint_root
if default_checkpoint_root is not None
else existing.get("default_checkpoint_root")
),
default_embedding_root=(
default_embedding_root
if default_embedding_root is not None
else existing.get("default_embedding_root")
),
metadata=metadata if metadata is not None else existing.get("metadata"),
base=existing,
)
libraries[name] = payload
if activate or not self.settings.get("active_library"):
self.settings["active_library"] = name
self._sync_active_library_to_root(save=False)
self._save_settings()
if self.settings.get("active_library") == name:
self._notify_library_change(name)
return payload
def create_library(
self,
library_name: str,
*,
folder_paths: Mapping[str, Iterable[str]],
default_lora_root: str = "",
default_checkpoint_root: str = "",
default_embedding_root: str = "",
metadata: Optional[Mapping[str, Any]] = None,
activate: bool = False,
) -> Dict[str, Any]:
"""Create a new library entry."""
libraries = self.settings.get("libraries", {})
if library_name in libraries:
raise ValueError(f"Library '{library_name}' already exists")
return self.upsert_library(
library_name,
folder_paths=folder_paths,
default_lora_root=default_lora_root,
default_checkpoint_root=default_checkpoint_root,
default_embedding_root=default_embedding_root,
metadata=metadata,
activate=activate,
)
def rename_library(self, old_name: str, new_name: str) -> None:
"""Rename an existing library."""
libraries = self.settings.get("libraries", {})
if old_name not in libraries:
raise KeyError(f"Library '{old_name}' does not exist")
new_name_stripped = new_name.strip()
if not new_name_stripped:
raise ValueError("New library name cannot be empty")
if new_name_stripped in libraries:
raise ValueError(f"Library '{new_name_stripped}' already exists")
libraries[new_name_stripped] = libraries.pop(old_name)
if self.settings.get("active_library") == old_name:
self.settings["active_library"] = new_name_stripped
active_name = new_name_stripped
else:
active_name = self.settings.get("active_library")
self._sync_active_library_to_root(save=False)
self._save_settings()
if active_name == new_name_stripped:
self._notify_library_change(new_name_stripped)
def delete_library(self, library_name: str) -> None:
"""Remove a library definition."""
libraries = self.settings.get("libraries", {})
if library_name not in libraries:
raise KeyError(f"Library '{library_name}' does not exist")
if len(libraries) == 1:
raise ValueError("At least one library must remain")
was_active = self.settings.get("active_library") == library_name
libraries.pop(library_name)
if was_active:
new_active = next(iter(libraries.keys()))
self.settings["active_library"] = new_active
self._sync_active_library_to_root(save=False)
self._save_settings()
if was_active:
self._notify_library_change(self.settings["active_library"])
def update_active_library_paths(
self,
folder_paths: Mapping[str, Iterable[str]],
*,
default_lora_root: Optional[str] = None,
default_checkpoint_root: Optional[str] = None,
default_embedding_root: Optional[str] = None,
) -> None:
"""Update folder paths for the active library."""
active_name = self.get_active_library_name()
self.upsert_library(
active_name,
folder_paths=folder_paths,
default_lora_root=default_lora_root,
default_checkpoint_root=default_checkpoint_root,
default_embedding_root=default_embedding_root,
activate=True,
)
def _notify_library_change(self, library_name: str) -> None:
"""Notify dependent services that the active library changed."""
libraries = self.settings.get("libraries", {})
library_config = libraries.get(library_name, {})
library_snapshot = copy.deepcopy(library_config)
try:
from ..config import config # Local import to avoid circular dependency
config.apply_library_settings(library_snapshot)
except Exception as exc: # pragma: no cover - defensive logging
logger.debug("Failed to apply library settings to config: %s", exc)
try:
from .service_registry import ServiceRegistry # type: ignore
for service_name in (
"lora_scanner",
"checkpoint_scanner",
"embedding_scanner",
"recipe_scanner",
):
service = ServiceRegistry.get_service_sync(service_name)
if service and hasattr(service, "on_library_changed"):
try:
service.on_library_changed()
except Exception as service_exc: # pragma: no cover - defensive logging
logger.debug(
"Service %s failed to handle library change: %s",
service_name,
service_exc,
)
except Exception as exc: # pragma: no cover - defensive logging
logger.debug("Failed to notify services about library change: %s", exc)
def get_download_path_template(self, model_type: str) -> str:
"""Get download path template for specific model type
@@ -149,4 +689,38 @@ class SettingsManager:
return templates.get(model_type, '{base_model}/{first_tag}')
_SETTINGS_MANAGER: Optional["SettingsManager"] = None
_SETTINGS_MANAGER_LOCK = Lock()
# Legacy module-level alias for backwards compatibility with callers that
# monkeypatch ``py.services.settings_manager.settings`` during tests.
settings: Optional["SettingsManager"] = None
def get_settings_manager() -> "SettingsManager":
"""Return the lazily initialised global :class:`SettingsManager`."""
global _SETTINGS_MANAGER, settings
if settings is not None:
return settings
if _SETTINGS_MANAGER is None:
with _SETTINGS_MANAGER_LOCK:
if _SETTINGS_MANAGER is None:
_SETTINGS_MANAGER = SettingsManager()
settings = _SETTINGS_MANAGER
return _SETTINGS_MANAGER
def reset_settings_manager() -> None:
"""Reset the cached settings manager instance.
Primarily intended for tests so they can configure the settings
directory before the manager touches the filesystem.
"""
global _SETTINGS_MANAGER, settings
with _SETTINGS_MANAGER_LOCK:
_SETTINGS_MANAGER = None
settings = None
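
A hedged sketch of the intended test workflow around the lazy singleton (fixture shape assumed, not taken from this diff):

import pytest

from py.services import settings_manager

@pytest.fixture
def fresh_settings():
    # Drop any cached instance so the test starts from a clean manager;
    # tests can point the settings directory elsewhere before this call.
    settings_manager.reset_settings_manager()
    manager = settings_manager.get_settings_manager()
    yield manager
    settings_manager.reset_settings_manager()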


@@ -0,0 +1,47 @@
"""Service for updating tag collections on metadata records."""
from __future__ import annotations
import os
from typing import Awaitable, Callable, Dict, List, Sequence
class TagUpdateService:
"""Encapsulate tag manipulation for models."""
def __init__(self, *, metadata_manager) -> None:
self._metadata_manager = metadata_manager
async def add_tags(
self,
*,
file_path: str,
new_tags: Sequence[str],
metadata_loader: Callable[[str], Awaitable[Dict[str, object]]],
update_cache: Callable[[str, str, Dict[str, object]], Awaitable[bool]],
) -> List[str]:
"""Add tags to a metadata entry while keeping case-insensitive uniqueness."""
base, _ = os.path.splitext(file_path)
metadata_path = f"{base}.metadata.json"
metadata = await metadata_loader(metadata_path)
existing_tags = list(metadata.get("tags", []))
existing_lower = [tag.lower() for tag in existing_tags]
tags_added: List[str] = []
for tag in new_tags:
if isinstance(tag, str) and tag.strip():
normalized = tag.strip()
if normalized.lower() not in existing_lower:
existing_tags.append(normalized)
existing_lower.append(normalized.lower())
tags_added.append(normalized)
metadata["tags"] = existing_tags
await self._metadata_manager.save_metadata(file_path, metadata)
await update_cache(file_path, file_path, metadata)
return existing_tags
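
The loader and cache-update callbacks are injected, which keeps the service test-friendly. A runnable sketch with in-memory stubs (the metadata manager and paths are fakes; the import path is assumed):

import asyncio

from py.services.tag_update_service import TagUpdateService  # path assumed

class FakeMetadataManager:
    async def save_metadata(self, file_path, metadata):
        print("saved", file_path, metadata["tags"])

async def load_metadata(metadata_path):
    return {"tags": ["Style"]}  # pretend this came from disk

async def update_cache(old_path, new_path, metadata):
    return True

async def main():
    service = TagUpdateService(metadata_manager=FakeMetadataManager())
    tags = await service.add_tags(
        file_path="/models/loras/example.safetensors",
        new_tags=["style", "Portrait"],  # "style" is deduped case-insensitively
        metadata_loader=load_metadata,
        update_cache=update_cache,
    )
    print(tags)  # ['Style', 'Portrait']

asyncio.run(main())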


@@ -0,0 +1,37 @@
"""Application-level orchestration services for model routes."""
from .auto_organize_use_case import (
AutoOrganizeInProgressError,
AutoOrganizeUseCase,
)
from .bulk_metadata_refresh_use_case import (
BulkMetadataRefreshUseCase,
MetadataRefreshProgressReporter,
)
from .download_model_use_case import (
DownloadModelEarlyAccessError,
DownloadModelUseCase,
DownloadModelValidationError,
)
from .example_images import (
DownloadExampleImagesConfigurationError,
DownloadExampleImagesInProgressError,
DownloadExampleImagesUseCase,
ImportExampleImagesUseCase,
ImportExampleImagesValidationError,
)
__all__ = [
"AutoOrganizeInProgressError",
"AutoOrganizeUseCase",
"BulkMetadataRefreshUseCase",
"MetadataRefreshProgressReporter",
"DownloadModelEarlyAccessError",
"DownloadModelUseCase",
"DownloadModelValidationError",
"DownloadExampleImagesConfigurationError",
"DownloadExampleImagesInProgressError",
"DownloadExampleImagesUseCase",
"ImportExampleImagesUseCase",
"ImportExampleImagesValidationError",
]


@@ -0,0 +1,56 @@
"""Auto-organize use case orchestrating concurrency and progress handling."""
from __future__ import annotations
import asyncio
from typing import Optional, Protocol, Sequence
from ..model_file_service import AutoOrganizeResult, ModelFileService, ProgressCallback
class AutoOrganizeLockProvider(Protocol):
"""Minimal protocol for objects exposing auto-organize locking primitives."""
def is_auto_organize_running(self) -> bool:
"""Return ``True`` when an auto-organize operation is in-flight."""
async def get_auto_organize_lock(self) -> asyncio.Lock:
"""Return the asyncio lock guarding auto-organize operations."""
class AutoOrganizeInProgressError(RuntimeError):
"""Raised when an auto-organize run is already active."""
class AutoOrganizeUseCase:
"""Coordinate auto-organize execution behind a shared lock."""
def __init__(
self,
*,
file_service: ModelFileService,
lock_provider: AutoOrganizeLockProvider,
) -> None:
self._file_service = file_service
self._lock_provider = lock_provider
async def execute(
self,
*,
file_paths: Optional[Sequence[str]] = None,
progress_callback: Optional[ProgressCallback] = None,
) -> AutoOrganizeResult:
"""Run the auto-organize routine guarded by a shared lock."""
if self._lock_provider.is_auto_organize_running():
raise AutoOrganizeInProgressError("Auto-organize is already running")
lock = await self._lock_provider.get_auto_organize_lock()
if lock.locked():
raise AutoOrganizeInProgressError("Auto-organize is already running")
async with lock:
return await self._file_service.auto_organize_models(
file_paths=list(file_paths) if file_paths is not None else None,
progress_callback=progress_callback,
)
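
A note on the double check above: testing is_auto_organize_running and then lock.locked() before entering the lock means a second caller fails fast with AutoOrganizeInProgressError rather than silently queueing behind the first run. A minimal in-memory provider satisfying the protocol might look like the following sketch (illustrative only, not the project's actual provider):

import asyncio


class InMemoryLockProvider:
    """Single-process lock provider; the lock doubles as the running flag."""

    def __init__(self) -> None:
        self._lock = asyncio.Lock()

    def is_auto_organize_running(self) -> bool:
        # True while some coroutine holds the lock via `async with`.
        return self._lock.locked()

    async def get_auto_organize_lock(self) -> asyncio.Lock:
        return self._lock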

View File

@@ -0,0 +1,124 @@
"""Use case encapsulating the bulk metadata refresh orchestration."""
from __future__ import annotations
import logging
from typing import Any, Dict, Optional, Protocol, Sequence
from ..metadata_sync_service import MetadataSyncService
from ...utils.metadata_manager import MetadataManager
class MetadataRefreshProgressReporter(Protocol):
"""Protocol for progress reporters used during metadata refresh."""
async def on_progress(self, payload: Dict[str, Any]) -> None:
"""Handle a metadata refresh progress update."""
class BulkMetadataRefreshUseCase:
"""Coordinate bulk metadata refreshes with progress emission."""
def __init__(
self,
*,
service,
metadata_sync: MetadataSyncService,
settings_service,
logger: Optional[logging.Logger] = None,
) -> None:
self._service = service
self._metadata_sync = metadata_sync
self._settings = settings_service
self._logger = logger or logging.getLogger(__name__)
async def execute(
self,
*,
progress_callback: Optional[MetadataRefreshProgressReporter] = None,
) -> Dict[str, Any]:
"""Refresh metadata for all qualifying models."""
cache = await self._service.scanner.get_cached_data()
total_models = len(cache.raw_data)
enable_metadata_archive_db = self._settings.get("enable_metadata_archive_db", False)
to_process: Sequence[Dict[str, Any]] = [
model
for model in cache.raw_data
if model.get("sha256")
and (not model.get("civitai") or not model["civitai"].get("id"))
and (
(enable_metadata_archive_db and not model.get("db_checked", False))
or (not enable_metadata_archive_db and model.get("from_civitai") is True)
)
]
total_to_process = len(to_process)
processed = 0
success = 0
needs_resort = False
async def emit(status: str, **extra: Any) -> None:
if progress_callback is None:
return
payload = {"status": status, "total": total_to_process, "processed": processed, "success": success}
payload.update(extra)
await progress_callback.on_progress(payload)
await emit("started")
for model in to_process:
try:
original_name = model.get("model_name")
await MetadataManager.hydrate_model_data(model)
result, _ = await self._metadata_sync.fetch_and_update_model(
sha256=model["sha256"],
file_path=model["file_path"],
model_data=model,
update_cache_func=self._service.scanner.update_single_model_cache,
)
if result:
success += 1
if original_name != model.get("model_name"):
needs_resort = True
processed += 1
await emit(
"processing",
processed=processed,
success=success,
current_name=model.get("model_name", "Unknown"),
)
except Exception as exc: # pragma: no cover - logging path
processed += 1
self._logger.error(
"Error fetching CivitAI data for %s: %s",
model.get("file_path"),
exc,
)
if needs_resort:
await cache.resort()
await emit("completed", processed=processed, success=success)
message = (
"Successfully updated "
f"{success} of {processed} processed {self._service.model_type}s (total: {total_models})"
)
return {"success": True, "message": message, "processed": processed, "updated": success, "total": total_models}
async def execute_with_error_handling(
self,
*,
progress_callback: Optional[MetadataRefreshProgressReporter] = None,
) -> Dict[str, Any]:
"""Wrapper providing progress notification on unexpected failures."""
try:
return await self.execute(progress_callback=progress_callback)
except Exception as exc:
if progress_callback is not None:
await progress_callback.on_progress({"status": "error", "error": str(exc)})
raise
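
One plausible reporter implementation, assuming progress payloads are fanned out through an async broadcast callable; the callable is a stand-in for whatever websocket or event channel the routes actually use, which this diff does not show:

import logging
from typing import Any, Awaitable, Callable, Dict


class BroadcastProgressReporter:
    """Forward refresh progress payloads and log terminal states."""

    def __init__(self, broadcast: Callable[[Dict[str, Any]], Awaitable[None]]) -> None:
        self._broadcast = broadcast
        self._logger = logging.getLogger(__name__)

    async def on_progress(self, payload: Dict[str, Any]) -> None:
        # Relay every update ("started", "processing", "completed", "error").
        await self._broadcast(payload)
        if payload.get("status") in ("completed", "error"):
            self._logger.info("metadata refresh finished: %s", payload)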

View File

@@ -0,0 +1,37 @@
"""Use case for scheduling model downloads with consistent error handling."""
from __future__ import annotations
from typing import Any, Dict
from ..download_coordinator import DownloadCoordinator
class DownloadModelValidationError(ValueError):
"""Raised when incoming payload validation fails."""
class DownloadModelEarlyAccessError(RuntimeError):
"""Raised when the download is gated behind Civitai early access."""
class DownloadModelUseCase:
"""Coordinate download scheduling through the coordinator service."""
def __init__(self, *, download_coordinator: DownloadCoordinator) -> None:
self._download_coordinator = download_coordinator
async def execute(self, payload: Dict[str, Any]) -> Dict[str, Any]:
"""Schedule a download and normalize error conditions."""
try:
return await self._download_coordinator.schedule_download(payload)
except ValueError as exc:
raise DownloadModelValidationError(str(exc)) from exc
except Exception as exc: # pragma: no cover - defensive logging path
message = str(exc)
if "401" in message:
raise DownloadModelEarlyAccessError(
"Early Access Restriction: This model requires purchase. Please buy early access on Civitai.com."
) from exc
raise
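
A sketch of how a route layer might map these exceptions onto HTTP responses, assuming an aiohttp handler; the app-registry key and the exact status codes are illustrative choices, not taken from this diff:

from aiohttp import web


async def download_model(request: web.Request) -> web.Response:
    payload = await request.json()
    # "download_model_use_case" is a hypothetical app key for the wired use case.
    use_case: DownloadModelUseCase = request.app["download_model_use_case"]
    try:
        result = await use_case.execute(payload)
    except DownloadModelValidationError as exc:
        return web.json_response({"success": False, "error": str(exc)}, status=400)
    except DownloadModelEarlyAccessError as exc:
        return web.json_response({"success": False, "error": str(exc)}, status=401)
    return web.json_response(result)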

View File

@@ -0,0 +1,19 @@
"""Example image specific use case exports."""
from .download_example_images_use_case import (
DownloadExampleImagesUseCase,
DownloadExampleImagesInProgressError,
DownloadExampleImagesConfigurationError,
)
from .import_example_images_use_case import (
ImportExampleImagesUseCase,
ImportExampleImagesValidationError,
)
__all__ = [
"DownloadExampleImagesUseCase",
"DownloadExampleImagesInProgressError",
"DownloadExampleImagesConfigurationError",
"ImportExampleImagesUseCase",
"ImportExampleImagesValidationError",
]

View File

@@ -0,0 +1,42 @@
"""Use case coordinating example image downloads."""
from __future__ import annotations
from typing import Any, Dict
from ....utils.example_images_download_manager import (
DownloadConfigurationError,
DownloadInProgressError,
ExampleImagesDownloadError,
)
class DownloadExampleImagesInProgressError(RuntimeError):
"""Raised when a download is already running."""
def __init__(self, progress: Dict[str, Any]) -> None:
super().__init__("Download already in progress")
self.progress = progress
class DownloadExampleImagesConfigurationError(ValueError):
"""Raised when settings prevent downloads from starting."""
class DownloadExampleImagesUseCase:
"""Validate payloads and trigger the download manager."""
def __init__(self, *, download_manager) -> None:
self._download_manager = download_manager
async def execute(self, payload: Dict[str, Any]) -> Dict[str, Any]:
"""Start a download and translate manager errors."""
try:
return await self._download_manager.start_download(payload)
except DownloadInProgressError as exc:
raise DownloadExampleImagesInProgressError(exc.progress_snapshot) from exc
except DownloadConfigurationError as exc:
raise DownloadExampleImagesConfigurationError(str(exc)) from exc
except ExampleImagesDownloadError:
raise
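
Because the in-progress error carries the live progress snapshot, a caller can surface it instead of a bare failure. A caller-side sketch, where the status-code mapping is an assumption rather than something shown in this diff:

from typing import Any, Dict


async def start_example_images_download(
    use_case: DownloadExampleImagesUseCase, payload: Dict[str, Any]
) -> Dict[str, Any]:
    try:
        return await use_case.execute(payload)
    except DownloadExampleImagesInProgressError as exc:
        # Report the running job's progress rather than a bare failure.
        return {"success": False, "status": 409, "progress": exc.progress}
    except DownloadExampleImagesConfigurationError as exc:
        return {"success": False, "status": 400, "error": str(exc)}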

Some files were not shown because too many files have changed in this diff.